author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /testing/web-platform/tests/tools
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
download   UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar
           UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
           UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz
           UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz
           UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip
Add m-esr52 at 52.6.0
Diffstat (limited to 'testing/web-platform/tests/tools')
-rw-r--r--testing/web-platform/tests/tools/.gitignore10
-rw-r--r--testing/web-platform/tests/tools/.gitmodules20
-rw-r--r--testing/web-platform/tests/tools/.travis.yml35
-rw-r--r--testing/web-platform/tests/tools/LICENSE30
-rw-r--r--testing/web-platform/tests/tools/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/html5lib/.gitignore20
-rw-r--r--testing/web-platform/tests/tools/html5lib/.gitmodules3
-rw-r--r--testing/web-platform/tests/tools/html5lib/.travis.yml37
-rw-r--r--testing/web-platform/tests/tools/html5lib/AUTHORS.rst34
-rw-r--r--testing/web-platform/tests/tools/html5lib/CHANGES.rst171
-rw-r--r--testing/web-platform/tests/tools/html5lib/CONTRIBUTING.rst60
-rw-r--r--testing/web-platform/tests/tools/html5lib/LICENSE20
-rw-r--r--testing/web-platform/tests/tools/html5lib/MANIFEST.in6
-rw-r--r--testing/web-platform/tests/tools/html5lib/README.rst157
-rw-r--r--testing/web-platform/tests/tools/html5lib/debug-info.py37
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/Makefile177
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/changes.rst3
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/conf.py280
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/html5lib.filters.rst59
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/html5lib.rst77
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/html5lib.serializer.rst19
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/html5lib.treebuilders.rst43
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/html5lib.treewalkers.rst59
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/index.rst21
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/license.rst4
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/make.bat242
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/modules.rst7
-rw-r--r--testing/web-platform/tests/tools/html5lib/doc/movingparts.rst209
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/flake8-run.sh14
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/__init__.py23
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/constants.py3104
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/filters/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/filters/_base.py12
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/filters/alphabeticalattributes.py20
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/filters/inject_meta_charset.py65
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/filters/lint.py93
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/filters/optionaltags.py205
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/filters/sanitizer.py12
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/filters/whitespace.py38
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/html5parser.py2723
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/ihatexml.py285
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/inputstream.py886
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/sanitizer.py271
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/serializer/__init__.py16
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/serializer/htmlserializer.py320
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/README1
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/mockParser.py41
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/performance/concatenation.py36
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/support.py177
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_encoding.py67
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_parser.py96
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_parser2.py64
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_sanitizer.py105
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_serializer.py178
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_stream.py183
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_tokenizer.py188
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_treeadapters.py40
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_treewalkers.py353
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/test_whitespace_filter.py133
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/AUTHORS.rst34
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/LICENSE21
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/chardet/test_big5.txt51
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/test-yahoo-jp.dat10
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/tests1.dat394
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/tests2.dat115
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/sanitizer/tests1.dat501
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/core.test125
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/injectmeta.test66
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/optionaltags.test965
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/options.test60
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/whitespace.test51
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/sniffer/htmlOrFeed.json43
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/README.md104
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/contentModelFlags.test81
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/domjs.test96
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/entities.test283
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/escapeFlag.test33
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/namedEntities.test42210
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/numericEntities.test1313
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/pendingSpecChanges.test7
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test1.test196
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test2.test179
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test3.test6047
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test4.test344
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/unicodeChars.test1295
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/unicodeCharsProblematic.test27
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/xmlViolation.test22
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/README.md98
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/adoption01.dat337
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/adoption02.dat39
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/comments01.dat178
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/doctype01.dat424
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/domjs-unsafe.dat bin 0 -> 9884 bytes
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/entities01.dat723
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/entities02.dat283
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/html5test-com.dat291
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/inbody01.dat54
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/isindex.dat47
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/main-element.dat46
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/pending-spec-changes-plain-text-unsafe.dat bin 0 -> 816 bytes
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/pending-spec-changes.dat46
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/plain-text-unsafe.dat bin 0 -> 7925 bytes
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/ruby.dat298
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scriptdata01.dat365
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/adoption01.dat15
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/ark.dat26
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/webkit01.dat28
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tables01.dat286
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/template.dat1591
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests1.dat1959
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests10.dat847
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests11.dat482
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests12.dat62
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests14.dat75
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests15.dat216
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests16.dat2458
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests17.dat179
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests18.dat322
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests19.dat1524
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests2.dat799
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests20.dat516
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests21.dat305
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests22.dat190
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests23.dat168
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests24.dat79
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests25.dat232
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests26.dat388
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests3.dat305
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests4.dat58
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests5.dat210
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests6.dat659
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests7.dat403
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests8.dat151
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests9.dat472
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests_innerHTML_1.dat891
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tricky01.dat336
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/webkit01.dat705
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/webkit02.dat134
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/attributes.test1035
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/base-href-attribute.test787
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/base-target-attribute.test35
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/blockquote-cite-attribute.test7
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/classattribute.test152
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/contenteditableattribute.test59
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/contextmenuattribute.test115
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/dirattribute.test59
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/draggableattribute.test63
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/html-xmlns-attribute.test23
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/idattribute.test115
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/inputattributes.test2795
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/irrelevantattribute.test63
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/langattribute.test5579
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/li-value-attribute.test7
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-href-attribute.test7
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-hreflang-attribute.test7
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-rel-attribute.test271
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/ol-start-attribute.test7
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/starttags.test375
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/style-scoped-attribute.test7
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/tabindexattribute.test79
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/tokenizertotree.py68
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/us-ascii.html3
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tests/utf-8-bom.html3
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/tokenizer.py1731
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treeadapters/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treeadapters/sax.py44
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/__init__.py76
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/_base.py377
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/dom.py227
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/etree.py337
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/etree_lxml.py369
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/__init__.py57
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/_base.py200
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/dom.py46
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/etree.py138
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/genshistream.py69
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/lxmletree.py204
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/pulldom.py63
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/trie/__init__.py12
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/trie/_base.py37
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/trie/datrie.py44
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/trie/py.py67
-rw-r--r--testing/web-platform/tests/tools/html5lib/html5lib/utils.py82
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/parse.py233
-rwxr-xr-xtesting/web-platform/tests/tools/html5lib/requirements-install.sh16
-rw-r--r--testing/web-platform/tests/tools/html5lib/requirements-optional-2.6.txt5
-rw-r--r--testing/web-platform/tests/tools/html5lib/requirements-optional-cpython.txt5
-rw-r--r--testing/web-platform/tests/tools/html5lib/requirements-optional.txt13
-rw-r--r--testing/web-platform/tests/tools/html5lib/requirements-test.txt5
-rw-r--r--testing/web-platform/tests/tools/html5lib/requirements.txt1
-rw-r--r--testing/web-platform/tests/tools/html5lib/setup.py44
-rw-r--r--testing/web-platform/tests/tools/html5lib/tox.ini30
-rw-r--r--testing/web-platform/tests/tools/html5lib/utils/entities.py88
-rw-r--r--testing/web-platform/tests/tools/html5lib/utils/iana_parse.py24
-rw-r--r--testing/web-platform/tests/tools/html5lib/utils/spider.py122
-rw-r--r--testing/web-platform/tests/tools/lint/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/lint/lint.py426
-rw-r--r--testing/web-platform/tests/tools/lint/tests/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/broken.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/broken_ignored.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/lint.whitelist1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/okay.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/test_file_lints.py356
-rw-r--r--testing/web-platform/tests/tools/lint/tests/test_lint.py138
-rw-r--r--testing/web-platform/tests/tools/lint/tests/test_path_lints.py25
-rw-r--r--testing/web-platform/tests/tools/localpaths.py14
-rw-r--r--testing/web-platform/tests/tools/manifest/__init__.py5
-rw-r--r--testing/web-platform/tests/tools/manifest/item.py191
-rw-r--r--testing/web-platform/tests/tools/manifest/log.py8
-rw-r--r--testing/web-platform/tests/tools/manifest/manifest.py418
-rw-r--r--testing/web-platform/tests/tools/manifest/sourcefile.py366
-rw-r--r--testing/web-platform/tests/tools/manifest/tests/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/manifest/tests/test_manifest.py80
-rw-r--r--testing/web-platform/tests/tools/manifest/tests/test_sourcefile.py251
-rw-r--r--testing/web-platform/tests/tools/manifest/tests/test_utils.py28
-rw-r--r--testing/web-platform/tests/tools/manifest/tree.py168
-rw-r--r--testing/web-platform/tests/tools/manifest/update.py119
-rw-r--r--testing/web-platform/tests/tools/manifest/utils.py52
-rw-r--r--testing/web-platform/tests/tools/manifest/vcs.py25
-rw-r--r--testing/web-platform/tests/tools/py/AUTHORS24
-rw-r--r--testing/web-platform/tests/tools/py/CHANGELOG1089
-rw-r--r--testing/web-platform/tests/tools/py/LICENSE19
-rw-r--r--testing/web-platform/tests/tools/py/MANIFEST.in9
-rw-r--r--testing/web-platform/tests/tools/py/README.txt21
-rw-r--r--testing/web-platform/tests/tools/py/bench/localpath.py75
-rw-r--r--testing/web-platform/tests/tools/py/conftest.py71
-rw-r--r--testing/web-platform/tests/tools/py/doc/Makefile133
-rw-r--r--testing/web-platform/tests/tools/py/doc/_templates/layout.html18
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-0.9.0.txt7
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-0.9.2.txt27
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.0.0.txt63
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.0.1.txt48
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.0.2.txt5
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.1.0.txt115
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.1.1.txt48
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.2.0.txt116
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.2.1.txt66
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.3.0.txt580
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.3.1.txt104
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.3.2.txt720
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.3.3.txt26
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.3.4.txt22
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.4.0.txt47
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/release-1.4.1.txt47
-rw-r--r--testing/web-platform/tests/tools/py/doc/announce/releases.txt16
-rw-r--r--testing/web-platform/tests/tools/py/doc/changelog.txt3
-rw-r--r--testing/web-platform/tests/tools/py/doc/code.txt150
-rw-r--r--testing/web-platform/tests/tools/py/doc/conf.py263
-rw-r--r--testing/web-platform/tests/tools/py/doc/download.html18
-rw-r--r--testing/web-platform/tests/tools/py/doc/example/genhtml.py13
-rw-r--r--testing/web-platform/tests/tools/py/doc/example/genhtmlcss.py23
-rw-r--r--testing/web-platform/tests/tools/py/doc/example/genxml.py17
-rw-r--r--testing/web-platform/tests/tools/py/doc/faq.txt172
-rw-r--r--testing/web-platform/tests/tools/py/doc/img/pylib.png bin 0 -> 8276 bytes
-rw-r--r--testing/web-platform/tests/tools/py/doc/index.txt43
-rw-r--r--testing/web-platform/tests/tools/py/doc/install.txt88
-rw-r--r--testing/web-platform/tests/tools/py/doc/io.txt59
-rw-r--r--testing/web-platform/tests/tools/py/doc/links.inc16
-rw-r--r--testing/web-platform/tests/tools/py/doc/log.txt208
-rw-r--r--testing/web-platform/tests/tools/py/doc/misc.txt93
-rw-r--r--testing/web-platform/tests/tools/py/doc/path.txt260
-rw-r--r--testing/web-platform/tests/tools/py/doc/style.css1044
-rw-r--r--testing/web-platform/tests/tools/py/doc/xml.txt164
-rw-r--r--testing/web-platform/tests/tools/py/py/__init__.py150
-rw-r--r--testing/web-platform/tests/tools/py/py/__metainfo.py2
-rw-r--r--testing/web-platform/tests/tools/py/py/_apipkg.py181
-rw-r--r--testing/web-platform/tests/tools/py/py/_builtin.py248
-rw-r--r--testing/web-platform/tests/tools/py/py/_code/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/py/py/_code/_assertionnew.py339
-rw-r--r--testing/web-platform/tests/tools/py/py/_code/_assertionold.py555
-rw-r--r--testing/web-platform/tests/tools/py/py/_code/_py2traceback.py79
-rw-r--r--testing/web-platform/tests/tools/py/py/_code/assertion.py94
-rw-r--r--testing/web-platform/tests/tools/py/py/_code/code.py787
-rw-r--r--testing/web-platform/tests/tools/py/py/_code/source.py419
-rw-r--r--testing/web-platform/tests/tools/py/py/_error.py88
-rw-r--r--testing/web-platform/tests/tools/py/py/_iniconfig.py162
-rw-r--r--testing/web-platform/tests/tools/py/py/_io/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/py/py/_io/capture.py371
-rw-r--r--testing/web-platform/tests/tools/py/py/_io/saferepr.py71
-rw-r--r--testing/web-platform/tests/tools/py/py/_io/terminalwriter.py348
-rw-r--r--testing/web-platform/tests/tools/py/py/_log/__init__.py2
-rw-r--r--testing/web-platform/tests/tools/py/py/_log/log.py186
-rw-r--r--testing/web-platform/tests/tools/py/py/_log/warning.py76
-rw-r--r--testing/web-platform/tests/tools/py/py/_path/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/py/py/_path/cacheutil.py114
-rw-r--r--testing/web-platform/tests/tools/py/py/_path/common.py403
-rw-r--r--testing/web-platform/tests/tools/py/py/_path/local.py911
-rw-r--r--testing/web-platform/tests/tools/py/py/_path/svnurl.py380
-rw-r--r--testing/web-platform/tests/tools/py/py/_path/svnwc.py1240
-rw-r--r--testing/web-platform/tests/tools/py/py/_process/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/py/py/_process/cmdexec.py49
-rw-r--r--testing/web-platform/tests/tools/py/py/_process/forkedfunc.py120
-rw-r--r--testing/web-platform/tests/tools/py/py/_process/killproc.py23
-rw-r--r--testing/web-platform/tests/tools/py/py/_std.py18
-rw-r--r--testing/web-platform/tests/tools/py/py/_xmlgen.py253
-rw-r--r--testing/web-platform/tests/tools/py/py/test.py10
-rw-r--r--testing/web-platform/tests/tools/py/setup.cfg5
-rw-r--r--testing/web-platform/tests/tools/py/setup.py38
-rw-r--r--testing/web-platform/tests/tools/py/testing/code/test_assertion.py308
-rw-r--r--testing/web-platform/tests/tools/py/testing/code/test_code.py159
-rw-r--r--testing/web-platform/tests/tools/py/testing/code/test_excinfo.py909
-rw-r--r--testing/web-platform/tests/tools/py/testing/code/test_source.py651
-rw-r--r--testing/web-platform/tests/tools/py/testing/conftest.py3
-rw-r--r--testing/web-platform/tests/tools/py/testing/io_/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/py/testing/io_/test_capture.py501
-rw-r--r--testing/web-platform/tests/tools/py/testing/io_/test_saferepr.py78
-rw-r--r--testing/web-platform/tests/tools/py/testing/io_/test_terminalwriter.py271
-rw-r--r--testing/web-platform/tests/tools/py/testing/log/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/py/testing/log/test_log.py190
-rw-r--r--testing/web-platform/tests/tools/py/testing/log/test_warning.py76
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/common.py470
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/conftest.py80
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/repotest.dump228
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/svntestbase.py31
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/test_cacheutil.py84
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/test_local.py860
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/test_svnauth.py454
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/test_svnurl.py95
-rw-r--r--testing/web-platform/tests/tools/py/testing/path/test_svnwc.py549
-rw-r--r--testing/web-platform/tests/tools/py/testing/process/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/py/testing/process/test_cmdexec.py39
-rw-r--r--testing/web-platform/tests/tools/py/testing/process/test_forkedfunc.py177
-rw-r--r--testing/web-platform/tests/tools/py/testing/process/test_killproc.py16
-rw-r--r--testing/web-platform/tests/tools/py/testing/root/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/py/testing/root/test_builtin.py179
-rw-r--r--testing/web-platform/tests/tools/py/testing/root/test_error.py37
-rw-r--r--testing/web-platform/tests/tools/py/testing/root/test_py_imports.py68
-rw-r--r--testing/web-platform/tests/tools/py/testing/root/test_std.py13
-rw-r--r--testing/web-platform/tests/tools/py/testing/root/test_xmlgen.py145
-rw-r--r--testing/web-platform/tests/tools/py/testing/test_iniconfig.py299
-rw-r--r--testing/web-platform/tests/tools/py/tox.ini39
-rw-r--r--testing/web-platform/tests/tools/pytest.ini2
-rw-r--r--testing/web-platform/tests/tools/pytest/.gitattributes1
-rw-r--r--testing/web-platform/tests/tools/pytest/.github/ISSUE_TEMPLATE.md8
-rw-r--r--testing/web-platform/tests/tools/pytest/.github/PULL_REQUEST_TEMPLATE.md8
-rw-r--r--testing/web-platform/tests/tools/pytest/.gitignore34
-rw-r--r--testing/web-platform/tests/tools/pytest/.travis.yml40
-rw-r--r--testing/web-platform/tests/tools/pytest/AUTHORS87
-rw-r--r--testing/web-platform/tests/tools/pytest/CHANGELOG.rst2586
-rw-r--r--testing/web-platform/tests/tools/pytest/CONTRIBUTING.rst253
-rw-r--r--testing/web-platform/tests/tools/pytest/HOWTORELEASE.rst92
-rw-r--r--testing/web-platform/tests/tools/pytest/ISSUES.txt365
-rw-r--r--testing/web-platform/tests/tools/pytest/LICENSE21
-rw-r--r--testing/web-platform/tests/tools/pytest/MANIFEST.in34
-rw-r--r--testing/web-platform/tests/tools/pytest/README.rst102
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/__init__.py2
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/_argcomplete.py101
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/_code/__init__.py12
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/_code/_py2traceback.py81
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/_code/code.py795
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/_code/source.py421
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/_pluggy.py11
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/assertion/__init__.py176
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/assertion/reinterpret.py407
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/assertion/rewrite.py885
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/assertion/util.py332
-rwxr-xr-xtesting/web-platform/tests/tools/pytest/_pytest/cacheprovider.py245
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/capture.py472
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/config.py1192
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/doctest.py290
-rwxr-xr-xtesting/web-platform/tests/tools/pytest/_pytest/genscript.py132
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/helpconfig.py139
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/hookspec.py295
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/impl254
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/junitxml.py387
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/main.py744
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/mark.py311
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/monkeypatch.py254
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/nose.py71
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/pastebin.py92
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/pdb.py109
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/pytester.py1110
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/python.py2302
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/recwarn.py221
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/resultlog.py104
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/runner.py515
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/skipping.py354
-rwxr-xr-xtesting/web-platform/tests/tools/pytest/_pytest/standalonetemplate.py89
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/terminal.py593
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/tmpdir.py123
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/unittest.py205
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/README.md13
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst10
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA39
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD8
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL6
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json1
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json1
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt1
-rw-r--r--testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy.py777
-rw-r--r--testing/web-platform/tests/tools/pytest/appveyor.yml28
-rw-r--r--testing/web-platform/tests/tools/pytest/bench/bench.py12
-rw-r--r--testing/web-platform/tests/tools/pytest/bench/bench_argcomplete.py19
-rw-r--r--testing/web-platform/tests/tools/pytest/bench/empty.py3
-rw-r--r--testing/web-platform/tests/tools/pytest/bench/manyparam.py12
-rw-r--r--testing/web-platform/tests/tools/pytest/bench/skip.py10
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/Makefile164
-rwxr-xr-xtesting/web-platform/tests/tools/pytest/doc/en/_getdoctarget.py16
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_templates/globaltoc.html18
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_templates/layout.html34
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_templates/links.html16
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_templates/sidebarintro.html5
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_themes/.gitignore3
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_themes/LICENSE37
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_themes/README31
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/layout.html24
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/relations.html19
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/static/flasky.css_t557
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/theme.conf9
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/_themes/flask_theme_support.py86
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/adopt.rst78
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/index.rst48
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.0.rst129
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.1.rst67
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.2.rst73
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.3.rst40
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.0.rst47
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.1.rst37
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.2.rst33
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.3.rst32
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.0.rst95
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.1.rst41
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.2.rst43
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.4.rst39
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.0.rst134
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.1.rst39
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.2.rst57
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.3.rst62
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.4.rst39
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.5.rst97
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.0.rst225
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.1.rst25
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.2.rst39
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.0.rst175
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.1.rst47
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.2.rst64
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.0.rst153
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.1.rst59
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.2.rst52
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.3.rst52
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.0.rst101
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.1.rst58
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.2.rst58
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.2.rst44
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.3.rst59
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.4.rst52
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.5.rst39
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.6.rst67
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.7.rst31
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.9.0.rst159
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.9.1.rst65
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/announce/sprint2016.rst105
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/assert.rst289
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/bash-completion.rst28
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/builtin.rst134
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/cache.rst278
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/capture.rst118
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/changelog.rst7
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/check_sphinx.py17
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/conf.py326
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/conftest.py1
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/contact.rst51
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/contents.rst39
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/contributing.rst3
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/customize.rst228
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/doctest.rst105
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/assertion/failure_demo.py238
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/conftest.py10
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/test_hello.py5
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_failures.py14
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py42
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/attic.rst79
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/conftest.py1
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/conftest.py18
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py3
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/test_two.py6
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/index.rst34
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/layout1/setup.cfg4
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/markers.rst592
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/multipython.py52
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/nonpython.rst91
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/conftest.py40
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/test_simple.yml7
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/parametrize.rst475
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/conftest.py16
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py2.py7
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py3.py7
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.py11
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.rst192
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/reportingdemo.rst598
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/simple.rst751
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/special.rst72
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/example/xfail_demo.py30
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/faq.rst165
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/feedback.rst8
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/fixture.rst987
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/funcarg_compare.rst217
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/funcargs.rst14
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/genapi.py41
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/getting-started.rst237
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/goodpractices.rst278
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/cramer2.png bin 0 -> 25291 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/freiburg2.jpg bin 0 -> 104057 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/gaynor3.png bin 0 -> 23032 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/keleshev.png bin 0 -> 23246 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/pullrequest.png bin 0 -> 17035 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/pylib.png bin 0 -> 8276 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/pytest1.png bin 0 -> 6010 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/pytest1favi.ico bin 0 -> 3742 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/img/theuni.png bin 0 -> 31476 bytes
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/index.rst61
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/license.rst32
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/links.inc21
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/mark.rst40
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/monkeypatch.rst82
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/naming20.rst20
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/nose.rst55
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/overview.rst13
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/parametrize.rst219
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/plugins.rst159
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/projects.rst85
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/pytest.ini2
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/recwarn.rst130
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/setup.rst10
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/skipping.rst373
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/status.rst5
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/talks.rst116
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/attic.rst117
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/config.html18
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/dist.html18
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/extend.html18
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/index.rst35
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/mission.rst13
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/cov.rst230
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/coverage.rst51
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/django.rst7
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/figleaf.rst44
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/genscript.rst28
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/helpconfig.rst38
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/index.rst68
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/links.rst47
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/nose.rst56
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/oejskit.rst12
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/terminal.rst40
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/plugin/xdist.rst172
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/test/test.html18
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/tmpdir.rst111
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/unittest.rst190
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/usage.rst275
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/writing_plugins.rst575
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/xdist.rst197
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/xunit_setup.rst90
-rw-r--r--testing/web-platform/tests/tools/pytest/doc/en/yieldfixture.rst100
-rw-r--r--testing/web-platform/tests/tools/pytest/extra/get_issues.py74
-rw-r--r--testing/web-platform/tests/tools/pytest/extra/setup-py.test/setup.py11
-rw-r--r--testing/web-platform/tests/tools/pytest/plugin-test.sh20
-rw-r--r--testing/web-platform/tests/tools/pytest/pytest.py28
-rw-r--r--testing/web-platform/tests/tools/pytest/requirements-docs.txt3
-rw-r--r--testing/web-platform/tests/tools/pytest/runtox.py8
-rw-r--r--testing/web-platform/tests/tools/pytest/setup.cfg13
-rw-r--r--testing/web-platform/tests/tools/pytest/setup.py122
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/acceptance_test.py695
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/code/test_code.py174
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/code/test_excinfo.py911
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/code/test_source.py659
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/cx_freeze/install_cx_freeze.py64
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/cx_freeze/runtests_script.py9
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/cx_freeze/runtests_setup.py15
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/cx_freeze/tests/test_doctest.txt6
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/cx_freeze/tests/test_trivial.py6
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/cx_freeze/tox_run.py15
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/python/collect.py1200
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/python/fixture.py2693
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/python/integration.py369
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/python/metafunc.py1094
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/python/raises.py78
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_argcomplete.py90
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_assertinterpret.py274
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_assertion.py628
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_assertrewrite.py716
-rwxr-xr-xtesting/web-platform/tests/tools/pytest/testing/test_cache.py386
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_capture.py1068
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_collection.py641
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_config.py570
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_conftest.py409
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_doctest.py715
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_genscript.py51
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_helpconfig.py69
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_junitxml.py816
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_mark.py672
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_monkeypatch.py330
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_nose.py394
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_parseopt.py287
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_pastebin.py115
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_pdb.py313
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_pluginmanager.py340
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_pytester.py122
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_recwarn.py227
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_resultlog.py236
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_runner.py634
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_runner_xunit.py252
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_session.py244
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_skipping.py917
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_terminal.py880
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_tmpdir.py183
-rw-r--r--testing/web-platform/tests/tools/pytest/testing/test_unittest.py737
-rw-r--r--testing/web-platform/tests/tools/pytest/tox.ini160
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/COPYING28
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/MANIFEST.in6
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/README17
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/abort_handshake_wsh.py43
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/abort_wsh.py43
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/arraybuffer_benchmark.html134
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/bench_wsh.py60
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/benchmark.html203
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/benchmark.js309
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/benchmark_helper_wsh.py85
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/close_wsh.py69
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/console.html317
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/cookie_wsh.py32
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/example/echo_client.py1128
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/echo_noext_wsh.py61
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/echo_wsh.py54
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/example/eventsource.cgi54
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/eventsource.html74
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/handler_map.txt11
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/hsts_wsh.py40
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/internal_error_wsh.py42
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/origin_check_wsh.py44
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/pywebsocket.conf42
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/example/special_headers.cgi28
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/util.js177
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/util_main.js63
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/util_worker.js19
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/xhr_benchmark.html222
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/xhr_benchmark.js389
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/example/xhr_event_logger.html110
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/__init__.py224
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_base.py181
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hixie75.py229
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hybi.py887
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/common.py303
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/dispatch.py393
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/extensions.py885
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/fast_masking.i98
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/__init__.py110
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/_base.py182
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi.py420
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi00.py293
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/headerparserhandler.py254
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/http_header_util.py263
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/memorizingfile.py99
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py219
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/mux.py1889
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/standalone.py1193
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/stream.py57
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/util.py416
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/xhr_benchmark_handler.py109
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/setup.py74
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/cert/cacert.pem17
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/cert/cert.pem61
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/cert/client_cert.p12 bin 0 -> 2582 bytes
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/cert/key.pem15
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/client_for_testing.py1100
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/endtoend_with_external_server.py67
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/mock.py221
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/mux_client_for_testing.py690
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/run_all.py89
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/set_sys_path.py45
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_dispatch.py288
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_endtoend.py753
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_extensions.py360
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_handshake.py188
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_handshake_hybi.py534
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_handshake_hybi00.py516
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_http_header_util.py90
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_memorizingfile.py104
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_mock.py145
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_msgutil.py1356
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/test_mux.py2089
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_stream.py77
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_stream_hixie75.py59
-rwxr-xr-xtesting/web-platform/tests/tools/pywebsocket/src/test/test_util.py200
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/README1
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/abort_by_user_wsh.py42
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/blank_wsh.py31
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/origin_check_wsh.py42
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/exception_in_transfer_wsh.py44
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/no_wsh_at_the_end.py45
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/non_callable_wsh.py39
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py40
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py45
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_transfer_sig_wsh.py45
-rw-r--r--testing/web-platform/tests/tools/pywebsocket/src/test/testdata/hello.pl32
-rw-r--r--testing/web-platform/tests/tools/runner/css/bootstrap-theme.min.css5
-rw-r--r--testing/web-platform/tests/tools/runner/css/bootstrap.min.css5
-rw-r--r--testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.eotbin0 -> 20335 bytes
-rw-r--r--testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.svg229
-rw-r--r--testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.ttfbin0 -> 41280 bytes
-rw-r--r--testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.woffbin0 -> 23320 bytes
-rw-r--r--testing/web-platform/tests/tools/runner/index.html219
-rw-r--r--testing/web-platform/tests/tools/runner/logo.svg8
-rw-r--r--testing/web-platform/tests/tools/runner/report.css43
-rw-r--r--testing/web-platform/tests/tools/runner/report.py307
-rw-r--r--testing/web-platform/tests/tools/runner/runner.css208
-rw-r--r--testing/web-platform/tests/tools/runner/runner.js854
-rw-r--r--testing/web-platform/tests/tools/runner/update_manifest.py19
-rw-r--r--testing/web-platform/tests/tools/scripts/id2path.js12
-rw-r--r--testing/web-platform/tests/tools/scripts/id2path.json822
-rw-r--r--testing/web-platform/tests/tools/scripts/manifest.js140
-rw-r--r--testing/web-platform/tests/tools/scripts/package.json11
-rw-r--r--testing/web-platform/tests/tools/scripts/toc.js107
-rw-r--r--testing/web-platform/tests/tools/scripts/update-directory-structure.js106
-rw-r--r--testing/web-platform/tests/tools/serve/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/serve/serve.py617
-rw-r--r--testing/web-platform/tests/tools/six/.gitignore9
-rw-r--r--testing/web-platform/tests/tools/six/CHANGES246
-rw-r--r--testing/web-platform/tests/tools/six/CONTRIBUTORS22
-rw-r--r--testing/web-platform/tests/tools/six/LICENSE18
-rw-r--r--testing/web-platform/tests/tools/six/MANIFEST.in6
-rw-r--r--testing/web-platform/tests/tools/six/README16
-rw-r--r--testing/web-platform/tests/tools/six/documentation/Makefile130
-rw-r--r--testing/web-platform/tests/tools/six/documentation/conf.py217
-rw-r--r--testing/web-platform/tests/tools/six/documentation/index.rst785
-rw-r--r--testing/web-platform/tests/tools/six/setup.cfg2
-rw-r--r--testing/web-platform/tests/tools/six/setup.py32
-rw-r--r--testing/web-platform/tests/tools/six/six.py787
-rw-r--r--testing/web-platform/tests/tools/six/test_six.py787
-rw-r--r--testing/web-platform/tests/tools/six/tox.ini12
-rw-r--r--testing/web-platform/tests/tools/sslutils/__init__.py9
-rw-r--r--testing/web-platform/tests/tools/sslutils/base.py23
-rw-r--r--testing/web-platform/tests/tools/sslutils/openssl.py405
-rw-r--r--testing/web-platform/tests/tools/sslutils/pregenerated.py26
-rw-r--r--testing/web-platform/tests/tools/tox.ini19
-rw-r--r--testing/web-platform/tests/tools/webdriver/.gitignore1
-rw-r--r--testing/web-platform/tests/tools/webdriver/COPYING373
-rw-r--r--testing/web-platform/tests/tools/webdriver/README.md75
-rw-r--r--testing/web-platform/tests/tools/webdriver/setup.py18
-rw-r--r--testing/web-platform/tests/tools/webdriver/webdriver/__init__.py31
-rw-r--r--testing/web-platform/tests/tools/webdriver/webdriver/client.py595
-rw-r--r--testing/web-platform/tests/tools/webdriver/webdriver/error.py144
-rw-r--r--testing/web-platform/tests/tools/webdriver/webdriver/servo.py22
-rw-r--r--testing/web-platform/tests/tools/webdriver/webdriver/transport.py83
-rw-r--r--testing/web-platform/tests/tools/wptserve/.gitignore40
-rw-r--r--testing/web-platform/tests/tools/wptserve/.travis.yml24
-rw-r--r--testing/web-platform/tests/tools/wptserve/LICENSE30
-rw-r--r--testing/web-platform/tests/tools/wptserve/MANIFEST.in1
-rw-r--r--testing/web-platform/tests/tools/wptserve/README.md4
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/Makefile153
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/conf.py243
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/handlers.rst111
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/index.rst35
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/introduction.rst51
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/make.bat190
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/pipes.rst157
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/request.rst10
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/response.rst41
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/router.rst78
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/server.rst20
-rw-r--r--testing/web-platform/tests/tools/wptserve/docs/stash.rst31
-rw-r--r--testing/web-platform/tests/tools/wptserve/setup.py23
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/base.py65
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/document.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/invalid.py3
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/no_main.py3
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub.sub.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_headers.sub.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_headers.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_params.sub.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_params.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/subdir/file.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test.asis5
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_string.py3
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_tuple_2.py2
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_tuple_3.py2
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/with_headers.txt1
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/docroot/with_headers.txt.sub.headers6
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/test_cookies.py61
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/test_handlers.py299
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/test_pipes.py77
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/test_request.py82
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/test_response.py47
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/test_server.py41
-rw-r--r--testing/web-platform/tests/tools/wptserve/tests/functional/test_stash.py41
-rw-r--r--testing/web-platform/tests/tools/wptserve/tox.ini17
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/__init__.py3
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/constants.py92
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/handlers.py370
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/logger.py29
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/pipes.py449
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/ranges.py90
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/request.py589
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/response.py473
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/router.py168
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/routes.py6
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/server.py461
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/stash.py143
-rw-r--r--testing/web-platform/tests/tools/wptserve/wptserve/utils.py14
-rwxr-xr-xtesting/web-platform/tests/tools/wptserve/wptserve/wptserve.py33
807 files changed, 217376 insertions, 0 deletions
diff --git a/testing/web-platform/tests/tools/.gitignore b/testing/web-platform/tests/tools/.gitignore
new file mode 100644
index 000000000..6197a8354
--- /dev/null
+++ b/testing/web-platform/tests/tools/.gitignore
@@ -0,0 +1,10 @@
+*#
+.coverage*
+htmlcov/
+coverage.xml
+.tox
+*.py[co]
+*.sw[po]
+*~
+\#*
+runner/MANIFEST.json
diff --git a/testing/web-platform/tests/tools/.gitmodules b/testing/web-platform/tests/tools/.gitmodules
new file mode 100644
index 000000000..12cee6914
--- /dev/null
+++ b/testing/web-platform/tests/tools/.gitmodules
@@ -0,0 +1,20 @@
+[submodule "tools/wptserve"]
+ path = wptserve
+ url = https://github.com/w3c/wptserve.git
+ ignore = dirty
+[submodule "tools/pywebsocket"]
+ path = pywebsocket
+ url = https://github.com/w3c/pywebsocket.git
+ ignore = dirty
+[submodule "html5lib"]
+ path = html5lib
+ url = https://github.com/html5lib/html5lib-python.git
+[submodule "tools/six"]
+ path = six
+ url = https://github.com/jgraham/six.git
+[submodule "pytest"]
+ path = pytest
+ url = https://github.com/pytest-dev/pytest.git
+[submodule "webdriver"]
+ path = webdriver
+ url = https://github.com/w3c/wdclient.git
diff --git a/testing/web-platform/tests/tools/.travis.yml b/testing/web-platform/tests/tools/.travis.yml
new file mode 100644
index 000000000..13c4f046b
--- /dev/null
+++ b/testing/web-platform/tests/tools/.travis.yml
@@ -0,0 +1,35 @@
+language: python
+
+sudo: false
+
+cache:
+ directories:
+ - $HOME/.cache/pip
+
+matrix:
+ include:
+ - python: 2.7
+ env: TOXENV=py27
+ - python: 3.5
+ env: TOXENV=py35
+ - python: pypy
+ env: TOXENV=pypy
+
+# An ugly hack needed to make py.test believe our top level can be
+# imported (on Travis CI, we end up in a wpt-tools directory, and of
+# course you cannot import a name including a hyphen in Python, so it
+# ignores the fact we have a __init__.py at the top level).
+before_install:
+ - mv `pwd` /tmp/tools
+ - cd /tmp/tools
+ - export TRAVIS_BUILD_DIR=/tmp/tools
+
+install:
+ - pip install -U tox codecov
+
+script:
+ - tox
+
+after_success:
+ - coverage combine
+ - codecov
diff --git a/testing/web-platform/tests/tools/LICENSE b/testing/web-platform/tests/tools/LICENSE
new file mode 100644
index 000000000..45896e6be
--- /dev/null
+++ b/testing/web-platform/tests/tools/LICENSE
@@ -0,0 +1,30 @@
+W3C 3-clause BSD License
+
+http://www.w3.org/Consortium/Legal/2008/03-bsd-license.html
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of works must retain the original copyright notice,
+ this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the original copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+* Neither the name of the W3C nor the names of its contributors may be
+ used to endorse or promote products derived from this work without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/testing/web-platform/tests/tools/__init__.py b/testing/web-platform/tests/tools/__init__.py
new file mode 100644
index 000000000..07b5e60fb
--- /dev/null
+++ b/testing/web-platform/tests/tools/__init__.py
@@ -0,0 +1 @@
+from . import localpaths as _localpaths
diff --git a/testing/web-platform/tests/tools/html5lib/.gitignore b/testing/web-platform/tests/tools/html5lib/.gitignore
new file mode 100644
index 000000000..73d97fec0
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/.gitignore
@@ -0,0 +1,20 @@
+# Because we never want compiled Python
+__pycache__/
+*.pyc
+
+# Ignore stuff produced by distutils
+/build/
+/dist/
+/MANIFEST
+
+# Generated by parse.py -p
+stats.prof
+
+# From cover (esp. in combination with nose)
+.coverage
+
+# Because tox's data is inherently local
+/.tox/
+
+# We have no interest in built Sphinx files
+/doc/_build
diff --git a/testing/web-platform/tests/tools/html5lib/.gitmodules b/testing/web-platform/tests/tools/html5lib/.gitmodules
new file mode 100644
index 000000000..dbca47032
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "testdata"]
+ path = html5lib/tests/testdata
+ url = https://github.com/html5lib/html5lib-tests.git
diff --git a/testing/web-platform/tests/tools/html5lib/.travis.yml b/testing/web-platform/tests/tools/html5lib/.travis.yml
new file mode 100644
index 000000000..dd3130019
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/.travis.yml
@@ -0,0 +1,37 @@
+language: python
+python:
+ - "2.6"
+ - "2.7"
+ - "3.2"
+ - "3.3"
+ - "3.4"
+ - "pypy"
+
+env:
+ - USE_OPTIONAL=true
+ - USE_OPTIONAL=false
+
+matrix:
+ exclude:
+ - python: "2.7"
+ env: USE_OPTIONAL=false
+ - python: "3.4"
+ env: USE_OPTIONAL=false
+ include:
+ - python: "2.7"
+ env: USE_OPTIONAL=false FLAKE=true
+ - python: "3.4"
+ env: USE_OPTIONAL=false FLAKE=true
+
+before_install:
+ - git submodule update --init --recursive
+
+install:
+ - bash requirements-install.sh
+
+script:
+ - nosetests
+ - bash flake8-run.sh
+
+after_script:
+ - python debug-info.py
diff --git a/testing/web-platform/tests/tools/html5lib/AUTHORS.rst b/testing/web-platform/tests/tools/html5lib/AUTHORS.rst
new file mode 100644
index 000000000..4148a6ed2
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/AUTHORS.rst
@@ -0,0 +1,34 @@
+Credits
+=======
+
+``html5lib`` is written and maintained by:
+
+- James Graham
+- Geoffrey Sneddon
+- Łukasz Langa
+
+
+Patches and suggestions
+-----------------------
+(In chronological order, by first commit:)
+
+- Anne van Kesteren
+- Lachlan Hunt
+- lantis63
+- Sam Ruby
+- Tim Fletcher
+- Thomas Broyer
+- Mark Pilgrim
+- Philip Taylor
+- Ryan King
+- Edward Z. Yang
+- fantasai
+- Philip Jägenstedt
+- Ms2ger
+- Andy Wingo
+- Andreas Madsack
+- Karim Valiev
+- Mohammad Taha Jahangir
+- Juan Carlos Garcia Segovia
+- Mike West
+- Marc DM
diff --git a/testing/web-platform/tests/tools/html5lib/CHANGES.rst b/testing/web-platform/tests/tools/html5lib/CHANGES.rst
new file mode 100644
index 000000000..1431b3c9b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/CHANGES.rst
@@ -0,0 +1,171 @@
+Change Log
+----------
+
+0.9999
+~~~~~~
+
+Released on XXX, 2014
+
+* XXX
+
+
+0.999
+~~~~~
+
+Released on December 23, 2013
+
+* Fix #127: add work-around for CPython issue #20007: .read(0) on
+ http.client.HTTPResponse drops the rest of the content.
+
+* Fix #115: lxml treewalker can now deal with fragments containing, at
+ their root level, text nodes with non-ASCII characters on Python 2.
+
+
+0.99
+~~~~
+
+Released on September 10, 2013
+
+* No library changes from 1.0b3; released as 0.99 because pip 1.4
+ changed behaviour to avoid installing pre-release versions, per
+ PEP 440.
+
+
+1.0b3
+~~~~~
+
+Released on July 24, 2013
+
+* Removed ``RecursiveTreeWalker`` from ``treewalkers._base``. Any
+ implementation using it should be moved to
+ ``NonRecursiveTreeWalker``, as everything bundled with html5lib has
+ for years.
+
+* Fix #67 so that ``BufferedStream`` correctly returns a bytes
+ object, thereby fixing any case where html5lib is passed a
+ non-seekable RawIOBase-like object.
+
+
+1.0b2
+~~~~~
+
+Released on June 27, 2013
+
+* Removed reordering of attributes within the serializer. There is now
+ an ``alphabetical_attributes`` option which preserves the previous
+ behaviour through a new filter. This allows attribute order to be
+ preserved through html5lib if the tree builder preserves order.
+
+* Removed ``dom2sax`` from DOM treebuilders. It has been replaced by
+ ``treeadapters.sax.to_sax`` which is generic and supports any
+ treewalker; it also resolves all known bugs with ``dom2sax``.
+
+* Fix treewalker assertions on hitting bytes strings on
+ Python 2. Previous to 1.0b1, treewalkers coped with mixed
+ bytes/unicode data on Python 2; this reintroduces this prior
+ behaviour on Python 2. Behaviour is unchanged on Python 3.
+
+
+1.0b1
+~~~~~
+
+Released on May 17, 2013
+
+* Implementation updated to implement the `HTML specification
+ <http://www.whatwg.org/specs/web-apps/current-work/>`_ as of 5th May
+ 2013 (`SVN <http://svn.whatwg.org/webapps/>`_ revision r7867).
+
+* Python 3.2+ supported in a single codebase using the ``six`` library.
+
+* Removed support for Python 2.5 and older.
+
+* Removed the deprecated Beautiful Soup 3 treebuilder.
+ ``beautifulsoup4`` can use ``html5lib`` as a parser instead. Note that
+ since it doesn't support namespaces, foreign content like SVG and
+ MathML is parsed incorrectly.
+
+* Removed ``simpletree`` from the package. The default tree builder is
+ now ``etree`` (using the ``xml.etree.cElementTree`` implementation if
+ available, and ``xml.etree.ElementTree`` otherwise).
+
+* Removed the ``XHTMLSerializer`` as it never actually guaranteed its
+ output was well-formed XML, and hence provided little of use.
+
+* Removed default DOM treebuilder, so ``html5lib.treebuilders.dom`` is no
+ longer supported. ``html5lib.treebuilders.getTreeBuilder("dom")`` will
+ return the default DOM treebuilder, which uses ``xml.dom.minidom``.
+
+* Optional heuristic character encoding detection now based on
+ ``charade`` for Python 2.6 - 3.3 compatibility.
+
+* Optional ``Genshi`` treewalker support fixed.
+
+* Many bugfixes, including:
+
+ * #33: null in attribute value breaks XML AttValue;
+
+ * #4: nested, indirect descendant, <button> causes infinite loop;
+
+ * `Google Code 215
+ <http://code.google.com/p/html5lib/issues/detail?id=215>`_: Properly
+ detect seekable streams;
+
+ * `Google Code 206
+ <http://code.google.com/p/html5lib/issues/detail?id=206>`_: add
+ support for <video preload=...>, <audio preload=...>;
+
+ * `Google Code 205
+ <http://code.google.com/p/html5lib/issues/detail?id=205>`_: add
+ support for <video poster=...>;
+
+ * `Google Code 202
+ <http://code.google.com/p/html5lib/issues/detail?id=202>`_: Unicode
+ file breaks InputStream.
+
+* Source code is now mostly PEP 8 compliant.
+
+* Test harness has been improved and now depends on ``nose``.
+
+* Documentation updated and moved to http://html5lib.readthedocs.org/.
+
+
+0.95
+~~~~
+
+Released on February 11, 2012
+
+
+0.90
+~~~~
+
+Released on January 17, 2010
+
+
+0.11.1
+~~~~~~
+
+Released on June 12, 2008
+
+
+0.11
+~~~~
+
+Released on June 10, 2008
+
+
+0.10
+~~~~
+
+Released on October 7, 2007
+
+
+0.9
+~~~
+
+Released on March 11, 2007
+
+
+0.2
+~~~
+
+Released on January 8, 2007
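The 1.0b2 entry above introduces the ``alphabetical_attributes`` serializer
option. A minimal sketch of using it, assuming the bundled ``etree`` tree and
``html5lib.getTreeWalker`` (illustrative, not part of the upstream changelog):

.. code-block:: python

    import html5lib
    from html5lib.serializer.htmlserializer import HTMLSerializer

    tree = html5lib.parse('<p id="b" class="a">Hi</p>')
    walker = html5lib.getTreeWalker("etree")

    # alphabetical_attributes=True routes the token stream through the
    # alphabeticalattributes filter, restoring the pre-1.0b2 ordering.
    serializer = HTMLSerializer(alphabetical_attributes=True)
    print(serializer.render(walker(tree)))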
diff --git a/testing/web-platform/tests/tools/html5lib/CONTRIBUTING.rst b/testing/web-platform/tests/tools/html5lib/CONTRIBUTING.rst
new file mode 100644
index 000000000..8c5e19853
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/CONTRIBUTING.rst
@@ -0,0 +1,60 @@
+Contributing
+============
+
+Pull requests are more than welcome — both to the library and to the
+documentation. Some useful information:
+
+- We aim to follow PEP 8 in the library, but ignoring the
+ 79-character-per-line limit, instead following a soft limit of 99,
+ but allowing lines over this where it is the readable thing to do.
+
+- We aim to follow PEP 257 for all docstrings, and make them properly
+ parseable by Sphinx while generating API documentation.
+
+- We keep ``pyflakes`` reporting no errors or warnings at all times.
+
+- We keep the master branch passing all tests at all times on all
+ supported versions.
+
+`Travis CI <https://travis-ci.org/html5lib/html5lib-python/>`_ is run
+against all pull requests and should enforce all of the above.
+
+We use `Opera Critic <https://critic.hoppipolla.co.uk/>`_ as an external
+code-review tool, which uses your GitHub login to authenticate. You'll
+get email notifications for issues raised in the review.
+
+
+Patch submission guidelines
+---------------------------
+
+- **Create a new Git branch specific to your change.** Do not put
+ multiple fixes/features in the same pull request. If you find an
+ unrelated bug, create a distinct branch and submit a separate pull
+ request for the bugfix. This makes life much easier for maintainers
+ and will speed up merging your patches.
+
+- **Write a test** whenever possible. Following existing tests is often
+ easiest, and a good way to tell whether the feature you're modifying
+ is easily testable.
+
+- **Make sure documentation is updated.** Keep docstrings current, and
+ if necessary, update the Sphinx documentation in ``doc/``.
+
+- **Add a changelog entry** at the top of ``CHANGES.rst`` following
+ existing entries' styles.
+
+- **Run tests with tox** if possible, to make sure your changes are
+ compatible with all supported Python versions.
+
+- **Squash commits** before submitting the pull request so that a single
+ commit contains the entire change, and only that change (see the first
+ bullet).
+
+- **Don't rebase after creating the pull request.** Merge with upstream,
+ if necessary, and use ``git commit --fixup`` for fixing issues raised
+ in a Critic review or by a failing Travis build. The reviewer will
+ squash and rebase your pull request while accepting it. Even though
+ GitHub won't recognize the pull request as accepted, the squashed
+ commits will properly specify you as the author.
+
+- **Attribute yourself** in ``AUTHORS.rst``.
diff --git a/testing/web-platform/tests/tools/html5lib/LICENSE b/testing/web-platform/tests/tools/html5lib/LICENSE
new file mode 100644
index 000000000..c87fa7a00
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2006-2013 James Graham and other contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/testing/web-platform/tests/tools/html5lib/MANIFEST.in b/testing/web-platform/tests/tools/html5lib/MANIFEST.in
new file mode 100644
index 000000000..1edd0b7de
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/MANIFEST.in
@@ -0,0 +1,6 @@
+include LICENSE
+include CHANGES.rst
+include README.rst
+include requirements*.txt
+graft html5lib/tests/testdata
+recursive-include html5lib/tests *.py
diff --git a/testing/web-platform/tests/tools/html5lib/README.rst b/testing/web-platform/tests/tools/html5lib/README.rst
new file mode 100644
index 000000000..9e0a0f740
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/README.rst
@@ -0,0 +1,157 @@
+html5lib
+========
+
+.. image:: https://travis-ci.org/html5lib/html5lib-python.png?branch=master
+ :target: https://travis-ci.org/html5lib/html5lib-python
+
+html5lib is a pure-python library for parsing HTML. It is designed to
+conform to the WHATWG HTML specification, as is implemented by all major
+web browsers.
+
+
+Usage
+-----
+
+Simple usage follows this pattern:
+
+.. code-block:: python
+
+ import html5lib
+ with open("mydocument.html", "rb") as f:
+ document = html5lib.parse(f)
+
+or:
+
+.. code-block:: python
+
+ import html5lib
+ document = html5lib.parse("<p>Hello World!")
+
+By default, the ``document`` will be an ``xml.etree`` element instance.
+Whenever possible, html5lib chooses the accelerated ``ElementTree``
+implementation (i.e. ``xml.etree.cElementTree`` on Python 2.x).
+
+Two other tree types are supported: ``xml.dom.minidom`` and
+``lxml.etree``. To use an alternative format, specify the name of
+a treebuilder:
+
+.. code-block:: python
+
+ import html5lib
+ with open("mydocument.html", "rb") as f:
+ lxml_etree_document = html5lib.parse(f, treebuilder="lxml")
+
+When using ``urllib2`` (Python 2), the charset from HTTP should be
+passed into html5lib as follows:
+
+.. code-block:: python
+
+ from contextlib import closing
+ from urllib2 import urlopen
+ import html5lib
+
+ with closing(urlopen("http://example.com/")) as f:
+ document = html5lib.parse(f, encoding=f.info().getparam("charset"))
+
+When using ``urllib.request`` (Python 3), the charset from HTTP
+should be passed into html5lib as follows:
+
+.. code-block:: python
+
+ from urllib.request import urlopen
+ import html5lib
+
+ with urlopen("http://example.com/") as f:
+ document = html5lib.parse(f, encoding=f.info().get_content_charset())
+
+To have more control over the parser, create a parser object explicitly.
+For instance, to make the parser raise exceptions on parse errors, use:
+
+.. code-block:: python
+
+ import html5lib
+ with open("mydocument.html", "rb") as f:
+ parser = html5lib.HTMLParser(strict=True)
+ document = parser.parse(f)
+
+When you're instantiating parser objects explicitly, pass a treebuilder
+class as the ``tree`` keyword argument to use an alternative document
+format:
+
+.. code-block:: python
+
+ import html5lib
+ parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder("dom"))
+ minidom_document = parser.parse("<p>Hello World!")
+
+More documentation is available at http://html5lib.readthedocs.org/.
+
+
+Installation
+------------
+
+html5lib works on CPython 2.6+, CPython 3.2+ and PyPy. To install it,
+use:
+
+.. code-block:: bash
+
+ $ pip install html5lib
+
+
+Optional Dependencies
+---------------------
+
+The following third-party libraries may be used for additional
+functionality:
+
+- ``datrie`` can be used to improve parsing performance (though in
+ almost all cases the improvement is marginal);
+
+- ``lxml`` is supported as a tree format (for both building and
+ walking) under CPython (but *not* PyPy where it is known to cause
+ segfaults);
+
+- ``genshi`` has a treewalker (but not builder);
+
+- ``charade`` can be used as a fallback when character encoding cannot
+ be determined; ``chardet``, from which it was forked, can also be used
+ on Python 2; and
+
+- ``ordereddict`` can be used under Python 2.6
+ (``collections.OrderedDict`` is used instead on later versions) to
+ serialize attributes in alphabetical order.
+
+
+Bugs
+----
+
+Please report any bugs on the `issue tracker
+<https://github.com/html5lib/html5lib-python/issues>`_.
+
+
+Tests
+-----
+
+Unit tests require the ``nose`` library and can be run using the
+``nosetests`` command in the root directory; ``ordereddict`` is
+required under Python 2.6. All should pass.
+
+Test data are contained in a separate `html5lib-tests
+<https://github.com/html5lib/html5lib-tests>`_ repository and included
+as a submodule, thus for git checkouts they must be initialized::
+
+ $ git submodule init
+ $ git submodule update
+
+If you have all compatible Python implementations available on your
+system, you can run tests on all of them using the ``tox`` utility,
+which can be found on PyPI.
+
+
+Questions?
+----------
+
+There's a mailing list available for support on Google Groups,
+`html5lib-discuss <http://groups.google.com/group/html5lib-discuss>`_,
+though you may get a quicker response asking on IRC in `#whatwg on
+irc.freenode.net <http://wiki.whatwg.org/wiki/IRC>`_.
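Because the README above notes that the default tree is an ``xml.etree``
element, the parsed document can be queried with the standard ElementTree
API. A rough sketch, assuming the default (namespaced) ``etree`` treebuilder:

.. code-block:: python

    import html5lib

    document = html5lib.parse("<p class='demo'>Hello World!")

    # HTML elements are namespaced by default, so lookups need the
    # XHTML namespace prefix.
    ns = "{http://www.w3.org/1999/xhtml}"
    for p in document.iter(ns + "p"):
        print(p.get("class"), p.text)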
diff --git a/testing/web-platform/tests/tools/html5lib/debug-info.py b/testing/web-platform/tests/tools/html5lib/debug-info.py
new file mode 100644
index 000000000..b5d2bb6a3
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/debug-info.py
@@ -0,0 +1,37 @@
+from __future__ import print_function, unicode_literals
+
+import platform
+import sys
+
+
+info = {
+ "impl": platform.python_implementation(),
+ "version": platform.python_version(),
+ "revision": platform.python_revision(),
+ "maxunicode": sys.maxunicode,
+ "maxsize": sys.maxsize
+}
+
+search_modules = ["charade", "chardet", "datrie", "genshi", "html5lib", "lxml", "six"]
+found_modules = []
+
+for m in search_modules:
+ try:
+ __import__(m)
+ except ImportError:
+ pass
+ else:
+ found_modules.append(m)
+
+info["modules"] = ", ".join(found_modules)
+
+
+print("""html5lib debug info:
+
+Python %(version)s (revision: %(revision)s)
+Implementation: %(impl)s
+
+sys.maxunicode: %(maxunicode)X
+sys.maxsize: %(maxsize)X
+
+Installed modules: %(modules)s""" % info)
diff --git a/testing/web-platform/tests/tools/html5lib/doc/Makefile b/testing/web-platform/tests/tools/html5lib/doc/Makefile
new file mode 100644
index 000000000..e0e58667e
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/html5lib.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/html5lib.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/html5lib"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/html5lib"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/testing/web-platform/tests/tools/html5lib/doc/changes.rst b/testing/web-platform/tests/tools/html5lib/doc/changes.rst
new file mode 100644
index 000000000..ded3b705d
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/changes.rst
@@ -0,0 +1,3 @@
+.. :changelog:
+
+.. include:: ../CHANGES.rst
diff --git a/testing/web-platform/tests/tools/html5lib/doc/conf.py b/testing/web-platform/tests/tools/html5lib/doc/conf.py
new file mode 100644
index 000000000..434f21c42
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/conf.py
@@ -0,0 +1,280 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# html5lib documentation build configuration file, created by
+# sphinx-quickstart on Wed May 8 00:04:49 2013.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'html5lib'
+copyright = '2006 - 2013, James Graham, Geoffrey Sneddon, and contributors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '1.0'
+# The full version, including alpha/beta/rc tags.
+sys.path.append(os.path.abspath('..'))
+from html5lib import __version__
+release = __version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = 'en'
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build', 'theme']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'html5libdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'html5lib.tex', 'html5lib Documentation',
+ 'James Graham, Geoffrey Sneddon, and contributors', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'html5lib', 'html5lib Documentation',
+ ['James Graham, Geoffrey Sneddon, and contributors'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'html5lib', 'html5lib Documentation',
+ 'James Graham, Geoffrey Sneddon, and contributors', 'html5lib', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+class CExtMock(object):
+ """Required for autodoc on readthedocs.org where you cannot build C extensions."""
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __call__(self, *args, **kwargs):
+ return CExtMock()
+
+ @classmethod
+ def __getattr__(cls, name):
+ if name in ('__file__', '__path__'):
+ return '/dev/null'
+ else:
+ return CExtMock()
+
+try:
+ import lxml # flake8: noqa
+except ImportError:
+ sys.modules['lxml'] = CExtMock()
+ sys.modules['lxml.etree'] = CExtMock()
+ print("warning: lxml modules mocked.")
+
+try:
+ import genshi # flake8: noqa
+except ImportError:
+ sys.modules['genshi'] = CExtMock()
+ sys.modules['genshi.core'] = CExtMock()
+ print("warning: genshi modules mocked.")
diff --git a/testing/web-platform/tests/tools/html5lib/doc/html5lib.filters.rst b/testing/web-platform/tests/tools/html5lib/doc/html5lib.filters.rst
new file mode 100644
index 000000000..1fda38a73
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/html5lib.filters.rst
@@ -0,0 +1,59 @@
+filters Package
+===============
+
+:mod:`_base` Module
+-------------------
+
+.. automodule:: html5lib.filters._base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`alphabeticalattributes` Module
+------------------------------------
+
+.. automodule:: html5lib.filters.alphabeticalattributes
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`inject_meta_charset` Module
+---------------------------------
+
+.. automodule:: html5lib.filters.inject_meta_charset
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`lint` Module
+------------------
+
+.. automodule:: html5lib.filters.lint
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`optionaltags` Module
+--------------------------
+
+.. automodule:: html5lib.filters.optionaltags
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`sanitizer` Module
+-----------------------
+
+.. automodule:: html5lib.filters.sanitizer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`whitespace` Module
+------------------------
+
+.. automodule:: html5lib.filters.whitespace
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
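Each of the filter modules above exposes a ``Filter`` class that wraps a
treewalker token stream. A rough illustration of chaining one by hand,
assuming the default ``etree`` tree and the ``whitespace`` filter
(illustrative only):

.. code-block:: python

    import html5lib
    from html5lib.filters import whitespace
    from html5lib.serializer.htmlserializer import HTMLSerializer

    tree = html5lib.parse("<p>lots    of     whitespace</p>")
    walker = html5lib.getTreeWalker("etree")

    # Filters compose: each takes a token stream and yields a modified
    # token stream, which the serializer then consumes.
    filtered = whitespace.Filter(walker(tree))
    print(HTMLSerializer().render(filtered))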
diff --git a/testing/web-platform/tests/tools/html5lib/doc/html5lib.rst b/testing/web-platform/tests/tools/html5lib/doc/html5lib.rst
new file mode 100644
index 000000000..d4ed12b46
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/html5lib.rst
@@ -0,0 +1,77 @@
+html5lib Package
+================
+
+:mod:`html5lib` Package
+-----------------------
+
+.. automodule:: html5lib.__init__
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`constants` Module
+-----------------------
+
+.. automodule:: html5lib.constants
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`html5parser` Module
+-------------------------
+
+.. automodule:: html5lib.html5parser
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`ihatexml` Module
+----------------------
+
+.. automodule:: html5lib.ihatexml
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`inputstream` Module
+-------------------------
+
+.. automodule:: html5lib.inputstream
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`sanitizer` Module
+-----------------------
+
+.. automodule:: html5lib.sanitizer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`tokenizer` Module
+-----------------------
+
+.. automodule:: html5lib.tokenizer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`utils` Module
+-------------------
+
+.. automodule:: html5lib.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Subpackages
+-----------
+
+.. toctree::
+
+ html5lib.filters
+ html5lib.serializer
+ html5lib.treebuilders
+ html5lib.treewalkers
+
diff --git a/testing/web-platform/tests/tools/html5lib/doc/html5lib.serializer.rst b/testing/web-platform/tests/tools/html5lib/doc/html5lib.serializer.rst
new file mode 100644
index 000000000..fa9547421
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/html5lib.serializer.rst
@@ -0,0 +1,19 @@
+serializer Package
+==================
+
+:mod:`serializer` Package
+-------------------------
+
+.. automodule:: html5lib.serializer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`htmlserializer` Module
+----------------------------
+
+.. automodule:: html5lib.serializer.htmlserializer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
diff --git a/testing/web-platform/tests/tools/html5lib/doc/html5lib.treebuilders.rst b/testing/web-platform/tests/tools/html5lib/doc/html5lib.treebuilders.rst
new file mode 100644
index 000000000..991198394
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/html5lib.treebuilders.rst
@@ -0,0 +1,43 @@
+treebuilders Package
+====================
+
+:mod:`treebuilders` Package
+---------------------------
+
+.. automodule:: html5lib.treebuilders
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`_base` Module
+-------------------
+
+.. automodule:: html5lib.treebuilders._base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`dom` Module
+-----------------
+
+.. automodule:: html5lib.treebuilders.dom
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`etree` Module
+-------------------
+
+.. automodule:: html5lib.treebuilders.etree
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`etree_lxml` Module
+------------------------
+
+.. automodule:: html5lib.treebuilders.etree_lxml
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
diff --git a/testing/web-platform/tests/tools/html5lib/doc/html5lib.treewalkers.rst b/testing/web-platform/tests/tools/html5lib/doc/html5lib.treewalkers.rst
new file mode 100644
index 000000000..80595e2d7
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/html5lib.treewalkers.rst
@@ -0,0 +1,59 @@
+treewalkers Package
+===================
+
+:mod:`treewalkers` Package
+--------------------------
+
+.. automodule:: html5lib.treewalkers
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`_base` Module
+-------------------
+
+.. automodule:: html5lib.treewalkers._base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`dom` Module
+-----------------
+
+.. automodule:: html5lib.treewalkers.dom
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`etree` Module
+-------------------
+
+.. automodule:: html5lib.treewalkers.etree
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`genshistream` Module
+--------------------------
+
+.. automodule:: html5lib.treewalkers.genshistream
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`lxmletree` Module
+-----------------------
+
+.. automodule:: html5lib.treewalkers.lxmletree
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:mod:`pulldom` Module
+---------------------
+
+.. automodule:: html5lib.treewalkers.pulldom
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
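The treewalker modules documented above share a common entry point,
``html5lib.getTreeWalker``. A rough sketch of walking a parsed tree into its
token stream, assuming the default ``etree`` backend:

.. code-block:: python

    import html5lib

    tree = html5lib.parse("<p>Hello <em>there</em></p>")
    TreeWalker = html5lib.getTreeWalker("etree")

    # A treewalker yields a flat stream of token dicts (StartTag,
    # Characters, EndTag, ...) consumed by filters and serializers.
    for token in TreeWalker(tree):
        print(token["type"], token.get("name", ""))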
diff --git a/testing/web-platform/tests/tools/html5lib/doc/index.rst b/testing/web-platform/tests/tools/html5lib/doc/index.rst
new file mode 100644
index 000000000..ca2e1b969
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/index.rst
@@ -0,0 +1,21 @@
+Overview
+========
+
+.. include:: ../README.rst
+ :start-line: 6
+
+.. toctree::
+ :maxdepth: 2
+
+ movingparts
+ changes
+ License <license>
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/testing/web-platform/tests/tools/html5lib/doc/license.rst b/testing/web-platform/tests/tools/html5lib/doc/license.rst
new file mode 100644
index 000000000..7e6291f3b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/license.rst
@@ -0,0 +1,4 @@
+License
+=======
+
+.. include:: ../LICENSE
diff --git a/testing/web-platform/tests/tools/html5lib/doc/make.bat b/testing/web-platform/tests/tools/html5lib/doc/make.bat
new file mode 100644
index 000000000..e88c769ce
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/make.bat
@@ -0,0 +1,242 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. xml to make Docutils-native XML files
+ echo. pseudoxml to make pseudoxml-XML files for display purposes
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\html5lib.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\html5lib.qhc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdf" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf
+ cd %BUILDDIR%/..
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdfja" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf-ja
+ cd %BUILDDIR%/..
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+if "%1" == "xml" (
+ %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The XML files are in %BUILDDIR%/xml.
+ goto end
+)
+
+if "%1" == "pseudoxml" (
+ %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+ goto end
+)
+
+:end
diff --git a/testing/web-platform/tests/tools/html5lib/doc/modules.rst b/testing/web-platform/tests/tools/html5lib/doc/modules.rst
new file mode 100644
index 000000000..59fbcc86b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/modules.rst
@@ -0,0 +1,7 @@
+html5lib
+========
+
+.. toctree::
+ :maxdepth: 4
+
+ html5lib
diff --git a/testing/web-platform/tests/tools/html5lib/doc/movingparts.rst b/testing/web-platform/tests/tools/html5lib/doc/movingparts.rst
new file mode 100644
index 000000000..36539785a
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/doc/movingparts.rst
@@ -0,0 +1,209 @@
+The moving parts
+================
+
+html5lib consists of a number of components, each responsible for one part
+of the work of parsing, filtering and serializing HTML; they are described
+below.
+
+
+Tree builders
+-------------
+
+The parser reads HTML by tokenizing the content and building a tree that
+the user can later access. There are three main types of trees that
+html5lib can build:
+
+* ``etree`` - this is the default; builds a tree based on ``xml.etree``,
+ which can be found in the standard library. Whenever possible, the
+ accelerated ``ElementTree`` implementation (i.e.
+ ``xml.etree.cElementTree`` on Python 2.x) is used.
+
+* ``dom`` - builds a tree based on ``xml.dom.minidom``.
+
+* ``lxml.etree`` - uses lxml's implementation of the ``ElementTree``
+ API. The performance gains are relatively small compared to using the
+ accelerated ``ElementTree`` module.
+
+You can specify the builder by name when using the shorthand API:
+
+.. code-block:: python
+
+ import html5lib
+ with open("mydocument.html", "rb") as f:
+ lxml_etree_document = html5lib.parse(f, treebuilder="lxml")
+
+When instantiating a parser object, you have to pass a tree builder
+class in the ``tree`` keyword argument:
+
+.. code-block:: python
+
+ import html5lib
+ parser = html5lib.HTMLParser(tree=SomeTreeBuilder)
+ document = parser.parse("<p>Hello World!")
+
+To get a builder class by name, use the ``getTreeBuilder`` function:
+
+.. code-block:: python
+
+ import html5lib
+ parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder("dom"))
+ minidom_document = parser.parse("<p>Hello World!")
+
+The implementation of builders can be found in `html5lib/treebuilders/
+<https://github.com/html5lib/html5lib-python/tree/master/html5lib/treebuilders>`_.
+
+
+Tree walkers
+------------
+
+Once a tree is ready, you can work on it either manually, or using
+a tree walker, which provides a streaming view of the tree. html5lib
+provides walkers for all three supported types of trees (``etree``,
+``dom`` and ``lxml``).
+
+The implementation of walkers can be found in `html5lib/treewalkers/
+<https://github.com/html5lib/html5lib-python/tree/master/html5lib/treewalkers>`_.
+
+Walkers make consuming HTML easier. html5lib uses them to provide you
+with a couple of handy tools.
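+
+As a quick orientation (this sketch is ours and is not part of the library's
+own documentation), a walker is obtained by name and called with a parsed
+tree; iterating over the result yields token dicts:
+
+.. code-block:: python
+
+    import html5lib
+
+    document = html5lib.parse("<p class='greeting'>Hello!")
+    walker = html5lib.getTreeWalker("etree")
+
+    # Each token is a dict with at least a "type" key, e.g. "StartTag",
+    # "EndTag", "Characters" or "SpaceCharacters".
+    for token in walker(document):
+        print(token["type"])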
+
+
+HTMLSerializer
+~~~~~~~~~~~~~~
+
+The serializer lets you write HTML back out as a stream of text chunks
+(or of bytes, when an encoding is passed to it).
+
+.. code-block:: pycon
+
+ >>> import html5lib
+ >>> element = html5lib.parse('<p xml:lang="pl">Witam wszystkich')
+ >>> walker = html5lib.getTreeWalker("etree")
+ >>> stream = walker(element)
+ >>> s = html5lib.serializer.HTMLSerializer()
+ >>> output = s.serialize(stream)
+ >>> for item in output:
+ ... print("%r" % item)
+ '<p'
+ ' '
+ 'xml:lang'
+ '='
+ 'pl'
+ '>'
+ 'Witam wszystkich'
+
+You can customize the serializer's behaviour in a variety of ways; consult
+the :class:`~html5lib.serializer.htmlserializer.HTMLSerializer`
+documentation.
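+
+For instance, a sketch of ours (the option names below are keyword arguments
+of the serializer as we understand them and should be double-checked against
+that documentation) that keeps optional end tags and always quotes attribute
+values:
+
+.. code-block:: python
+
+    import html5lib
+
+    document = html5lib.parse("<p class=test>Hi there")
+    walker = html5lib.getTreeWalker("etree")
+    s = html5lib.serializer.HTMLSerializer(omit_optional_tags=False,
+                                           quote_attr_values=True)
+    print(s.render(walker(document)))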
+
+
+Filters
+~~~~~~~
+
+You can alter the stream content with filters provided by html5lib:
+
+* :class:`alphabeticalattributes.Filter
+ <html5lib.filters.alphabeticalattributes.Filter>` sorts attributes on
+ tags to be in alphabetical order
+
+* :class:`inject_meta_charset.Filter
+ <html5lib.filters.inject_meta_charset.Filter>` sets a user-specified
+ encoding in the correct ``<meta>`` tag in the ``<head>`` section of
+ the document
+
+* :class:`lint.Filter <html5lib.filters.lint.Filter>` raises
+ ``LintError`` exceptions on invalid tag and attribute names, invalid
+ PCDATA, etc.
+
+* :class:`optionaltags.Filter <html5lib.filters.optionaltags.Filter>`
+ removes tags from the stream which are not necessary to produce valid
+ HTML
+
+* :class:`sanitizer.Filter <html5lib.filters.sanitizer.Filter>` removes
+ unsafe markup and CSS. Elements that are known to be safe are passed
+ through and the rest is converted to visible text. The default
+ configuration of the sanitizer follows the `WHATWG Sanitization Rules
+ <http://wiki.whatwg.org/wiki/Sanitization_rules>`_.
+
+* :class:`whitespace.Filter <html5lib.filters.whitespace.Filter>`
+ collapses all whitespace characters to single spaces unless they're in
+ ``<pre>`` or ``<textarea>`` tags.
+
+To use a filter, simply wrap it around a stream:
+
+.. code-block:: pycon
+
+ >>> import html5lib
+ >>> from html5lib.filters import sanitizer
+ >>> dom = html5lib.parse("<p><script>alert('Boo!')", treebuilder="dom")
+ >>> walker = html5lib.getTreeWalker("dom")
+ >>> stream = walker(dom)
+ >>> sane_stream = sanitizer.Filter(stream)
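+
+The filtered stream is still an ordinary token stream, so it can be handed
+straight to the serializer. A small self-contained sketch of ours:
+
+.. code-block:: python
+
+    import html5lib
+    from html5lib import serializer
+    from html5lib.filters import sanitizer
+
+    dom = html5lib.parse("<p><script>alert('Boo!')", treebuilder="dom")
+    walker = html5lib.getTreeWalker("dom")
+    sane_stream = sanitizer.Filter(walker(dom))
+
+    s = serializer.HTMLSerializer()
+    # The unsafe markup has been converted to visible (escaped) text rather
+    # than silently dropped.
+    print(s.render(sane_stream))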
+
+
+Tree adapters
+-------------
+
+Used to translate one type of tree to another. More documentation
+pending, sorry.
+
+
+Encoding discovery
+------------------
+
+Parsed trees are always Unicode. However a large variety of input
+encodings are supported. The encoding of the document is determined in
+the following way:
+
+* The encoding may be explicitly specified by passing the name of the
+ encoding as the encoding parameter to the
+ :meth:`~html5lib.html5parser.HTMLParser.parse` method on
+ ``HTMLParser`` objects (see the short sketch after this list).
+
+* If no encoding is specified, the parser will attempt to detect the
+ encoding from a ``<meta>`` element in the first 512 bytes of the
+ document (this is only a partial implementation of the current HTML
+ 5 specification).
+
+* If no encoding can be found and the chardet library is available, an
+ attempt will be made to sniff the encoding from the byte pattern.
+
+* If all else fails, the default encoding will be used. This is usually
+ `Windows-1252 <http://en.wikipedia.org/wiki/Windows-1252>`_, which is
+ a common fallback used by Web browsers.
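+
+A minimal sketch of the first option (ours; the keyword name should be
+checked against the :meth:`~html5lib.html5parser.HTMLParser.parse`
+signature, and ``legacy_page.html`` is just a stand-in file name):
+
+.. code-block:: python
+
+    import html5lib
+
+    with open("legacy_page.html", "rb") as f:
+        # Decode the document as ISO-8859-2 instead of letting the parser
+        # guess from <meta>, chardet or the fallback encoding.
+        document = html5lib.HTMLParser().parse(f, encoding="iso-8859-2")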
+
+
+Tokenizers
+----------
+
+The part of the parser responsible for translating a raw input stream
+into meaningful tokens is the tokenizer. Currently html5lib provides
+two.
+
+To set up a tokenizer, simply pass it when instantiating
+a :class:`~html5lib.html5parser.HTMLParser`:
+
+.. code-block:: python
+
+ import html5lib
+ from html5lib import sanitizer
+
+ p = html5lib.HTMLParser(tokenizer=sanitizer.HTMLSanitizer)
+ p.parse("<p>Surprise!<script>alert('Boo!');</script>")
+
+HTMLTokenizer
+~~~~~~~~~~~~~
+
+This is the default tokenizer, the heart of html5lib. The implementation
+can be found in `html5lib/tokenizer.py
+<https://github.com/html5lib/html5lib-python/blob/master/html5lib/tokenizer.py>`_.
+
+HTMLSanitizer
+~~~~~~~~~~~~~
+
+This is a tokenizer that removes unsafe markup and CSS styles from the
+input. Elements that are known to be safe are passed through and the
+rest is converted to visible text. The default configuration of the
+sanitizer follows the `WHATWG Sanitization Rules
+<http://wiki.whatwg.org/wiki/Sanitization_rules>`_.
+
+The implementation can be found in `html5lib/sanitizer.py
+<https://github.com/html5lib/html5lib-python/blob/master/html5lib/sanitizer.py>`_.
diff --git a/testing/web-platform/tests/tools/html5lib/flake8-run.sh b/testing/web-platform/tests/tools/html5lib/flake8-run.sh
new file mode 100755
index 000000000..d1a587d35
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/flake8-run.sh
@@ -0,0 +1,14 @@
+#!/bin/bash -e
+
+if [[ ! -x "$(which flake8)" ]]; then
+ echo "fatal: flake8 not found on $PATH. Exiting."
+ exit 1
+fi
+
+if [[ $TRAVIS != "true" || $FLAKE == "true" ]]; then
+ find html5lib/ -name '*.py' -and -not -name 'constants.py' -print0 | xargs -0 flake8 --ignore=E501
+ flake1=$?
+ flake8 --max-line-length=99 --ignore=E126 html5lib/constants.py
+ flake2=$?
+ exit $(( flake1 || flake2 ))
+fi
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/__init__.py b/testing/web-platform/tests/tools/html5lib/html5lib/__init__.py
new file mode 100644
index 000000000..a67a652b9
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/__init__.py
@@ -0,0 +1,23 @@
+"""
+HTML parsing library based on the WHATWG "HTML5"
+specification. The parser is designed to be compatible with existing
+HTML found in the wild and implements well-defined error recovery that
+is largely compatible with modern desktop web browsers.
+
+Example usage:
+
+import html5lib
+f = open("my_document.html")
+tree = html5lib.parse(f)
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+from .html5parser import HTMLParser, parse, parseFragment
+from .treebuilders import getTreeBuilder
+from .treewalkers import getTreeWalker
+from .serializer import serialize
+
+__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
+ "getTreeWalker", "serialize"]
+__version__ = "0.9999-dev"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/constants.py b/testing/web-platform/tests/tools/html5lib/html5lib/constants.py
new file mode 100644
index 000000000..e7089846d
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/constants.py
@@ -0,0 +1,3104 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import string
+import gettext
+_ = gettext.gettext
+
+EOF = None
+
+E = {
+ "null-character":
+ _("Null character in input stream, replaced with U+FFFD."),
+ "invalid-codepoint":
+ _("Invalid codepoint in stream."),
+ "incorrectly-placed-solidus":
+ _("Solidus (/) incorrectly placed in tag."),
+ "incorrect-cr-newline-entity":
+ _("Incorrect CR newline entity, replaced with LF."),
+ "illegal-windows-1252-entity":
+ _("Entity used with illegal number (windows-1252 reference)."),
+ "cant-convert-numeric-entity":
+ _("Numeric entity couldn't be converted to character "
+ "(codepoint U+%(charAsInt)08x)."),
+ "illegal-codepoint-for-numeric-entity":
+ _("Numeric entity represents an illegal codepoint: "
+ "U+%(charAsInt)08x."),
+ "numeric-entity-without-semicolon":
+ _("Numeric entity didn't end with ';'."),
+ "expected-numeric-entity-but-got-eof":
+ _("Numeric entity expected. Got end of file instead."),
+ "expected-numeric-entity":
+ _("Numeric entity expected but none found."),
+ "named-entity-without-semicolon":
+ _("Named entity didn't end with ';'."),
+ "expected-named-entity":
+ _("Named entity expected. Got none."),
+ "attributes-in-end-tag":
+ _("End tag contains unexpected attributes."),
+ 'self-closing-flag-on-end-tag':
+ _("End tag contains unexpected self-closing flag."),
+ "expected-tag-name-but-got-right-bracket":
+ _("Expected tag name. Got '>' instead."),
+ "expected-tag-name-but-got-question-mark":
+ _("Expected tag name. Got '?' instead. (HTML doesn't "
+ "support processing instructions.)"),
+ "expected-tag-name":
+ _("Expected tag name. Got something else instead"),
+ "expected-closing-tag-but-got-right-bracket":
+ _("Expected closing tag. Got '>' instead. Ignoring '</>'."),
+ "expected-closing-tag-but-got-eof":
+ _("Expected closing tag. Unexpected end of file."),
+ "expected-closing-tag-but-got-char":
+ _("Expected closing tag. Unexpected character '%(data)s' found."),
+ "eof-in-tag-name":
+ _("Unexpected end of file in the tag name."),
+ "expected-attribute-name-but-got-eof":
+ _("Unexpected end of file. Expected attribute name instead."),
+ "eof-in-attribute-name":
+ _("Unexpected end of file in attribute name."),
+ "invalid-character-in-attribute-name":
+ _("Invalid character in attribute name"),
+ "duplicate-attribute":
+ _("Dropped duplicate attribute on tag."),
+ "expected-end-of-tag-name-but-got-eof":
+ _("Unexpected end of file. Expected = or end of tag."),
+ "expected-attribute-value-but-got-eof":
+ _("Unexpected end of file. Expected attribute value."),
+ "expected-attribute-value-but-got-right-bracket":
+ _("Expected attribute value. Got '>' instead."),
+ 'equals-in-unquoted-attribute-value':
+ _("Unexpected = in unquoted attribute"),
+ 'unexpected-character-in-unquoted-attribute-value':
+ _("Unexpected character in unquoted attribute"),
+ "invalid-character-after-attribute-name":
+ _("Unexpected character after attribute name."),
+ "unexpected-character-after-attribute-value":
+ _("Unexpected character after attribute value."),
+ "eof-in-attribute-value-double-quote":
+ _("Unexpected end of file in attribute value (\")."),
+ "eof-in-attribute-value-single-quote":
+ _("Unexpected end of file in attribute value (')."),
+ "eof-in-attribute-value-no-quotes":
+ _("Unexpected end of file in attribute value."),
+ "unexpected-EOF-after-solidus-in-tag":
+ _("Unexpected end of file in tag. Expected >"),
+ "unexpected-character-after-solidus-in-tag":
+ _("Unexpected character after / in tag. Expected >"),
+ "expected-dashes-or-doctype":
+ _("Expected '--' or 'DOCTYPE'. Not found."),
+ "unexpected-bang-after-double-dash-in-comment":
+ _("Unexpected ! after -- in comment"),
+ "unexpected-space-after-double-dash-in-comment":
+ _("Unexpected space after -- in comment"),
+ "incorrect-comment":
+ _("Incorrect comment."),
+ "eof-in-comment":
+ _("Unexpected end of file in comment."),
+ "eof-in-comment-end-dash":
+ _("Unexpected end of file in comment (-)"),
+ "unexpected-dash-after-double-dash-in-comment":
+ _("Unexpected '-' after '--' found in comment."),
+ "eof-in-comment-double-dash":
+ _("Unexpected end of file in comment (--)."),
+ "eof-in-comment-end-space-state":
+ _("Unexpected end of file in comment."),
+ "eof-in-comment-end-bang-state":
+ _("Unexpected end of file in comment."),
+ "unexpected-char-in-comment":
+ _("Unexpected character in comment found."),
+ "need-space-after-doctype":
+ _("No space after literal string 'DOCTYPE'."),
+ "expected-doctype-name-but-got-right-bracket":
+ _("Unexpected > character. Expected DOCTYPE name."),
+ "expected-doctype-name-but-got-eof":
+ _("Unexpected end of file. Expected DOCTYPE name."),
+ "eof-in-doctype-name":
+ _("Unexpected end of file in DOCTYPE name."),
+ "eof-in-doctype":
+ _("Unexpected end of file in DOCTYPE."),
+ "expected-space-or-right-bracket-in-doctype":
+ _("Expected space or '>'. Got '%(data)s'"),
+ "unexpected-end-of-doctype":
+ _("Unexpected end of DOCTYPE."),
+ "unexpected-char-in-doctype":
+ _("Unexpected character in DOCTYPE."),
+ "eof-in-innerhtml":
+ _("XXX innerHTML EOF"),
+ "unexpected-doctype":
+ _("Unexpected DOCTYPE. Ignored."),
+ "non-html-root":
+ _("html needs to be the first start tag."),
+ "expected-doctype-but-got-eof":
+ _("Unexpected End of file. Expected DOCTYPE."),
+ "unknown-doctype":
+ _("Erroneous DOCTYPE."),
+ "expected-doctype-but-got-chars":
+ _("Unexpected non-space characters. Expected DOCTYPE."),
+ "expected-doctype-but-got-start-tag":
+ _("Unexpected start tag (%(name)s). Expected DOCTYPE."),
+ "expected-doctype-but-got-end-tag":
+ _("Unexpected end tag (%(name)s). Expected DOCTYPE."),
+ "end-tag-after-implied-root":
+ _("Unexpected end tag (%(name)s) after the (implied) root element."),
+ "expected-named-closing-tag-but-got-eof":
+ _("Unexpected end of file. Expected end tag (%(name)s)."),
+ "two-heads-are-not-better-than-one":
+ _("Unexpected start tag head in existing head. Ignored."),
+ "unexpected-end-tag":
+ _("Unexpected end tag (%(name)s). Ignored."),
+ "unexpected-start-tag-out-of-my-head":
+ _("Unexpected start tag (%(name)s) that can be in head. Moved."),
+ "unexpected-start-tag":
+ _("Unexpected start tag (%(name)s)."),
+ "missing-end-tag":
+ _("Missing end tag (%(name)s)."),
+ "missing-end-tags":
+ _("Missing end tags (%(name)s)."),
+ "unexpected-start-tag-implies-end-tag":
+ _("Unexpected start tag (%(startName)s) "
+ "implies end tag (%(endName)s)."),
+ "unexpected-start-tag-treated-as":
+ _("Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
+ "deprecated-tag":
+ _("Unexpected start tag %(name)s. Don't use it!"),
+ "unexpected-start-tag-ignored":
+ _("Unexpected start tag %(name)s. Ignored."),
+ "expected-one-end-tag-but-got-another":
+ _("Unexpected end tag (%(gotName)s). "
+ "Missing end tag (%(expectedName)s)."),
+ "end-tag-too-early":
+ _("End tag (%(name)s) seen too early. Expected other end tag."),
+ "end-tag-too-early-named":
+ _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
+ "end-tag-too-early-ignored":
+ _("End tag (%(name)s) seen too early. Ignored."),
+ "adoption-agency-1.1":
+ _("End tag (%(name)s) violates step 1, "
+ "paragraph 1 of the adoption agency algorithm."),
+ "adoption-agency-1.2":
+ _("End tag (%(name)s) violates step 1, "
+ "paragraph 2 of the adoption agency algorithm."),
+ "adoption-agency-1.3":
+ _("End tag (%(name)s) violates step 1, "
+ "paragraph 3 of the adoption agency algorithm."),
+ "adoption-agency-4.4":
+ _("End tag (%(name)s) violates step 4, "
+ "paragraph 4 of the adoption agency algorithm."),
+ "unexpected-end-tag-treated-as":
+ _("Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
+ "no-end-tag":
+ _("This element (%(name)s) has no end tag."),
+ "unexpected-implied-end-tag-in-table":
+ _("Unexpected implied end tag (%(name)s) in the table phase."),
+ "unexpected-implied-end-tag-in-table-body":
+ _("Unexpected implied end tag (%(name)s) in the table body phase."),
+ "unexpected-char-implies-table-voodoo":
+ _("Unexpected non-space characters in "
+ "table context caused voodoo mode."),
+ "unexpected-hidden-input-in-table":
+ _("Unexpected input with type hidden in table context."),
+ "unexpected-form-in-table":
+ _("Unexpected form in table context."),
+ "unexpected-start-tag-implies-table-voodoo":
+ _("Unexpected start tag (%(name)s) in "
+ "table context caused voodoo mode."),
+ "unexpected-end-tag-implies-table-voodoo":
+ _("Unexpected end tag (%(name)s) in "
+ "table context caused voodoo mode."),
+ "unexpected-cell-in-table-body":
+ _("Unexpected table cell start tag (%(name)s) "
+ "in the table body phase."),
+ "unexpected-cell-end-tag":
+ _("Got table cell end tag (%(name)s) "
+ "while required end tags are missing."),
+ "unexpected-end-tag-in-table-body":
+ _("Unexpected end tag (%(name)s) in the table body phase. Ignored."),
+ "unexpected-implied-end-tag-in-table-row":
+ _("Unexpected implied end tag (%(name)s) in the table row phase."),
+ "unexpected-end-tag-in-table-row":
+ _("Unexpected end tag (%(name)s) in the table row phase. Ignored."),
+ "unexpected-select-in-select":
+ _("Unexpected select start tag in the select phase "
+ "treated as select end tag."),
+ "unexpected-input-in-select":
+ _("Unexpected input start tag in the select phase."),
+ "unexpected-start-tag-in-select":
+ _("Unexpected start tag token (%(name)s in the select phase. "
+ "Ignored."),
+ "unexpected-end-tag-in-select":
+ _("Unexpected end tag (%(name)s) in the select phase. Ignored."),
+ "unexpected-table-element-start-tag-in-select-in-table":
+ _("Unexpected table element start tag (%(name)s) in the select in table phase."),
+ "unexpected-table-element-end-tag-in-select-in-table":
+ _("Unexpected table element end tag (%(name)s) in the select in table phase."),
+ "unexpected-char-after-body":
+ _("Unexpected non-space characters in the after body phase."),
+ "unexpected-start-tag-after-body":
+ _("Unexpected start tag token (%(name)s)"
+ " in the after body phase."),
+ "unexpected-end-tag-after-body":
+ _("Unexpected end tag token (%(name)s)"
+ " in the after body phase."),
+ "unexpected-char-in-frameset":
+ _("Unexpected characters in the frameset phase. Characters ignored."),
+ "unexpected-start-tag-in-frameset":
+ _("Unexpected start tag token (%(name)s)"
+ " in the frameset phase. Ignored."),
+ "unexpected-frameset-in-frameset-innerhtml":
+ _("Unexpected end tag token (frameset) "
+ "in the frameset phase (innerHTML)."),
+ "unexpected-end-tag-in-frameset":
+ _("Unexpected end tag token (%(name)s)"
+ " in the frameset phase. Ignored."),
+ "unexpected-char-after-frameset":
+ _("Unexpected non-space characters in the "
+ "after frameset phase. Ignored."),
+ "unexpected-start-tag-after-frameset":
+ _("Unexpected start tag (%(name)s)"
+ " in the after frameset phase. Ignored."),
+ "unexpected-end-tag-after-frameset":
+ _("Unexpected end tag (%(name)s)"
+ " in the after frameset phase. Ignored."),
+ "unexpected-end-tag-after-body-innerhtml":
+ _("Unexpected end tag after body(innerHtml)"),
+ "expected-eof-but-got-char":
+ _("Unexpected non-space characters. Expected end of file."),
+ "expected-eof-but-got-start-tag":
+ _("Unexpected start tag (%(name)s)"
+ ". Expected end of file."),
+ "expected-eof-but-got-end-tag":
+ _("Unexpected end tag (%(name)s)"
+ ". Expected end of file."),
+ "eof-in-table":
+ _("Unexpected end of file. Expected table content."),
+ "eof-in-select":
+ _("Unexpected end of file. Expected select content."),
+ "eof-in-frameset":
+ _("Unexpected end of file. Expected frameset content."),
+ "eof-in-script-in-script":
+ _("Unexpected end of file. Expected script content."),
+ "eof-in-foreign-lands":
+ _("Unexpected end of file. Expected foreign content"),
+ "non-void-element-with-trailing-solidus":
+ _("Trailing solidus not allowed on element %(name)s"),
+ "unexpected-html-element-in-foreign-content":
+ _("Element %(name)s not allowed in a non-html context"),
+ "unexpected-end-tag-before-html":
+ _("Unexpected end tag (%(name)s) before html."),
+ "XXX-undefined-error":
+ _("Undefined error (this sucks and should be fixed)"),
+}
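+
+# Illustrative sketch (our annotation, not part of the upstream source):
+# these templates are meant to be filled in with %-formatting using the
+# details attached to a parse error, for example
+#
+#     E["unexpected-start-tag-treated-as"] % {"originalName": "image",
+#                                             "newName": "img"}
+#
+# gives "Unexpected start tag (image). Treated as img.".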
+
+namespaces = {
+ "html": "http://www.w3.org/1999/xhtml",
+ "mathml": "http://www.w3.org/1998/Math/MathML",
+ "svg": "http://www.w3.org/2000/svg",
+ "xlink": "http://www.w3.org/1999/xlink",
+ "xml": "http://www.w3.org/XML/1998/namespace",
+ "xmlns": "http://www.w3.org/2000/xmlns/"
+}
+
+scopingElements = frozenset((
+ (namespaces["html"], "applet"),
+ (namespaces["html"], "caption"),
+ (namespaces["html"], "html"),
+ (namespaces["html"], "marquee"),
+ (namespaces["html"], "object"),
+ (namespaces["html"], "table"),
+ (namespaces["html"], "td"),
+ (namespaces["html"], "th"),
+ (namespaces["mathml"], "mi"),
+ (namespaces["mathml"], "mo"),
+ (namespaces["mathml"], "mn"),
+ (namespaces["mathml"], "ms"),
+ (namespaces["mathml"], "mtext"),
+ (namespaces["mathml"], "annotation-xml"),
+ (namespaces["svg"], "foreignObject"),
+ (namespaces["svg"], "desc"),
+ (namespaces["svg"], "title"),
+))
+
+formattingElements = frozenset((
+ (namespaces["html"], "a"),
+ (namespaces["html"], "b"),
+ (namespaces["html"], "big"),
+ (namespaces["html"], "code"),
+ (namespaces["html"], "em"),
+ (namespaces["html"], "font"),
+ (namespaces["html"], "i"),
+ (namespaces["html"], "nobr"),
+ (namespaces["html"], "s"),
+ (namespaces["html"], "small"),
+ (namespaces["html"], "strike"),
+ (namespaces["html"], "strong"),
+ (namespaces["html"], "tt"),
+ (namespaces["html"], "u")
+))
+
+specialElements = frozenset((
+ (namespaces["html"], "address"),
+ (namespaces["html"], "applet"),
+ (namespaces["html"], "area"),
+ (namespaces["html"], "article"),
+ (namespaces["html"], "aside"),
+ (namespaces["html"], "base"),
+ (namespaces["html"], "basefont"),
+ (namespaces["html"], "bgsound"),
+ (namespaces["html"], "blockquote"),
+ (namespaces["html"], "body"),
+ (namespaces["html"], "br"),
+ (namespaces["html"], "button"),
+ (namespaces["html"], "caption"),
+ (namespaces["html"], "center"),
+ (namespaces["html"], "col"),
+ (namespaces["html"], "colgroup"),
+ (namespaces["html"], "command"),
+ (namespaces["html"], "dd"),
+ (namespaces["html"], "details"),
+ (namespaces["html"], "dir"),
+ (namespaces["html"], "div"),
+ (namespaces["html"], "dl"),
+ (namespaces["html"], "dt"),
+ (namespaces["html"], "embed"),
+ (namespaces["html"], "fieldset"),
+ (namespaces["html"], "figure"),
+ (namespaces["html"], "footer"),
+ (namespaces["html"], "form"),
+ (namespaces["html"], "frame"),
+ (namespaces["html"], "frameset"),
+ (namespaces["html"], "h1"),
+ (namespaces["html"], "h2"),
+ (namespaces["html"], "h3"),
+ (namespaces["html"], "h4"),
+ (namespaces["html"], "h5"),
+ (namespaces["html"], "h6"),
+ (namespaces["html"], "head"),
+ (namespaces["html"], "header"),
+ (namespaces["html"], "hr"),
+ (namespaces["html"], "html"),
+ (namespaces["html"], "iframe"),
+ # Note that image is commented out in the spec as "this isn't an
+ # element that can end up on the stack, so it doesn't matter,"
+ (namespaces["html"], "image"),
+ (namespaces["html"], "img"),
+ (namespaces["html"], "input"),
+ (namespaces["html"], "isindex"),
+ (namespaces["html"], "li"),
+ (namespaces["html"], "link"),
+ (namespaces["html"], "listing"),
+ (namespaces["html"], "marquee"),
+ (namespaces["html"], "menu"),
+ (namespaces["html"], "meta"),
+ (namespaces["html"], "nav"),
+ (namespaces["html"], "noembed"),
+ (namespaces["html"], "noframes"),
+ (namespaces["html"], "noscript"),
+ (namespaces["html"], "object"),
+ (namespaces["html"], "ol"),
+ (namespaces["html"], "p"),
+ (namespaces["html"], "param"),
+ (namespaces["html"], "plaintext"),
+ (namespaces["html"], "pre"),
+ (namespaces["html"], "script"),
+ (namespaces["html"], "section"),
+ (namespaces["html"], "select"),
+ (namespaces["html"], "style"),
+ (namespaces["html"], "table"),
+ (namespaces["html"], "tbody"),
+ (namespaces["html"], "td"),
+ (namespaces["html"], "textarea"),
+ (namespaces["html"], "tfoot"),
+ (namespaces["html"], "th"),
+ (namespaces["html"], "thead"),
+ (namespaces["html"], "title"),
+ (namespaces["html"], "tr"),
+ (namespaces["html"], "ul"),
+ (namespaces["html"], "wbr"),
+ (namespaces["html"], "xmp"),
+ (namespaces["svg"], "foreignObject")
+))
+
+htmlIntegrationPointElements = frozenset((
+ (namespaces["mathml"], "annotaion-xml"),
+ (namespaces["svg"], "foreignObject"),
+ (namespaces["svg"], "desc"),
+ (namespaces["svg"], "title")
+))
+
+mathmlTextIntegrationPointElements = frozenset((
+ (namespaces["mathml"], "mi"),
+ (namespaces["mathml"], "mo"),
+ (namespaces["mathml"], "mn"),
+ (namespaces["mathml"], "ms"),
+ (namespaces["mathml"], "mtext")
+))
+
+adjustForeignAttributes = {
+ "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
+ "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
+ "xlink:href": ("xlink", "href", namespaces["xlink"]),
+ "xlink:role": ("xlink", "role", namespaces["xlink"]),
+ "xlink:show": ("xlink", "show", namespaces["xlink"]),
+ "xlink:title": ("xlink", "title", namespaces["xlink"]),
+ "xlink:type": ("xlink", "type", namespaces["xlink"]),
+ "xml:base": ("xml", "base", namespaces["xml"]),
+ "xml:lang": ("xml", "lang", namespaces["xml"]),
+ "xml:space": ("xml", "space", namespaces["xml"]),
+ "xmlns": (None, "xmlns", namespaces["xmlns"]),
+ "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
+}
+
+unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
+ adjustForeignAttributes.items()])
+
+spaceCharacters = frozenset((
+ "\t",
+ "\n",
+ "\u000C",
+ " ",
+ "\r"
+))
+
+tableInsertModeElements = frozenset((
+ "table",
+ "tbody",
+ "tfoot",
+ "thead",
+ "tr"
+))
+
+asciiLowercase = frozenset(string.ascii_lowercase)
+asciiUppercase = frozenset(string.ascii_uppercase)
+asciiLetters = frozenset(string.ascii_letters)
+digits = frozenset(string.digits)
+hexDigits = frozenset(string.hexdigits)
+
+asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
+ for c in string.ascii_uppercase])
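+
+# Illustrative note (ours, not part of the upstream source): the mapping
+# above is in the form accepted by unicode/str .translate(), so that, for
+# example, "DIV".translate(asciiUpper2Lower) == "div".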
+
+# Heading elements need to be ordered
+headingElements = (
+ "h1",
+ "h2",
+ "h3",
+ "h4",
+ "h5",
+ "h6"
+)
+
+voidElements = frozenset((
+ "base",
+ "command",
+ "event-source",
+ "link",
+ "meta",
+ "hr",
+ "br",
+ "img",
+ "embed",
+ "param",
+ "area",
+ "col",
+ "input",
+ "source",
+ "track"
+))
+
+cdataElements = frozenset(('title', 'textarea'))
+
+rcdataElements = frozenset((
+ 'style',
+ 'script',
+ 'xmp',
+ 'iframe',
+ 'noembed',
+ 'noframes',
+ 'noscript'
+))
+
+booleanAttributes = {
+ "": frozenset(("irrelevant",)),
+ "style": frozenset(("scoped",)),
+ "img": frozenset(("ismap",)),
+ "audio": frozenset(("autoplay", "controls")),
+ "video": frozenset(("autoplay", "controls")),
+ "script": frozenset(("defer", "async")),
+ "details": frozenset(("open",)),
+ "datagrid": frozenset(("multiple", "disabled")),
+ "command": frozenset(("hidden", "disabled", "checked", "default")),
+ "hr": frozenset(("noshade")),
+ "menu": frozenset(("autosubmit",)),
+ "fieldset": frozenset(("disabled", "readonly")),
+ "option": frozenset(("disabled", "readonly", "selected")),
+ "optgroup": frozenset(("disabled", "readonly")),
+ "button": frozenset(("disabled", "autofocus")),
+ "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
+ "select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
+ "output": frozenset(("disabled", "readonly")),
+}
+
+# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
+# therefore can't be a frozenset.
+entitiesWindows1252 = (
+ 8364, # 0x80 0x20AC EURO SIGN
+ 65533, # 0x81 UNDEFINED
+ 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
+ 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
+ 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
+ 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
+ 8224, # 0x86 0x2020 DAGGER
+ 8225, # 0x87 0x2021 DOUBLE DAGGER
+ 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
+ 8240, # 0x89 0x2030 PER MILLE SIGN
+ 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
+ 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
+ 65533, # 0x8D UNDEFINED
+ 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
+ 65533, # 0x8F UNDEFINED
+ 65533, # 0x90 UNDEFINED
+ 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
+ 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
+ 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
+ 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
+ 8226, # 0x95 0x2022 BULLET
+ 8211, # 0x96 0x2013 EN DASH
+ 8212, # 0x97 0x2014 EM DASH
+ 732, # 0x98 0x02DC SMALL TILDE
+ 8482, # 0x99 0x2122 TRADE MARK SIGN
+ 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
+ 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
+ 65533, # 0x9D UNDEFINED
+ 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
+ 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
+)
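+
+# Illustrative note (ours, describing the intended use): numeric character
+# references in the 0x80-0x9F range are remapped through this table by
+# indexing with (codepoint - 0x80); e.g. &#x93; gives
+# entitiesWindows1252[0x93 - 0x80] == 8220, i.e. U+201C LEFT DOUBLE
+# QUOTATION MARK.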
+
+xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
+
+entities = {
+ "AElig": "\xc6",
+ "AElig;": "\xc6",
+ "AMP": "&",
+ "AMP;": "&",
+ "Aacute": "\xc1",
+ "Aacute;": "\xc1",
+ "Abreve;": "\u0102",
+ "Acirc": "\xc2",
+ "Acirc;": "\xc2",
+ "Acy;": "\u0410",
+ "Afr;": "\U0001d504",
+ "Agrave": "\xc0",
+ "Agrave;": "\xc0",
+ "Alpha;": "\u0391",
+ "Amacr;": "\u0100",
+ "And;": "\u2a53",
+ "Aogon;": "\u0104",
+ "Aopf;": "\U0001d538",
+ "ApplyFunction;": "\u2061",
+ "Aring": "\xc5",
+ "Aring;": "\xc5",
+ "Ascr;": "\U0001d49c",
+ "Assign;": "\u2254",
+ "Atilde": "\xc3",
+ "Atilde;": "\xc3",
+ "Auml": "\xc4",
+ "Auml;": "\xc4",
+ "Backslash;": "\u2216",
+ "Barv;": "\u2ae7",
+ "Barwed;": "\u2306",
+ "Bcy;": "\u0411",
+ "Because;": "\u2235",
+ "Bernoullis;": "\u212c",
+ "Beta;": "\u0392",
+ "Bfr;": "\U0001d505",
+ "Bopf;": "\U0001d539",
+ "Breve;": "\u02d8",
+ "Bscr;": "\u212c",
+ "Bumpeq;": "\u224e",
+ "CHcy;": "\u0427",
+ "COPY": "\xa9",
+ "COPY;": "\xa9",
+ "Cacute;": "\u0106",
+ "Cap;": "\u22d2",
+ "CapitalDifferentialD;": "\u2145",
+ "Cayleys;": "\u212d",
+ "Ccaron;": "\u010c",
+ "Ccedil": "\xc7",
+ "Ccedil;": "\xc7",
+ "Ccirc;": "\u0108",
+ "Cconint;": "\u2230",
+ "Cdot;": "\u010a",
+ "Cedilla;": "\xb8",
+ "CenterDot;": "\xb7",
+ "Cfr;": "\u212d",
+ "Chi;": "\u03a7",
+ "CircleDot;": "\u2299",
+ "CircleMinus;": "\u2296",
+ "CirclePlus;": "\u2295",
+ "CircleTimes;": "\u2297",
+ "ClockwiseContourIntegral;": "\u2232",
+ "CloseCurlyDoubleQuote;": "\u201d",
+ "CloseCurlyQuote;": "\u2019",
+ "Colon;": "\u2237",
+ "Colone;": "\u2a74",
+ "Congruent;": "\u2261",
+ "Conint;": "\u222f",
+ "ContourIntegral;": "\u222e",
+ "Copf;": "\u2102",
+ "Coproduct;": "\u2210",
+ "CounterClockwiseContourIntegral;": "\u2233",
+ "Cross;": "\u2a2f",
+ "Cscr;": "\U0001d49e",
+ "Cup;": "\u22d3",
+ "CupCap;": "\u224d",
+ "DD;": "\u2145",
+ "DDotrahd;": "\u2911",
+ "DJcy;": "\u0402",
+ "DScy;": "\u0405",
+ "DZcy;": "\u040f",
+ "Dagger;": "\u2021",
+ "Darr;": "\u21a1",
+ "Dashv;": "\u2ae4",
+ "Dcaron;": "\u010e",
+ "Dcy;": "\u0414",
+ "Del;": "\u2207",
+ "Delta;": "\u0394",
+ "Dfr;": "\U0001d507",
+ "DiacriticalAcute;": "\xb4",
+ "DiacriticalDot;": "\u02d9",
+ "DiacriticalDoubleAcute;": "\u02dd",
+ "DiacriticalGrave;": "`",
+ "DiacriticalTilde;": "\u02dc",
+ "Diamond;": "\u22c4",
+ "DifferentialD;": "\u2146",
+ "Dopf;": "\U0001d53b",
+ "Dot;": "\xa8",
+ "DotDot;": "\u20dc",
+ "DotEqual;": "\u2250",
+ "DoubleContourIntegral;": "\u222f",
+ "DoubleDot;": "\xa8",
+ "DoubleDownArrow;": "\u21d3",
+ "DoubleLeftArrow;": "\u21d0",
+ "DoubleLeftRightArrow;": "\u21d4",
+ "DoubleLeftTee;": "\u2ae4",
+ "DoubleLongLeftArrow;": "\u27f8",
+ "DoubleLongLeftRightArrow;": "\u27fa",
+ "DoubleLongRightArrow;": "\u27f9",
+ "DoubleRightArrow;": "\u21d2",
+ "DoubleRightTee;": "\u22a8",
+ "DoubleUpArrow;": "\u21d1",
+ "DoubleUpDownArrow;": "\u21d5",
+ "DoubleVerticalBar;": "\u2225",
+ "DownArrow;": "\u2193",
+ "DownArrowBar;": "\u2913",
+ "DownArrowUpArrow;": "\u21f5",
+ "DownBreve;": "\u0311",
+ "DownLeftRightVector;": "\u2950",
+ "DownLeftTeeVector;": "\u295e",
+ "DownLeftVector;": "\u21bd",
+ "DownLeftVectorBar;": "\u2956",
+ "DownRightTeeVector;": "\u295f",
+ "DownRightVector;": "\u21c1",
+ "DownRightVectorBar;": "\u2957",
+ "DownTee;": "\u22a4",
+ "DownTeeArrow;": "\u21a7",
+ "Downarrow;": "\u21d3",
+ "Dscr;": "\U0001d49f",
+ "Dstrok;": "\u0110",
+ "ENG;": "\u014a",
+ "ETH": "\xd0",
+ "ETH;": "\xd0",
+ "Eacute": "\xc9",
+ "Eacute;": "\xc9",
+ "Ecaron;": "\u011a",
+ "Ecirc": "\xca",
+ "Ecirc;": "\xca",
+ "Ecy;": "\u042d",
+ "Edot;": "\u0116",
+ "Efr;": "\U0001d508",
+ "Egrave": "\xc8",
+ "Egrave;": "\xc8",
+ "Element;": "\u2208",
+ "Emacr;": "\u0112",
+ "EmptySmallSquare;": "\u25fb",
+ "EmptyVerySmallSquare;": "\u25ab",
+ "Eogon;": "\u0118",
+ "Eopf;": "\U0001d53c",
+ "Epsilon;": "\u0395",
+ "Equal;": "\u2a75",
+ "EqualTilde;": "\u2242",
+ "Equilibrium;": "\u21cc",
+ "Escr;": "\u2130",
+ "Esim;": "\u2a73",
+ "Eta;": "\u0397",
+ "Euml": "\xcb",
+ "Euml;": "\xcb",
+ "Exists;": "\u2203",
+ "ExponentialE;": "\u2147",
+ "Fcy;": "\u0424",
+ "Ffr;": "\U0001d509",
+ "FilledSmallSquare;": "\u25fc",
+ "FilledVerySmallSquare;": "\u25aa",
+ "Fopf;": "\U0001d53d",
+ "ForAll;": "\u2200",
+ "Fouriertrf;": "\u2131",
+ "Fscr;": "\u2131",
+ "GJcy;": "\u0403",
+ "GT": ">",
+ "GT;": ">",
+ "Gamma;": "\u0393",
+ "Gammad;": "\u03dc",
+ "Gbreve;": "\u011e",
+ "Gcedil;": "\u0122",
+ "Gcirc;": "\u011c",
+ "Gcy;": "\u0413",
+ "Gdot;": "\u0120",
+ "Gfr;": "\U0001d50a",
+ "Gg;": "\u22d9",
+ "Gopf;": "\U0001d53e",
+ "GreaterEqual;": "\u2265",
+ "GreaterEqualLess;": "\u22db",
+ "GreaterFullEqual;": "\u2267",
+ "GreaterGreater;": "\u2aa2",
+ "GreaterLess;": "\u2277",
+ "GreaterSlantEqual;": "\u2a7e",
+ "GreaterTilde;": "\u2273",
+ "Gscr;": "\U0001d4a2",
+ "Gt;": "\u226b",
+ "HARDcy;": "\u042a",
+ "Hacek;": "\u02c7",
+ "Hat;": "^",
+ "Hcirc;": "\u0124",
+ "Hfr;": "\u210c",
+ "HilbertSpace;": "\u210b",
+ "Hopf;": "\u210d",
+ "HorizontalLine;": "\u2500",
+ "Hscr;": "\u210b",
+ "Hstrok;": "\u0126",
+ "HumpDownHump;": "\u224e",
+ "HumpEqual;": "\u224f",
+ "IEcy;": "\u0415",
+ "IJlig;": "\u0132",
+ "IOcy;": "\u0401",
+ "Iacute": "\xcd",
+ "Iacute;": "\xcd",
+ "Icirc": "\xce",
+ "Icirc;": "\xce",
+ "Icy;": "\u0418",
+ "Idot;": "\u0130",
+ "Ifr;": "\u2111",
+ "Igrave": "\xcc",
+ "Igrave;": "\xcc",
+ "Im;": "\u2111",
+ "Imacr;": "\u012a",
+ "ImaginaryI;": "\u2148",
+ "Implies;": "\u21d2",
+ "Int;": "\u222c",
+ "Integral;": "\u222b",
+ "Intersection;": "\u22c2",
+ "InvisibleComma;": "\u2063",
+ "InvisibleTimes;": "\u2062",
+ "Iogon;": "\u012e",
+ "Iopf;": "\U0001d540",
+ "Iota;": "\u0399",
+ "Iscr;": "\u2110",
+ "Itilde;": "\u0128",
+ "Iukcy;": "\u0406",
+ "Iuml": "\xcf",
+ "Iuml;": "\xcf",
+ "Jcirc;": "\u0134",
+ "Jcy;": "\u0419",
+ "Jfr;": "\U0001d50d",
+ "Jopf;": "\U0001d541",
+ "Jscr;": "\U0001d4a5",
+ "Jsercy;": "\u0408",
+ "Jukcy;": "\u0404",
+ "KHcy;": "\u0425",
+ "KJcy;": "\u040c",
+ "Kappa;": "\u039a",
+ "Kcedil;": "\u0136",
+ "Kcy;": "\u041a",
+ "Kfr;": "\U0001d50e",
+ "Kopf;": "\U0001d542",
+ "Kscr;": "\U0001d4a6",
+ "LJcy;": "\u0409",
+ "LT": "<",
+ "LT;": "<",
+ "Lacute;": "\u0139",
+ "Lambda;": "\u039b",
+ "Lang;": "\u27ea",
+ "Laplacetrf;": "\u2112",
+ "Larr;": "\u219e",
+ "Lcaron;": "\u013d",
+ "Lcedil;": "\u013b",
+ "Lcy;": "\u041b",
+ "LeftAngleBracket;": "\u27e8",
+ "LeftArrow;": "\u2190",
+ "LeftArrowBar;": "\u21e4",
+ "LeftArrowRightArrow;": "\u21c6",
+ "LeftCeiling;": "\u2308",
+ "LeftDoubleBracket;": "\u27e6",
+ "LeftDownTeeVector;": "\u2961",
+ "LeftDownVector;": "\u21c3",
+ "LeftDownVectorBar;": "\u2959",
+ "LeftFloor;": "\u230a",
+ "LeftRightArrow;": "\u2194",
+ "LeftRightVector;": "\u294e",
+ "LeftTee;": "\u22a3",
+ "LeftTeeArrow;": "\u21a4",
+ "LeftTeeVector;": "\u295a",
+ "LeftTriangle;": "\u22b2",
+ "LeftTriangleBar;": "\u29cf",
+ "LeftTriangleEqual;": "\u22b4",
+ "LeftUpDownVector;": "\u2951",
+ "LeftUpTeeVector;": "\u2960",
+ "LeftUpVector;": "\u21bf",
+ "LeftUpVectorBar;": "\u2958",
+ "LeftVector;": "\u21bc",
+ "LeftVectorBar;": "\u2952",
+ "Leftarrow;": "\u21d0",
+ "Leftrightarrow;": "\u21d4",
+ "LessEqualGreater;": "\u22da",
+ "LessFullEqual;": "\u2266",
+ "LessGreater;": "\u2276",
+ "LessLess;": "\u2aa1",
+ "LessSlantEqual;": "\u2a7d",
+ "LessTilde;": "\u2272",
+ "Lfr;": "\U0001d50f",
+ "Ll;": "\u22d8",
+ "Lleftarrow;": "\u21da",
+ "Lmidot;": "\u013f",
+ "LongLeftArrow;": "\u27f5",
+ "LongLeftRightArrow;": "\u27f7",
+ "LongRightArrow;": "\u27f6",
+ "Longleftarrow;": "\u27f8",
+ "Longleftrightarrow;": "\u27fa",
+ "Longrightarrow;": "\u27f9",
+ "Lopf;": "\U0001d543",
+ "LowerLeftArrow;": "\u2199",
+ "LowerRightArrow;": "\u2198",
+ "Lscr;": "\u2112",
+ "Lsh;": "\u21b0",
+ "Lstrok;": "\u0141",
+ "Lt;": "\u226a",
+ "Map;": "\u2905",
+ "Mcy;": "\u041c",
+ "MediumSpace;": "\u205f",
+ "Mellintrf;": "\u2133",
+ "Mfr;": "\U0001d510",
+ "MinusPlus;": "\u2213",
+ "Mopf;": "\U0001d544",
+ "Mscr;": "\u2133",
+ "Mu;": "\u039c",
+ "NJcy;": "\u040a",
+ "Nacute;": "\u0143",
+ "Ncaron;": "\u0147",
+ "Ncedil;": "\u0145",
+ "Ncy;": "\u041d",
+ "NegativeMediumSpace;": "\u200b",
+ "NegativeThickSpace;": "\u200b",
+ "NegativeThinSpace;": "\u200b",
+ "NegativeVeryThinSpace;": "\u200b",
+ "NestedGreaterGreater;": "\u226b",
+ "NestedLessLess;": "\u226a",
+ "NewLine;": "\n",
+ "Nfr;": "\U0001d511",
+ "NoBreak;": "\u2060",
+ "NonBreakingSpace;": "\xa0",
+ "Nopf;": "\u2115",
+ "Not;": "\u2aec",
+ "NotCongruent;": "\u2262",
+ "NotCupCap;": "\u226d",
+ "NotDoubleVerticalBar;": "\u2226",
+ "NotElement;": "\u2209",
+ "NotEqual;": "\u2260",
+ "NotEqualTilde;": "\u2242\u0338",
+ "NotExists;": "\u2204",
+ "NotGreater;": "\u226f",
+ "NotGreaterEqual;": "\u2271",
+ "NotGreaterFullEqual;": "\u2267\u0338",
+ "NotGreaterGreater;": "\u226b\u0338",
+ "NotGreaterLess;": "\u2279",
+ "NotGreaterSlantEqual;": "\u2a7e\u0338",
+ "NotGreaterTilde;": "\u2275",
+ "NotHumpDownHump;": "\u224e\u0338",
+ "NotHumpEqual;": "\u224f\u0338",
+ "NotLeftTriangle;": "\u22ea",
+ "NotLeftTriangleBar;": "\u29cf\u0338",
+ "NotLeftTriangleEqual;": "\u22ec",
+ "NotLess;": "\u226e",
+ "NotLessEqual;": "\u2270",
+ "NotLessGreater;": "\u2278",
+ "NotLessLess;": "\u226a\u0338",
+ "NotLessSlantEqual;": "\u2a7d\u0338",
+ "NotLessTilde;": "\u2274",
+ "NotNestedGreaterGreater;": "\u2aa2\u0338",
+ "NotNestedLessLess;": "\u2aa1\u0338",
+ "NotPrecedes;": "\u2280",
+ "NotPrecedesEqual;": "\u2aaf\u0338",
+ "NotPrecedesSlantEqual;": "\u22e0",
+ "NotReverseElement;": "\u220c",
+ "NotRightTriangle;": "\u22eb",
+ "NotRightTriangleBar;": "\u29d0\u0338",
+ "NotRightTriangleEqual;": "\u22ed",
+ "NotSquareSubset;": "\u228f\u0338",
+ "NotSquareSubsetEqual;": "\u22e2",
+ "NotSquareSuperset;": "\u2290\u0338",
+ "NotSquareSupersetEqual;": "\u22e3",
+ "NotSubset;": "\u2282\u20d2",
+ "NotSubsetEqual;": "\u2288",
+ "NotSucceeds;": "\u2281",
+ "NotSucceedsEqual;": "\u2ab0\u0338",
+ "NotSucceedsSlantEqual;": "\u22e1",
+ "NotSucceedsTilde;": "\u227f\u0338",
+ "NotSuperset;": "\u2283\u20d2",
+ "NotSupersetEqual;": "\u2289",
+ "NotTilde;": "\u2241",
+ "NotTildeEqual;": "\u2244",
+ "NotTildeFullEqual;": "\u2247",
+ "NotTildeTilde;": "\u2249",
+ "NotVerticalBar;": "\u2224",
+ "Nscr;": "\U0001d4a9",
+ "Ntilde": "\xd1",
+ "Ntilde;": "\xd1",
+ "Nu;": "\u039d",
+ "OElig;": "\u0152",
+ "Oacute": "\xd3",
+ "Oacute;": "\xd3",
+ "Ocirc": "\xd4",
+ "Ocirc;": "\xd4",
+ "Ocy;": "\u041e",
+ "Odblac;": "\u0150",
+ "Ofr;": "\U0001d512",
+ "Ograve": "\xd2",
+ "Ograve;": "\xd2",
+ "Omacr;": "\u014c",
+ "Omega;": "\u03a9",
+ "Omicron;": "\u039f",
+ "Oopf;": "\U0001d546",
+ "OpenCurlyDoubleQuote;": "\u201c",
+ "OpenCurlyQuote;": "\u2018",
+ "Or;": "\u2a54",
+ "Oscr;": "\U0001d4aa",
+ "Oslash": "\xd8",
+ "Oslash;": "\xd8",
+ "Otilde": "\xd5",
+ "Otilde;": "\xd5",
+ "Otimes;": "\u2a37",
+ "Ouml": "\xd6",
+ "Ouml;": "\xd6",
+ "OverBar;": "\u203e",
+ "OverBrace;": "\u23de",
+ "OverBracket;": "\u23b4",
+ "OverParenthesis;": "\u23dc",
+ "PartialD;": "\u2202",
+ "Pcy;": "\u041f",
+ "Pfr;": "\U0001d513",
+ "Phi;": "\u03a6",
+ "Pi;": "\u03a0",
+ "PlusMinus;": "\xb1",
+ "Poincareplane;": "\u210c",
+ "Popf;": "\u2119",
+ "Pr;": "\u2abb",
+ "Precedes;": "\u227a",
+ "PrecedesEqual;": "\u2aaf",
+ "PrecedesSlantEqual;": "\u227c",
+ "PrecedesTilde;": "\u227e",
+ "Prime;": "\u2033",
+ "Product;": "\u220f",
+ "Proportion;": "\u2237",
+ "Proportional;": "\u221d",
+ "Pscr;": "\U0001d4ab",
+ "Psi;": "\u03a8",
+ "QUOT": "\"",
+ "QUOT;": "\"",
+ "Qfr;": "\U0001d514",
+ "Qopf;": "\u211a",
+ "Qscr;": "\U0001d4ac",
+ "RBarr;": "\u2910",
+ "REG": "\xae",
+ "REG;": "\xae",
+ "Racute;": "\u0154",
+ "Rang;": "\u27eb",
+ "Rarr;": "\u21a0",
+ "Rarrtl;": "\u2916",
+ "Rcaron;": "\u0158",
+ "Rcedil;": "\u0156",
+ "Rcy;": "\u0420",
+ "Re;": "\u211c",
+ "ReverseElement;": "\u220b",
+ "ReverseEquilibrium;": "\u21cb",
+ "ReverseUpEquilibrium;": "\u296f",
+ "Rfr;": "\u211c",
+ "Rho;": "\u03a1",
+ "RightAngleBracket;": "\u27e9",
+ "RightArrow;": "\u2192",
+ "RightArrowBar;": "\u21e5",
+ "RightArrowLeftArrow;": "\u21c4",
+ "RightCeiling;": "\u2309",
+ "RightDoubleBracket;": "\u27e7",
+ "RightDownTeeVector;": "\u295d",
+ "RightDownVector;": "\u21c2",
+ "RightDownVectorBar;": "\u2955",
+ "RightFloor;": "\u230b",
+ "RightTee;": "\u22a2",
+ "RightTeeArrow;": "\u21a6",
+ "RightTeeVector;": "\u295b",
+ "RightTriangle;": "\u22b3",
+ "RightTriangleBar;": "\u29d0",
+ "RightTriangleEqual;": "\u22b5",
+ "RightUpDownVector;": "\u294f",
+ "RightUpTeeVector;": "\u295c",
+ "RightUpVector;": "\u21be",
+ "RightUpVectorBar;": "\u2954",
+ "RightVector;": "\u21c0",
+ "RightVectorBar;": "\u2953",
+ "Rightarrow;": "\u21d2",
+ "Ropf;": "\u211d",
+ "RoundImplies;": "\u2970",
+ "Rrightarrow;": "\u21db",
+ "Rscr;": "\u211b",
+ "Rsh;": "\u21b1",
+ "RuleDelayed;": "\u29f4",
+ "SHCHcy;": "\u0429",
+ "SHcy;": "\u0428",
+ "SOFTcy;": "\u042c",
+ "Sacute;": "\u015a",
+ "Sc;": "\u2abc",
+ "Scaron;": "\u0160",
+ "Scedil;": "\u015e",
+ "Scirc;": "\u015c",
+ "Scy;": "\u0421",
+ "Sfr;": "\U0001d516",
+ "ShortDownArrow;": "\u2193",
+ "ShortLeftArrow;": "\u2190",
+ "ShortRightArrow;": "\u2192",
+ "ShortUpArrow;": "\u2191",
+ "Sigma;": "\u03a3",
+ "SmallCircle;": "\u2218",
+ "Sopf;": "\U0001d54a",
+ "Sqrt;": "\u221a",
+ "Square;": "\u25a1",
+ "SquareIntersection;": "\u2293",
+ "SquareSubset;": "\u228f",
+ "SquareSubsetEqual;": "\u2291",
+ "SquareSuperset;": "\u2290",
+ "SquareSupersetEqual;": "\u2292",
+ "SquareUnion;": "\u2294",
+ "Sscr;": "\U0001d4ae",
+ "Star;": "\u22c6",
+ "Sub;": "\u22d0",
+ "Subset;": "\u22d0",
+ "SubsetEqual;": "\u2286",
+ "Succeeds;": "\u227b",
+ "SucceedsEqual;": "\u2ab0",
+ "SucceedsSlantEqual;": "\u227d",
+ "SucceedsTilde;": "\u227f",
+ "SuchThat;": "\u220b",
+ "Sum;": "\u2211",
+ "Sup;": "\u22d1",
+ "Superset;": "\u2283",
+ "SupersetEqual;": "\u2287",
+ "Supset;": "\u22d1",
+ "THORN": "\xde",
+ "THORN;": "\xde",
+ "TRADE;": "\u2122",
+ "TSHcy;": "\u040b",
+ "TScy;": "\u0426",
+ "Tab;": "\t",
+ "Tau;": "\u03a4",
+ "Tcaron;": "\u0164",
+ "Tcedil;": "\u0162",
+ "Tcy;": "\u0422",
+ "Tfr;": "\U0001d517",
+ "Therefore;": "\u2234",
+ "Theta;": "\u0398",
+ "ThickSpace;": "\u205f\u200a",
+ "ThinSpace;": "\u2009",
+ "Tilde;": "\u223c",
+ "TildeEqual;": "\u2243",
+ "TildeFullEqual;": "\u2245",
+ "TildeTilde;": "\u2248",
+ "Topf;": "\U0001d54b",
+ "TripleDot;": "\u20db",
+ "Tscr;": "\U0001d4af",
+ "Tstrok;": "\u0166",
+ "Uacute": "\xda",
+ "Uacute;": "\xda",
+ "Uarr;": "\u219f",
+ "Uarrocir;": "\u2949",
+ "Ubrcy;": "\u040e",
+ "Ubreve;": "\u016c",
+ "Ucirc": "\xdb",
+ "Ucirc;": "\xdb",
+ "Ucy;": "\u0423",
+ "Udblac;": "\u0170",
+ "Ufr;": "\U0001d518",
+ "Ugrave": "\xd9",
+ "Ugrave;": "\xd9",
+ "Umacr;": "\u016a",
+ "UnderBar;": "_",
+ "UnderBrace;": "\u23df",
+ "UnderBracket;": "\u23b5",
+ "UnderParenthesis;": "\u23dd",
+ "Union;": "\u22c3",
+ "UnionPlus;": "\u228e",
+ "Uogon;": "\u0172",
+ "Uopf;": "\U0001d54c",
+ "UpArrow;": "\u2191",
+ "UpArrowBar;": "\u2912",
+ "UpArrowDownArrow;": "\u21c5",
+ "UpDownArrow;": "\u2195",
+ "UpEquilibrium;": "\u296e",
+ "UpTee;": "\u22a5",
+ "UpTeeArrow;": "\u21a5",
+ "Uparrow;": "\u21d1",
+ "Updownarrow;": "\u21d5",
+ "UpperLeftArrow;": "\u2196",
+ "UpperRightArrow;": "\u2197",
+ "Upsi;": "\u03d2",
+ "Upsilon;": "\u03a5",
+ "Uring;": "\u016e",
+ "Uscr;": "\U0001d4b0",
+ "Utilde;": "\u0168",
+ "Uuml": "\xdc",
+ "Uuml;": "\xdc",
+ "VDash;": "\u22ab",
+ "Vbar;": "\u2aeb",
+ "Vcy;": "\u0412",
+ "Vdash;": "\u22a9",
+ "Vdashl;": "\u2ae6",
+ "Vee;": "\u22c1",
+ "Verbar;": "\u2016",
+ "Vert;": "\u2016",
+ "VerticalBar;": "\u2223",
+ "VerticalLine;": "|",
+ "VerticalSeparator;": "\u2758",
+ "VerticalTilde;": "\u2240",
+ "VeryThinSpace;": "\u200a",
+ "Vfr;": "\U0001d519",
+ "Vopf;": "\U0001d54d",
+ "Vscr;": "\U0001d4b1",
+ "Vvdash;": "\u22aa",
+ "Wcirc;": "\u0174",
+ "Wedge;": "\u22c0",
+ "Wfr;": "\U0001d51a",
+ "Wopf;": "\U0001d54e",
+ "Wscr;": "\U0001d4b2",
+ "Xfr;": "\U0001d51b",
+ "Xi;": "\u039e",
+ "Xopf;": "\U0001d54f",
+ "Xscr;": "\U0001d4b3",
+ "YAcy;": "\u042f",
+ "YIcy;": "\u0407",
+ "YUcy;": "\u042e",
+ "Yacute": "\xdd",
+ "Yacute;": "\xdd",
+ "Ycirc;": "\u0176",
+ "Ycy;": "\u042b",
+ "Yfr;": "\U0001d51c",
+ "Yopf;": "\U0001d550",
+ "Yscr;": "\U0001d4b4",
+ "Yuml;": "\u0178",
+ "ZHcy;": "\u0416",
+ "Zacute;": "\u0179",
+ "Zcaron;": "\u017d",
+ "Zcy;": "\u0417",
+ "Zdot;": "\u017b",
+ "ZeroWidthSpace;": "\u200b",
+ "Zeta;": "\u0396",
+ "Zfr;": "\u2128",
+ "Zopf;": "\u2124",
+ "Zscr;": "\U0001d4b5",
+ "aacute": "\xe1",
+ "aacute;": "\xe1",
+ "abreve;": "\u0103",
+ "ac;": "\u223e",
+ "acE;": "\u223e\u0333",
+ "acd;": "\u223f",
+ "acirc": "\xe2",
+ "acirc;": "\xe2",
+ "acute": "\xb4",
+ "acute;": "\xb4",
+ "acy;": "\u0430",
+ "aelig": "\xe6",
+ "aelig;": "\xe6",
+ "af;": "\u2061",
+ "afr;": "\U0001d51e",
+ "agrave": "\xe0",
+ "agrave;": "\xe0",
+ "alefsym;": "\u2135",
+ "aleph;": "\u2135",
+ "alpha;": "\u03b1",
+ "amacr;": "\u0101",
+ "amalg;": "\u2a3f",
+ "amp": "&",
+ "amp;": "&",
+ "and;": "\u2227",
+ "andand;": "\u2a55",
+ "andd;": "\u2a5c",
+ "andslope;": "\u2a58",
+ "andv;": "\u2a5a",
+ "ang;": "\u2220",
+ "ange;": "\u29a4",
+ "angle;": "\u2220",
+ "angmsd;": "\u2221",
+ "angmsdaa;": "\u29a8",
+ "angmsdab;": "\u29a9",
+ "angmsdac;": "\u29aa",
+ "angmsdad;": "\u29ab",
+ "angmsdae;": "\u29ac",
+ "angmsdaf;": "\u29ad",
+ "angmsdag;": "\u29ae",
+ "angmsdah;": "\u29af",
+ "angrt;": "\u221f",
+ "angrtvb;": "\u22be",
+ "angrtvbd;": "\u299d",
+ "angsph;": "\u2222",
+ "angst;": "\xc5",
+ "angzarr;": "\u237c",
+ "aogon;": "\u0105",
+ "aopf;": "\U0001d552",
+ "ap;": "\u2248",
+ "apE;": "\u2a70",
+ "apacir;": "\u2a6f",
+ "ape;": "\u224a",
+ "apid;": "\u224b",
+ "apos;": "'",
+ "approx;": "\u2248",
+ "approxeq;": "\u224a",
+ "aring": "\xe5",
+ "aring;": "\xe5",
+ "ascr;": "\U0001d4b6",
+ "ast;": "*",
+ "asymp;": "\u2248",
+ "asympeq;": "\u224d",
+ "atilde": "\xe3",
+ "atilde;": "\xe3",
+ "auml": "\xe4",
+ "auml;": "\xe4",
+ "awconint;": "\u2233",
+ "awint;": "\u2a11",
+ "bNot;": "\u2aed",
+ "backcong;": "\u224c",
+ "backepsilon;": "\u03f6",
+ "backprime;": "\u2035",
+ "backsim;": "\u223d",
+ "backsimeq;": "\u22cd",
+ "barvee;": "\u22bd",
+ "barwed;": "\u2305",
+ "barwedge;": "\u2305",
+ "bbrk;": "\u23b5",
+ "bbrktbrk;": "\u23b6",
+ "bcong;": "\u224c",
+ "bcy;": "\u0431",
+ "bdquo;": "\u201e",
+ "becaus;": "\u2235",
+ "because;": "\u2235",
+ "bemptyv;": "\u29b0",
+ "bepsi;": "\u03f6",
+ "bernou;": "\u212c",
+ "beta;": "\u03b2",
+ "beth;": "\u2136",
+ "between;": "\u226c",
+ "bfr;": "\U0001d51f",
+ "bigcap;": "\u22c2",
+ "bigcirc;": "\u25ef",
+ "bigcup;": "\u22c3",
+ "bigodot;": "\u2a00",
+ "bigoplus;": "\u2a01",
+ "bigotimes;": "\u2a02",
+ "bigsqcup;": "\u2a06",
+ "bigstar;": "\u2605",
+ "bigtriangledown;": "\u25bd",
+ "bigtriangleup;": "\u25b3",
+ "biguplus;": "\u2a04",
+ "bigvee;": "\u22c1",
+ "bigwedge;": "\u22c0",
+ "bkarow;": "\u290d",
+ "blacklozenge;": "\u29eb",
+ "blacksquare;": "\u25aa",
+ "blacktriangle;": "\u25b4",
+ "blacktriangledown;": "\u25be",
+ "blacktriangleleft;": "\u25c2",
+ "blacktriangleright;": "\u25b8",
+ "blank;": "\u2423",
+ "blk12;": "\u2592",
+ "blk14;": "\u2591",
+ "blk34;": "\u2593",
+ "block;": "\u2588",
+ "bne;": "=\u20e5",
+ "bnequiv;": "\u2261\u20e5",
+ "bnot;": "\u2310",
+ "bopf;": "\U0001d553",
+ "bot;": "\u22a5",
+ "bottom;": "\u22a5",
+ "bowtie;": "\u22c8",
+ "boxDL;": "\u2557",
+ "boxDR;": "\u2554",
+ "boxDl;": "\u2556",
+ "boxDr;": "\u2553",
+ "boxH;": "\u2550",
+ "boxHD;": "\u2566",
+ "boxHU;": "\u2569",
+ "boxHd;": "\u2564",
+ "boxHu;": "\u2567",
+ "boxUL;": "\u255d",
+ "boxUR;": "\u255a",
+ "boxUl;": "\u255c",
+ "boxUr;": "\u2559",
+ "boxV;": "\u2551",
+ "boxVH;": "\u256c",
+ "boxVL;": "\u2563",
+ "boxVR;": "\u2560",
+ "boxVh;": "\u256b",
+ "boxVl;": "\u2562",
+ "boxVr;": "\u255f",
+ "boxbox;": "\u29c9",
+ "boxdL;": "\u2555",
+ "boxdR;": "\u2552",
+ "boxdl;": "\u2510",
+ "boxdr;": "\u250c",
+ "boxh;": "\u2500",
+ "boxhD;": "\u2565",
+ "boxhU;": "\u2568",
+ "boxhd;": "\u252c",
+ "boxhu;": "\u2534",
+ "boxminus;": "\u229f",
+ "boxplus;": "\u229e",
+ "boxtimes;": "\u22a0",
+ "boxuL;": "\u255b",
+ "boxuR;": "\u2558",
+ "boxul;": "\u2518",
+ "boxur;": "\u2514",
+ "boxv;": "\u2502",
+ "boxvH;": "\u256a",
+ "boxvL;": "\u2561",
+ "boxvR;": "\u255e",
+ "boxvh;": "\u253c",
+ "boxvl;": "\u2524",
+ "boxvr;": "\u251c",
+ "bprime;": "\u2035",
+ "breve;": "\u02d8",
+ "brvbar": "\xa6",
+ "brvbar;": "\xa6",
+ "bscr;": "\U0001d4b7",
+ "bsemi;": "\u204f",
+ "bsim;": "\u223d",
+ "bsime;": "\u22cd",
+ "bsol;": "\\",
+ "bsolb;": "\u29c5",
+ "bsolhsub;": "\u27c8",
+ "bull;": "\u2022",
+ "bullet;": "\u2022",
+ "bump;": "\u224e",
+ "bumpE;": "\u2aae",
+ "bumpe;": "\u224f",
+ "bumpeq;": "\u224f",
+ "cacute;": "\u0107",
+ "cap;": "\u2229",
+ "capand;": "\u2a44",
+ "capbrcup;": "\u2a49",
+ "capcap;": "\u2a4b",
+ "capcup;": "\u2a47",
+ "capdot;": "\u2a40",
+ "caps;": "\u2229\ufe00",
+ "caret;": "\u2041",
+ "caron;": "\u02c7",
+ "ccaps;": "\u2a4d",
+ "ccaron;": "\u010d",
+ "ccedil": "\xe7",
+ "ccedil;": "\xe7",
+ "ccirc;": "\u0109",
+ "ccups;": "\u2a4c",
+ "ccupssm;": "\u2a50",
+ "cdot;": "\u010b",
+ "cedil": "\xb8",
+ "cedil;": "\xb8",
+ "cemptyv;": "\u29b2",
+ "cent": "\xa2",
+ "cent;": "\xa2",
+ "centerdot;": "\xb7",
+ "cfr;": "\U0001d520",
+ "chcy;": "\u0447",
+ "check;": "\u2713",
+ "checkmark;": "\u2713",
+ "chi;": "\u03c7",
+ "cir;": "\u25cb",
+ "cirE;": "\u29c3",
+ "circ;": "\u02c6",
+ "circeq;": "\u2257",
+ "circlearrowleft;": "\u21ba",
+ "circlearrowright;": "\u21bb",
+ "circledR;": "\xae",
+ "circledS;": "\u24c8",
+ "circledast;": "\u229b",
+ "circledcirc;": "\u229a",
+ "circleddash;": "\u229d",
+ "cire;": "\u2257",
+ "cirfnint;": "\u2a10",
+ "cirmid;": "\u2aef",
+ "cirscir;": "\u29c2",
+ "clubs;": "\u2663",
+ "clubsuit;": "\u2663",
+ "colon;": ":",
+ "colone;": "\u2254",
+ "coloneq;": "\u2254",
+ "comma;": ",",
+ "commat;": "@",
+ "comp;": "\u2201",
+ "compfn;": "\u2218",
+ "complement;": "\u2201",
+ "complexes;": "\u2102",
+ "cong;": "\u2245",
+ "congdot;": "\u2a6d",
+ "conint;": "\u222e",
+ "copf;": "\U0001d554",
+ "coprod;": "\u2210",
+ "copy": "\xa9",
+ "copy;": "\xa9",
+ "copysr;": "\u2117",
+ "crarr;": "\u21b5",
+ "cross;": "\u2717",
+ "cscr;": "\U0001d4b8",
+ "csub;": "\u2acf",
+ "csube;": "\u2ad1",
+ "csup;": "\u2ad0",
+ "csupe;": "\u2ad2",
+ "ctdot;": "\u22ef",
+ "cudarrl;": "\u2938",
+ "cudarrr;": "\u2935",
+ "cuepr;": "\u22de",
+ "cuesc;": "\u22df",
+ "cularr;": "\u21b6",
+ "cularrp;": "\u293d",
+ "cup;": "\u222a",
+ "cupbrcap;": "\u2a48",
+ "cupcap;": "\u2a46",
+ "cupcup;": "\u2a4a",
+ "cupdot;": "\u228d",
+ "cupor;": "\u2a45",
+ "cups;": "\u222a\ufe00",
+ "curarr;": "\u21b7",
+ "curarrm;": "\u293c",
+ "curlyeqprec;": "\u22de",
+ "curlyeqsucc;": "\u22df",
+ "curlyvee;": "\u22ce",
+ "curlywedge;": "\u22cf",
+ "curren": "\xa4",
+ "curren;": "\xa4",
+ "curvearrowleft;": "\u21b6",
+ "curvearrowright;": "\u21b7",
+ "cuvee;": "\u22ce",
+ "cuwed;": "\u22cf",
+ "cwconint;": "\u2232",
+ "cwint;": "\u2231",
+ "cylcty;": "\u232d",
+ "dArr;": "\u21d3",
+ "dHar;": "\u2965",
+ "dagger;": "\u2020",
+ "daleth;": "\u2138",
+ "darr;": "\u2193",
+ "dash;": "\u2010",
+ "dashv;": "\u22a3",
+ "dbkarow;": "\u290f",
+ "dblac;": "\u02dd",
+ "dcaron;": "\u010f",
+ "dcy;": "\u0434",
+ "dd;": "\u2146",
+ "ddagger;": "\u2021",
+ "ddarr;": "\u21ca",
+ "ddotseq;": "\u2a77",
+ "deg": "\xb0",
+ "deg;": "\xb0",
+ "delta;": "\u03b4",
+ "demptyv;": "\u29b1",
+ "dfisht;": "\u297f",
+ "dfr;": "\U0001d521",
+ "dharl;": "\u21c3",
+ "dharr;": "\u21c2",
+ "diam;": "\u22c4",
+ "diamond;": "\u22c4",
+ "diamondsuit;": "\u2666",
+ "diams;": "\u2666",
+ "die;": "\xa8",
+ "digamma;": "\u03dd",
+ "disin;": "\u22f2",
+ "div;": "\xf7",
+ "divide": "\xf7",
+ "divide;": "\xf7",
+ "divideontimes;": "\u22c7",
+ "divonx;": "\u22c7",
+ "djcy;": "\u0452",
+ "dlcorn;": "\u231e",
+ "dlcrop;": "\u230d",
+ "dollar;": "$",
+ "dopf;": "\U0001d555",
+ "dot;": "\u02d9",
+ "doteq;": "\u2250",
+ "doteqdot;": "\u2251",
+ "dotminus;": "\u2238",
+ "dotplus;": "\u2214",
+ "dotsquare;": "\u22a1",
+ "doublebarwedge;": "\u2306",
+ "downarrow;": "\u2193",
+ "downdownarrows;": "\u21ca",
+ "downharpoonleft;": "\u21c3",
+ "downharpoonright;": "\u21c2",
+ "drbkarow;": "\u2910",
+ "drcorn;": "\u231f",
+ "drcrop;": "\u230c",
+ "dscr;": "\U0001d4b9",
+ "dscy;": "\u0455",
+ "dsol;": "\u29f6",
+ "dstrok;": "\u0111",
+ "dtdot;": "\u22f1",
+ "dtri;": "\u25bf",
+ "dtrif;": "\u25be",
+ "duarr;": "\u21f5",
+ "duhar;": "\u296f",
+ "dwangle;": "\u29a6",
+ "dzcy;": "\u045f",
+ "dzigrarr;": "\u27ff",
+ "eDDot;": "\u2a77",
+ "eDot;": "\u2251",
+ "eacute": "\xe9",
+ "eacute;": "\xe9",
+ "easter;": "\u2a6e",
+ "ecaron;": "\u011b",
+ "ecir;": "\u2256",
+ "ecirc": "\xea",
+ "ecirc;": "\xea",
+ "ecolon;": "\u2255",
+ "ecy;": "\u044d",
+ "edot;": "\u0117",
+ "ee;": "\u2147",
+ "efDot;": "\u2252",
+ "efr;": "\U0001d522",
+ "eg;": "\u2a9a",
+ "egrave": "\xe8",
+ "egrave;": "\xe8",
+ "egs;": "\u2a96",
+ "egsdot;": "\u2a98",
+ "el;": "\u2a99",
+ "elinters;": "\u23e7",
+ "ell;": "\u2113",
+ "els;": "\u2a95",
+ "elsdot;": "\u2a97",
+ "emacr;": "\u0113",
+ "empty;": "\u2205",
+ "emptyset;": "\u2205",
+ "emptyv;": "\u2205",
+ "emsp13;": "\u2004",
+ "emsp14;": "\u2005",
+ "emsp;": "\u2003",
+ "eng;": "\u014b",
+ "ensp;": "\u2002",
+ "eogon;": "\u0119",
+ "eopf;": "\U0001d556",
+ "epar;": "\u22d5",
+ "eparsl;": "\u29e3",
+ "eplus;": "\u2a71",
+ "epsi;": "\u03b5",
+ "epsilon;": "\u03b5",
+ "epsiv;": "\u03f5",
+ "eqcirc;": "\u2256",
+ "eqcolon;": "\u2255",
+ "eqsim;": "\u2242",
+ "eqslantgtr;": "\u2a96",
+ "eqslantless;": "\u2a95",
+ "equals;": "=",
+ "equest;": "\u225f",
+ "equiv;": "\u2261",
+ "equivDD;": "\u2a78",
+ "eqvparsl;": "\u29e5",
+ "erDot;": "\u2253",
+ "erarr;": "\u2971",
+ "escr;": "\u212f",
+ "esdot;": "\u2250",
+ "esim;": "\u2242",
+ "eta;": "\u03b7",
+ "eth": "\xf0",
+ "eth;": "\xf0",
+ "euml": "\xeb",
+ "euml;": "\xeb",
+ "euro;": "\u20ac",
+ "excl;": "!",
+ "exist;": "\u2203",
+ "expectation;": "\u2130",
+ "exponentiale;": "\u2147",
+ "fallingdotseq;": "\u2252",
+ "fcy;": "\u0444",
+ "female;": "\u2640",
+ "ffilig;": "\ufb03",
+ "fflig;": "\ufb00",
+ "ffllig;": "\ufb04",
+ "ffr;": "\U0001d523",
+ "filig;": "\ufb01",
+ "fjlig;": "fj",
+ "flat;": "\u266d",
+ "fllig;": "\ufb02",
+ "fltns;": "\u25b1",
+ "fnof;": "\u0192",
+ "fopf;": "\U0001d557",
+ "forall;": "\u2200",
+ "fork;": "\u22d4",
+ "forkv;": "\u2ad9",
+ "fpartint;": "\u2a0d",
+ "frac12": "\xbd",
+ "frac12;": "\xbd",
+ "frac13;": "\u2153",
+ "frac14": "\xbc",
+ "frac14;": "\xbc",
+ "frac15;": "\u2155",
+ "frac16;": "\u2159",
+ "frac18;": "\u215b",
+ "frac23;": "\u2154",
+ "frac25;": "\u2156",
+ "frac34": "\xbe",
+ "frac34;": "\xbe",
+ "frac35;": "\u2157",
+ "frac38;": "\u215c",
+ "frac45;": "\u2158",
+ "frac56;": "\u215a",
+ "frac58;": "\u215d",
+ "frac78;": "\u215e",
+ "frasl;": "\u2044",
+ "frown;": "\u2322",
+ "fscr;": "\U0001d4bb",
+ "gE;": "\u2267",
+ "gEl;": "\u2a8c",
+ "gacute;": "\u01f5",
+ "gamma;": "\u03b3",
+ "gammad;": "\u03dd",
+ "gap;": "\u2a86",
+ "gbreve;": "\u011f",
+ "gcirc;": "\u011d",
+ "gcy;": "\u0433",
+ "gdot;": "\u0121",
+ "ge;": "\u2265",
+ "gel;": "\u22db",
+ "geq;": "\u2265",
+ "geqq;": "\u2267",
+ "geqslant;": "\u2a7e",
+ "ges;": "\u2a7e",
+ "gescc;": "\u2aa9",
+ "gesdot;": "\u2a80",
+ "gesdoto;": "\u2a82",
+ "gesdotol;": "\u2a84",
+ "gesl;": "\u22db\ufe00",
+ "gesles;": "\u2a94",
+ "gfr;": "\U0001d524",
+ "gg;": "\u226b",
+ "ggg;": "\u22d9",
+ "gimel;": "\u2137",
+ "gjcy;": "\u0453",
+ "gl;": "\u2277",
+ "glE;": "\u2a92",
+ "gla;": "\u2aa5",
+ "glj;": "\u2aa4",
+ "gnE;": "\u2269",
+ "gnap;": "\u2a8a",
+ "gnapprox;": "\u2a8a",
+ "gne;": "\u2a88",
+ "gneq;": "\u2a88",
+ "gneqq;": "\u2269",
+ "gnsim;": "\u22e7",
+ "gopf;": "\U0001d558",
+ "grave;": "`",
+ "gscr;": "\u210a",
+ "gsim;": "\u2273",
+ "gsime;": "\u2a8e",
+ "gsiml;": "\u2a90",
+ "gt": ">",
+ "gt;": ">",
+ "gtcc;": "\u2aa7",
+ "gtcir;": "\u2a7a",
+ "gtdot;": "\u22d7",
+ "gtlPar;": "\u2995",
+ "gtquest;": "\u2a7c",
+ "gtrapprox;": "\u2a86",
+ "gtrarr;": "\u2978",
+ "gtrdot;": "\u22d7",
+ "gtreqless;": "\u22db",
+ "gtreqqless;": "\u2a8c",
+ "gtrless;": "\u2277",
+ "gtrsim;": "\u2273",
+ "gvertneqq;": "\u2269\ufe00",
+ "gvnE;": "\u2269\ufe00",
+ "hArr;": "\u21d4",
+ "hairsp;": "\u200a",
+ "half;": "\xbd",
+ "hamilt;": "\u210b",
+ "hardcy;": "\u044a",
+ "harr;": "\u2194",
+ "harrcir;": "\u2948",
+ "harrw;": "\u21ad",
+ "hbar;": "\u210f",
+ "hcirc;": "\u0125",
+ "hearts;": "\u2665",
+ "heartsuit;": "\u2665",
+ "hellip;": "\u2026",
+ "hercon;": "\u22b9",
+ "hfr;": "\U0001d525",
+ "hksearow;": "\u2925",
+ "hkswarow;": "\u2926",
+ "hoarr;": "\u21ff",
+ "homtht;": "\u223b",
+ "hookleftarrow;": "\u21a9",
+ "hookrightarrow;": "\u21aa",
+ "hopf;": "\U0001d559",
+ "horbar;": "\u2015",
+ "hscr;": "\U0001d4bd",
+ "hslash;": "\u210f",
+ "hstrok;": "\u0127",
+ "hybull;": "\u2043",
+ "hyphen;": "\u2010",
+ "iacute": "\xed",
+ "iacute;": "\xed",
+ "ic;": "\u2063",
+ "icirc": "\xee",
+ "icirc;": "\xee",
+ "icy;": "\u0438",
+ "iecy;": "\u0435",
+ "iexcl": "\xa1",
+ "iexcl;": "\xa1",
+ "iff;": "\u21d4",
+ "ifr;": "\U0001d526",
+ "igrave": "\xec",
+ "igrave;": "\xec",
+ "ii;": "\u2148",
+ "iiiint;": "\u2a0c",
+ "iiint;": "\u222d",
+ "iinfin;": "\u29dc",
+ "iiota;": "\u2129",
+ "ijlig;": "\u0133",
+ "imacr;": "\u012b",
+ "image;": "\u2111",
+ "imagline;": "\u2110",
+ "imagpart;": "\u2111",
+ "imath;": "\u0131",
+ "imof;": "\u22b7",
+ "imped;": "\u01b5",
+ "in;": "\u2208",
+ "incare;": "\u2105",
+ "infin;": "\u221e",
+ "infintie;": "\u29dd",
+ "inodot;": "\u0131",
+ "int;": "\u222b",
+ "intcal;": "\u22ba",
+ "integers;": "\u2124",
+ "intercal;": "\u22ba",
+ "intlarhk;": "\u2a17",
+ "intprod;": "\u2a3c",
+ "iocy;": "\u0451",
+ "iogon;": "\u012f",
+ "iopf;": "\U0001d55a",
+ "iota;": "\u03b9",
+ "iprod;": "\u2a3c",
+ "iquest": "\xbf",
+ "iquest;": "\xbf",
+ "iscr;": "\U0001d4be",
+ "isin;": "\u2208",
+ "isinE;": "\u22f9",
+ "isindot;": "\u22f5",
+ "isins;": "\u22f4",
+ "isinsv;": "\u22f3",
+ "isinv;": "\u2208",
+ "it;": "\u2062",
+ "itilde;": "\u0129",
+ "iukcy;": "\u0456",
+ "iuml": "\xef",
+ "iuml;": "\xef",
+ "jcirc;": "\u0135",
+ "jcy;": "\u0439",
+ "jfr;": "\U0001d527",
+ "jmath;": "\u0237",
+ "jopf;": "\U0001d55b",
+ "jscr;": "\U0001d4bf",
+ "jsercy;": "\u0458",
+ "jukcy;": "\u0454",
+ "kappa;": "\u03ba",
+ "kappav;": "\u03f0",
+ "kcedil;": "\u0137",
+ "kcy;": "\u043a",
+ "kfr;": "\U0001d528",
+ "kgreen;": "\u0138",
+ "khcy;": "\u0445",
+ "kjcy;": "\u045c",
+ "kopf;": "\U0001d55c",
+ "kscr;": "\U0001d4c0",
+ "lAarr;": "\u21da",
+ "lArr;": "\u21d0",
+ "lAtail;": "\u291b",
+ "lBarr;": "\u290e",
+ "lE;": "\u2266",
+ "lEg;": "\u2a8b",
+ "lHar;": "\u2962",
+ "lacute;": "\u013a",
+ "laemptyv;": "\u29b4",
+ "lagran;": "\u2112",
+ "lambda;": "\u03bb",
+ "lang;": "\u27e8",
+ "langd;": "\u2991",
+ "langle;": "\u27e8",
+ "lap;": "\u2a85",
+ "laquo": "\xab",
+ "laquo;": "\xab",
+ "larr;": "\u2190",
+ "larrb;": "\u21e4",
+ "larrbfs;": "\u291f",
+ "larrfs;": "\u291d",
+ "larrhk;": "\u21a9",
+ "larrlp;": "\u21ab",
+ "larrpl;": "\u2939",
+ "larrsim;": "\u2973",
+ "larrtl;": "\u21a2",
+ "lat;": "\u2aab",
+ "latail;": "\u2919",
+ "late;": "\u2aad",
+ "lates;": "\u2aad\ufe00",
+ "lbarr;": "\u290c",
+ "lbbrk;": "\u2772",
+ "lbrace;": "{",
+ "lbrack;": "[",
+ "lbrke;": "\u298b",
+ "lbrksld;": "\u298f",
+ "lbrkslu;": "\u298d",
+ "lcaron;": "\u013e",
+ "lcedil;": "\u013c",
+ "lceil;": "\u2308",
+ "lcub;": "{",
+ "lcy;": "\u043b",
+ "ldca;": "\u2936",
+ "ldquo;": "\u201c",
+ "ldquor;": "\u201e",
+ "ldrdhar;": "\u2967",
+ "ldrushar;": "\u294b",
+ "ldsh;": "\u21b2",
+ "le;": "\u2264",
+ "leftarrow;": "\u2190",
+ "leftarrowtail;": "\u21a2",
+ "leftharpoondown;": "\u21bd",
+ "leftharpoonup;": "\u21bc",
+ "leftleftarrows;": "\u21c7",
+ "leftrightarrow;": "\u2194",
+ "leftrightarrows;": "\u21c6",
+ "leftrightharpoons;": "\u21cb",
+ "leftrightsquigarrow;": "\u21ad",
+ "leftthreetimes;": "\u22cb",
+ "leg;": "\u22da",
+ "leq;": "\u2264",
+ "leqq;": "\u2266",
+ "leqslant;": "\u2a7d",
+ "les;": "\u2a7d",
+ "lescc;": "\u2aa8",
+ "lesdot;": "\u2a7f",
+ "lesdoto;": "\u2a81",
+ "lesdotor;": "\u2a83",
+ "lesg;": "\u22da\ufe00",
+ "lesges;": "\u2a93",
+ "lessapprox;": "\u2a85",
+ "lessdot;": "\u22d6",
+ "lesseqgtr;": "\u22da",
+ "lesseqqgtr;": "\u2a8b",
+ "lessgtr;": "\u2276",
+ "lesssim;": "\u2272",
+ "lfisht;": "\u297c",
+ "lfloor;": "\u230a",
+ "lfr;": "\U0001d529",
+ "lg;": "\u2276",
+ "lgE;": "\u2a91",
+ "lhard;": "\u21bd",
+ "lharu;": "\u21bc",
+ "lharul;": "\u296a",
+ "lhblk;": "\u2584",
+ "ljcy;": "\u0459",
+ "ll;": "\u226a",
+ "llarr;": "\u21c7",
+ "llcorner;": "\u231e",
+ "llhard;": "\u296b",
+ "lltri;": "\u25fa",
+ "lmidot;": "\u0140",
+ "lmoust;": "\u23b0",
+ "lmoustache;": "\u23b0",
+ "lnE;": "\u2268",
+ "lnap;": "\u2a89",
+ "lnapprox;": "\u2a89",
+ "lne;": "\u2a87",
+ "lneq;": "\u2a87",
+ "lneqq;": "\u2268",
+ "lnsim;": "\u22e6",
+ "loang;": "\u27ec",
+ "loarr;": "\u21fd",
+ "lobrk;": "\u27e6",
+ "longleftarrow;": "\u27f5",
+ "longleftrightarrow;": "\u27f7",
+ "longmapsto;": "\u27fc",
+ "longrightarrow;": "\u27f6",
+ "looparrowleft;": "\u21ab",
+ "looparrowright;": "\u21ac",
+ "lopar;": "\u2985",
+ "lopf;": "\U0001d55d",
+ "loplus;": "\u2a2d",
+ "lotimes;": "\u2a34",
+ "lowast;": "\u2217",
+ "lowbar;": "_",
+ "loz;": "\u25ca",
+ "lozenge;": "\u25ca",
+ "lozf;": "\u29eb",
+ "lpar;": "(",
+ "lparlt;": "\u2993",
+ "lrarr;": "\u21c6",
+ "lrcorner;": "\u231f",
+ "lrhar;": "\u21cb",
+ "lrhard;": "\u296d",
+ "lrm;": "\u200e",
+ "lrtri;": "\u22bf",
+ "lsaquo;": "\u2039",
+ "lscr;": "\U0001d4c1",
+ "lsh;": "\u21b0",
+ "lsim;": "\u2272",
+ "lsime;": "\u2a8d",
+ "lsimg;": "\u2a8f",
+ "lsqb;": "[",
+ "lsquo;": "\u2018",
+ "lsquor;": "\u201a",
+ "lstrok;": "\u0142",
+ "lt": "<",
+ "lt;": "<",
+ "ltcc;": "\u2aa6",
+ "ltcir;": "\u2a79",
+ "ltdot;": "\u22d6",
+ "lthree;": "\u22cb",
+ "ltimes;": "\u22c9",
+ "ltlarr;": "\u2976",
+ "ltquest;": "\u2a7b",
+ "ltrPar;": "\u2996",
+ "ltri;": "\u25c3",
+ "ltrie;": "\u22b4",
+ "ltrif;": "\u25c2",
+ "lurdshar;": "\u294a",
+ "luruhar;": "\u2966",
+ "lvertneqq;": "\u2268\ufe00",
+ "lvnE;": "\u2268\ufe00",
+ "mDDot;": "\u223a",
+ "macr": "\xaf",
+ "macr;": "\xaf",
+ "male;": "\u2642",
+ "malt;": "\u2720",
+ "maltese;": "\u2720",
+ "map;": "\u21a6",
+ "mapsto;": "\u21a6",
+ "mapstodown;": "\u21a7",
+ "mapstoleft;": "\u21a4",
+ "mapstoup;": "\u21a5",
+ "marker;": "\u25ae",
+ "mcomma;": "\u2a29",
+ "mcy;": "\u043c",
+ "mdash;": "\u2014",
+ "measuredangle;": "\u2221",
+ "mfr;": "\U0001d52a",
+ "mho;": "\u2127",
+ "micro": "\xb5",
+ "micro;": "\xb5",
+ "mid;": "\u2223",
+ "midast;": "*",
+ "midcir;": "\u2af0",
+ "middot": "\xb7",
+ "middot;": "\xb7",
+ "minus;": "\u2212",
+ "minusb;": "\u229f",
+ "minusd;": "\u2238",
+ "minusdu;": "\u2a2a",
+ "mlcp;": "\u2adb",
+ "mldr;": "\u2026",
+ "mnplus;": "\u2213",
+ "models;": "\u22a7",
+ "mopf;": "\U0001d55e",
+ "mp;": "\u2213",
+ "mscr;": "\U0001d4c2",
+ "mstpos;": "\u223e",
+ "mu;": "\u03bc",
+ "multimap;": "\u22b8",
+ "mumap;": "\u22b8",
+ "nGg;": "\u22d9\u0338",
+ "nGt;": "\u226b\u20d2",
+ "nGtv;": "\u226b\u0338",
+ "nLeftarrow;": "\u21cd",
+ "nLeftrightarrow;": "\u21ce",
+ "nLl;": "\u22d8\u0338",
+ "nLt;": "\u226a\u20d2",
+ "nLtv;": "\u226a\u0338",
+ "nRightarrow;": "\u21cf",
+ "nVDash;": "\u22af",
+ "nVdash;": "\u22ae",
+ "nabla;": "\u2207",
+ "nacute;": "\u0144",
+ "nang;": "\u2220\u20d2",
+ "nap;": "\u2249",
+ "napE;": "\u2a70\u0338",
+ "napid;": "\u224b\u0338",
+ "napos;": "\u0149",
+ "napprox;": "\u2249",
+ "natur;": "\u266e",
+ "natural;": "\u266e",
+ "naturals;": "\u2115",
+ "nbsp": "\xa0",
+ "nbsp;": "\xa0",
+ "nbump;": "\u224e\u0338",
+ "nbumpe;": "\u224f\u0338",
+ "ncap;": "\u2a43",
+ "ncaron;": "\u0148",
+ "ncedil;": "\u0146",
+ "ncong;": "\u2247",
+ "ncongdot;": "\u2a6d\u0338",
+ "ncup;": "\u2a42",
+ "ncy;": "\u043d",
+ "ndash;": "\u2013",
+ "ne;": "\u2260",
+ "neArr;": "\u21d7",
+ "nearhk;": "\u2924",
+ "nearr;": "\u2197",
+ "nearrow;": "\u2197",
+ "nedot;": "\u2250\u0338",
+ "nequiv;": "\u2262",
+ "nesear;": "\u2928",
+ "nesim;": "\u2242\u0338",
+ "nexist;": "\u2204",
+ "nexists;": "\u2204",
+ "nfr;": "\U0001d52b",
+ "ngE;": "\u2267\u0338",
+ "nge;": "\u2271",
+ "ngeq;": "\u2271",
+ "ngeqq;": "\u2267\u0338",
+ "ngeqslant;": "\u2a7e\u0338",
+ "nges;": "\u2a7e\u0338",
+ "ngsim;": "\u2275",
+ "ngt;": "\u226f",
+ "ngtr;": "\u226f",
+ "nhArr;": "\u21ce",
+ "nharr;": "\u21ae",
+ "nhpar;": "\u2af2",
+ "ni;": "\u220b",
+ "nis;": "\u22fc",
+ "nisd;": "\u22fa",
+ "niv;": "\u220b",
+ "njcy;": "\u045a",
+ "nlArr;": "\u21cd",
+ "nlE;": "\u2266\u0338",
+ "nlarr;": "\u219a",
+ "nldr;": "\u2025",
+ "nle;": "\u2270",
+ "nleftarrow;": "\u219a",
+ "nleftrightarrow;": "\u21ae",
+ "nleq;": "\u2270",
+ "nleqq;": "\u2266\u0338",
+ "nleqslant;": "\u2a7d\u0338",
+ "nles;": "\u2a7d\u0338",
+ "nless;": "\u226e",
+ "nlsim;": "\u2274",
+ "nlt;": "\u226e",
+ "nltri;": "\u22ea",
+ "nltrie;": "\u22ec",
+ "nmid;": "\u2224",
+ "nopf;": "\U0001d55f",
+ "not": "\xac",
+ "not;": "\xac",
+ "notin;": "\u2209",
+ "notinE;": "\u22f9\u0338",
+ "notindot;": "\u22f5\u0338",
+ "notinva;": "\u2209",
+ "notinvb;": "\u22f7",
+ "notinvc;": "\u22f6",
+ "notni;": "\u220c",
+ "notniva;": "\u220c",
+ "notnivb;": "\u22fe",
+ "notnivc;": "\u22fd",
+ "npar;": "\u2226",
+ "nparallel;": "\u2226",
+ "nparsl;": "\u2afd\u20e5",
+ "npart;": "\u2202\u0338",
+ "npolint;": "\u2a14",
+ "npr;": "\u2280",
+ "nprcue;": "\u22e0",
+ "npre;": "\u2aaf\u0338",
+ "nprec;": "\u2280",
+ "npreceq;": "\u2aaf\u0338",
+ "nrArr;": "\u21cf",
+ "nrarr;": "\u219b",
+ "nrarrc;": "\u2933\u0338",
+ "nrarrw;": "\u219d\u0338",
+ "nrightarrow;": "\u219b",
+ "nrtri;": "\u22eb",
+ "nrtrie;": "\u22ed",
+ "nsc;": "\u2281",
+ "nsccue;": "\u22e1",
+ "nsce;": "\u2ab0\u0338",
+ "nscr;": "\U0001d4c3",
+ "nshortmid;": "\u2224",
+ "nshortparallel;": "\u2226",
+ "nsim;": "\u2241",
+ "nsime;": "\u2244",
+ "nsimeq;": "\u2244",
+ "nsmid;": "\u2224",
+ "nspar;": "\u2226",
+ "nsqsube;": "\u22e2",
+ "nsqsupe;": "\u22e3",
+ "nsub;": "\u2284",
+ "nsubE;": "\u2ac5\u0338",
+ "nsube;": "\u2288",
+ "nsubset;": "\u2282\u20d2",
+ "nsubseteq;": "\u2288",
+ "nsubseteqq;": "\u2ac5\u0338",
+ "nsucc;": "\u2281",
+ "nsucceq;": "\u2ab0\u0338",
+ "nsup;": "\u2285",
+ "nsupE;": "\u2ac6\u0338",
+ "nsupe;": "\u2289",
+ "nsupset;": "\u2283\u20d2",
+ "nsupseteq;": "\u2289",
+ "nsupseteqq;": "\u2ac6\u0338",
+ "ntgl;": "\u2279",
+ "ntilde": "\xf1",
+ "ntilde;": "\xf1",
+ "ntlg;": "\u2278",
+ "ntriangleleft;": "\u22ea",
+ "ntrianglelefteq;": "\u22ec",
+ "ntriangleright;": "\u22eb",
+ "ntrianglerighteq;": "\u22ed",
+ "nu;": "\u03bd",
+ "num;": "#",
+ "numero;": "\u2116",
+ "numsp;": "\u2007",
+ "nvDash;": "\u22ad",
+ "nvHarr;": "\u2904",
+ "nvap;": "\u224d\u20d2",
+ "nvdash;": "\u22ac",
+ "nvge;": "\u2265\u20d2",
+ "nvgt;": ">\u20d2",
+ "nvinfin;": "\u29de",
+ "nvlArr;": "\u2902",
+ "nvle;": "\u2264\u20d2",
+ "nvlt;": "<\u20d2",
+ "nvltrie;": "\u22b4\u20d2",
+ "nvrArr;": "\u2903",
+ "nvrtrie;": "\u22b5\u20d2",
+ "nvsim;": "\u223c\u20d2",
+ "nwArr;": "\u21d6",
+ "nwarhk;": "\u2923",
+ "nwarr;": "\u2196",
+ "nwarrow;": "\u2196",
+ "nwnear;": "\u2927",
+ "oS;": "\u24c8",
+ "oacute": "\xf3",
+ "oacute;": "\xf3",
+ "oast;": "\u229b",
+ "ocir;": "\u229a",
+ "ocirc": "\xf4",
+ "ocirc;": "\xf4",
+ "ocy;": "\u043e",
+ "odash;": "\u229d",
+ "odblac;": "\u0151",
+ "odiv;": "\u2a38",
+ "odot;": "\u2299",
+ "odsold;": "\u29bc",
+ "oelig;": "\u0153",
+ "ofcir;": "\u29bf",
+ "ofr;": "\U0001d52c",
+ "ogon;": "\u02db",
+ "ograve": "\xf2",
+ "ograve;": "\xf2",
+ "ogt;": "\u29c1",
+ "ohbar;": "\u29b5",
+ "ohm;": "\u03a9",
+ "oint;": "\u222e",
+ "olarr;": "\u21ba",
+ "olcir;": "\u29be",
+ "olcross;": "\u29bb",
+ "oline;": "\u203e",
+ "olt;": "\u29c0",
+ "omacr;": "\u014d",
+ "omega;": "\u03c9",
+ "omicron;": "\u03bf",
+ "omid;": "\u29b6",
+ "ominus;": "\u2296",
+ "oopf;": "\U0001d560",
+ "opar;": "\u29b7",
+ "operp;": "\u29b9",
+ "oplus;": "\u2295",
+ "or;": "\u2228",
+ "orarr;": "\u21bb",
+ "ord;": "\u2a5d",
+ "order;": "\u2134",
+ "orderof;": "\u2134",
+ "ordf": "\xaa",
+ "ordf;": "\xaa",
+ "ordm": "\xba",
+ "ordm;": "\xba",
+ "origof;": "\u22b6",
+ "oror;": "\u2a56",
+ "orslope;": "\u2a57",
+ "orv;": "\u2a5b",
+ "oscr;": "\u2134",
+ "oslash": "\xf8",
+ "oslash;": "\xf8",
+ "osol;": "\u2298",
+ "otilde": "\xf5",
+ "otilde;": "\xf5",
+ "otimes;": "\u2297",
+ "otimesas;": "\u2a36",
+ "ouml": "\xf6",
+ "ouml;": "\xf6",
+ "ovbar;": "\u233d",
+ "par;": "\u2225",
+ "para": "\xb6",
+ "para;": "\xb6",
+ "parallel;": "\u2225",
+ "parsim;": "\u2af3",
+ "parsl;": "\u2afd",
+ "part;": "\u2202",
+ "pcy;": "\u043f",
+ "percnt;": "%",
+ "period;": ".",
+ "permil;": "\u2030",
+ "perp;": "\u22a5",
+ "pertenk;": "\u2031",
+ "pfr;": "\U0001d52d",
+ "phi;": "\u03c6",
+ "phiv;": "\u03d5",
+ "phmmat;": "\u2133",
+ "phone;": "\u260e",
+ "pi;": "\u03c0",
+ "pitchfork;": "\u22d4",
+ "piv;": "\u03d6",
+ "planck;": "\u210f",
+ "planckh;": "\u210e",
+ "plankv;": "\u210f",
+ "plus;": "+",
+ "plusacir;": "\u2a23",
+ "plusb;": "\u229e",
+ "pluscir;": "\u2a22",
+ "plusdo;": "\u2214",
+ "plusdu;": "\u2a25",
+ "pluse;": "\u2a72",
+ "plusmn": "\xb1",
+ "plusmn;": "\xb1",
+ "plussim;": "\u2a26",
+ "plustwo;": "\u2a27",
+ "pm;": "\xb1",
+ "pointint;": "\u2a15",
+ "popf;": "\U0001d561",
+ "pound": "\xa3",
+ "pound;": "\xa3",
+ "pr;": "\u227a",
+ "prE;": "\u2ab3",
+ "prap;": "\u2ab7",
+ "prcue;": "\u227c",
+ "pre;": "\u2aaf",
+ "prec;": "\u227a",
+ "precapprox;": "\u2ab7",
+ "preccurlyeq;": "\u227c",
+ "preceq;": "\u2aaf",
+ "precnapprox;": "\u2ab9",
+ "precneqq;": "\u2ab5",
+ "precnsim;": "\u22e8",
+ "precsim;": "\u227e",
+ "prime;": "\u2032",
+ "primes;": "\u2119",
+ "prnE;": "\u2ab5",
+ "prnap;": "\u2ab9",
+ "prnsim;": "\u22e8",
+ "prod;": "\u220f",
+ "profalar;": "\u232e",
+ "profline;": "\u2312",
+ "profsurf;": "\u2313",
+ "prop;": "\u221d",
+ "propto;": "\u221d",
+ "prsim;": "\u227e",
+ "prurel;": "\u22b0",
+ "pscr;": "\U0001d4c5",
+ "psi;": "\u03c8",
+ "puncsp;": "\u2008",
+ "qfr;": "\U0001d52e",
+ "qint;": "\u2a0c",
+ "qopf;": "\U0001d562",
+ "qprime;": "\u2057",
+ "qscr;": "\U0001d4c6",
+ "quaternions;": "\u210d",
+ "quatint;": "\u2a16",
+ "quest;": "?",
+ "questeq;": "\u225f",
+ "quot": "\"",
+ "quot;": "\"",
+ "rAarr;": "\u21db",
+ "rArr;": "\u21d2",
+ "rAtail;": "\u291c",
+ "rBarr;": "\u290f",
+ "rHar;": "\u2964",
+ "race;": "\u223d\u0331",
+ "racute;": "\u0155",
+ "radic;": "\u221a",
+ "raemptyv;": "\u29b3",
+ "rang;": "\u27e9",
+ "rangd;": "\u2992",
+ "range;": "\u29a5",
+ "rangle;": "\u27e9",
+ "raquo": "\xbb",
+ "raquo;": "\xbb",
+ "rarr;": "\u2192",
+ "rarrap;": "\u2975",
+ "rarrb;": "\u21e5",
+ "rarrbfs;": "\u2920",
+ "rarrc;": "\u2933",
+ "rarrfs;": "\u291e",
+ "rarrhk;": "\u21aa",
+ "rarrlp;": "\u21ac",
+ "rarrpl;": "\u2945",
+ "rarrsim;": "\u2974",
+ "rarrtl;": "\u21a3",
+ "rarrw;": "\u219d",
+ "ratail;": "\u291a",
+ "ratio;": "\u2236",
+ "rationals;": "\u211a",
+ "rbarr;": "\u290d",
+ "rbbrk;": "\u2773",
+ "rbrace;": "}",
+ "rbrack;": "]",
+ "rbrke;": "\u298c",
+ "rbrksld;": "\u298e",
+ "rbrkslu;": "\u2990",
+ "rcaron;": "\u0159",
+ "rcedil;": "\u0157",
+ "rceil;": "\u2309",
+ "rcub;": "}",
+ "rcy;": "\u0440",
+ "rdca;": "\u2937",
+ "rdldhar;": "\u2969",
+ "rdquo;": "\u201d",
+ "rdquor;": "\u201d",
+ "rdsh;": "\u21b3",
+ "real;": "\u211c",
+ "realine;": "\u211b",
+ "realpart;": "\u211c",
+ "reals;": "\u211d",
+ "rect;": "\u25ad",
+ "reg": "\xae",
+ "reg;": "\xae",
+ "rfisht;": "\u297d",
+ "rfloor;": "\u230b",
+ "rfr;": "\U0001d52f",
+ "rhard;": "\u21c1",
+ "rharu;": "\u21c0",
+ "rharul;": "\u296c",
+ "rho;": "\u03c1",
+ "rhov;": "\u03f1",
+ "rightarrow;": "\u2192",
+ "rightarrowtail;": "\u21a3",
+ "rightharpoondown;": "\u21c1",
+ "rightharpoonup;": "\u21c0",
+ "rightleftarrows;": "\u21c4",
+ "rightleftharpoons;": "\u21cc",
+ "rightrightarrows;": "\u21c9",
+ "rightsquigarrow;": "\u219d",
+ "rightthreetimes;": "\u22cc",
+ "ring;": "\u02da",
+ "risingdotseq;": "\u2253",
+ "rlarr;": "\u21c4",
+ "rlhar;": "\u21cc",
+ "rlm;": "\u200f",
+ "rmoust;": "\u23b1",
+ "rmoustache;": "\u23b1",
+ "rnmid;": "\u2aee",
+ "roang;": "\u27ed",
+ "roarr;": "\u21fe",
+ "robrk;": "\u27e7",
+ "ropar;": "\u2986",
+ "ropf;": "\U0001d563",
+ "roplus;": "\u2a2e",
+ "rotimes;": "\u2a35",
+ "rpar;": ")",
+ "rpargt;": "\u2994",
+ "rppolint;": "\u2a12",
+ "rrarr;": "\u21c9",
+ "rsaquo;": "\u203a",
+ "rscr;": "\U0001d4c7",
+ "rsh;": "\u21b1",
+ "rsqb;": "]",
+ "rsquo;": "\u2019",
+ "rsquor;": "\u2019",
+ "rthree;": "\u22cc",
+ "rtimes;": "\u22ca",
+ "rtri;": "\u25b9",
+ "rtrie;": "\u22b5",
+ "rtrif;": "\u25b8",
+ "rtriltri;": "\u29ce",
+ "ruluhar;": "\u2968",
+ "rx;": "\u211e",
+ "sacute;": "\u015b",
+ "sbquo;": "\u201a",
+ "sc;": "\u227b",
+ "scE;": "\u2ab4",
+ "scap;": "\u2ab8",
+ "scaron;": "\u0161",
+ "sccue;": "\u227d",
+ "sce;": "\u2ab0",
+ "scedil;": "\u015f",
+ "scirc;": "\u015d",
+ "scnE;": "\u2ab6",
+ "scnap;": "\u2aba",
+ "scnsim;": "\u22e9",
+ "scpolint;": "\u2a13",
+ "scsim;": "\u227f",
+ "scy;": "\u0441",
+ "sdot;": "\u22c5",
+ "sdotb;": "\u22a1",
+ "sdote;": "\u2a66",
+ "seArr;": "\u21d8",
+ "searhk;": "\u2925",
+ "searr;": "\u2198",
+ "searrow;": "\u2198",
+ "sect": "\xa7",
+ "sect;": "\xa7",
+ "semi;": ";",
+ "seswar;": "\u2929",
+ "setminus;": "\u2216",
+ "setmn;": "\u2216",
+ "sext;": "\u2736",
+ "sfr;": "\U0001d530",
+ "sfrown;": "\u2322",
+ "sharp;": "\u266f",
+ "shchcy;": "\u0449",
+ "shcy;": "\u0448",
+ "shortmid;": "\u2223",
+ "shortparallel;": "\u2225",
+ "shy": "\xad",
+ "shy;": "\xad",
+ "sigma;": "\u03c3",
+ "sigmaf;": "\u03c2",
+ "sigmav;": "\u03c2",
+ "sim;": "\u223c",
+ "simdot;": "\u2a6a",
+ "sime;": "\u2243",
+ "simeq;": "\u2243",
+ "simg;": "\u2a9e",
+ "simgE;": "\u2aa0",
+ "siml;": "\u2a9d",
+ "simlE;": "\u2a9f",
+ "simne;": "\u2246",
+ "simplus;": "\u2a24",
+ "simrarr;": "\u2972",
+ "slarr;": "\u2190",
+ "smallsetminus;": "\u2216",
+ "smashp;": "\u2a33",
+ "smeparsl;": "\u29e4",
+ "smid;": "\u2223",
+ "smile;": "\u2323",
+ "smt;": "\u2aaa",
+ "smte;": "\u2aac",
+ "smtes;": "\u2aac\ufe00",
+ "softcy;": "\u044c",
+ "sol;": "/",
+ "solb;": "\u29c4",
+ "solbar;": "\u233f",
+ "sopf;": "\U0001d564",
+ "spades;": "\u2660",
+ "spadesuit;": "\u2660",
+ "spar;": "\u2225",
+ "sqcap;": "\u2293",
+ "sqcaps;": "\u2293\ufe00",
+ "sqcup;": "\u2294",
+ "sqcups;": "\u2294\ufe00",
+ "sqsub;": "\u228f",
+ "sqsube;": "\u2291",
+ "sqsubset;": "\u228f",
+ "sqsubseteq;": "\u2291",
+ "sqsup;": "\u2290",
+ "sqsupe;": "\u2292",
+ "sqsupset;": "\u2290",
+ "sqsupseteq;": "\u2292",
+ "squ;": "\u25a1",
+ "square;": "\u25a1",
+ "squarf;": "\u25aa",
+ "squf;": "\u25aa",
+ "srarr;": "\u2192",
+ "sscr;": "\U0001d4c8",
+ "ssetmn;": "\u2216",
+ "ssmile;": "\u2323",
+ "sstarf;": "\u22c6",
+ "star;": "\u2606",
+ "starf;": "\u2605",
+ "straightepsilon;": "\u03f5",
+ "straightphi;": "\u03d5",
+ "strns;": "\xaf",
+ "sub;": "\u2282",
+ "subE;": "\u2ac5",
+ "subdot;": "\u2abd",
+ "sube;": "\u2286",
+ "subedot;": "\u2ac3",
+ "submult;": "\u2ac1",
+ "subnE;": "\u2acb",
+ "subne;": "\u228a",
+ "subplus;": "\u2abf",
+ "subrarr;": "\u2979",
+ "subset;": "\u2282",
+ "subseteq;": "\u2286",
+ "subseteqq;": "\u2ac5",
+ "subsetneq;": "\u228a",
+ "subsetneqq;": "\u2acb",
+ "subsim;": "\u2ac7",
+ "subsub;": "\u2ad5",
+ "subsup;": "\u2ad3",
+ "succ;": "\u227b",
+ "succapprox;": "\u2ab8",
+ "succcurlyeq;": "\u227d",
+ "succeq;": "\u2ab0",
+ "succnapprox;": "\u2aba",
+ "succneqq;": "\u2ab6",
+ "succnsim;": "\u22e9",
+ "succsim;": "\u227f",
+ "sum;": "\u2211",
+ "sung;": "\u266a",
+ "sup1": "\xb9",
+ "sup1;": "\xb9",
+ "sup2": "\xb2",
+ "sup2;": "\xb2",
+ "sup3": "\xb3",
+ "sup3;": "\xb3",
+ "sup;": "\u2283",
+ "supE;": "\u2ac6",
+ "supdot;": "\u2abe",
+ "supdsub;": "\u2ad8",
+ "supe;": "\u2287",
+ "supedot;": "\u2ac4",
+ "suphsol;": "\u27c9",
+ "suphsub;": "\u2ad7",
+ "suplarr;": "\u297b",
+ "supmult;": "\u2ac2",
+ "supnE;": "\u2acc",
+ "supne;": "\u228b",
+ "supplus;": "\u2ac0",
+ "supset;": "\u2283",
+ "supseteq;": "\u2287",
+ "supseteqq;": "\u2ac6",
+ "supsetneq;": "\u228b",
+ "supsetneqq;": "\u2acc",
+ "supsim;": "\u2ac8",
+ "supsub;": "\u2ad4",
+ "supsup;": "\u2ad6",
+ "swArr;": "\u21d9",
+ "swarhk;": "\u2926",
+ "swarr;": "\u2199",
+ "swarrow;": "\u2199",
+ "swnwar;": "\u292a",
+ "szlig": "\xdf",
+ "szlig;": "\xdf",
+ "target;": "\u2316",
+ "tau;": "\u03c4",
+ "tbrk;": "\u23b4",
+ "tcaron;": "\u0165",
+ "tcedil;": "\u0163",
+ "tcy;": "\u0442",
+ "tdot;": "\u20db",
+ "telrec;": "\u2315",
+ "tfr;": "\U0001d531",
+ "there4;": "\u2234",
+ "therefore;": "\u2234",
+ "theta;": "\u03b8",
+ "thetasym;": "\u03d1",
+ "thetav;": "\u03d1",
+ "thickapprox;": "\u2248",
+ "thicksim;": "\u223c",
+ "thinsp;": "\u2009",
+ "thkap;": "\u2248",
+ "thksim;": "\u223c",
+ "thorn": "\xfe",
+ "thorn;": "\xfe",
+ "tilde;": "\u02dc",
+ "times": "\xd7",
+ "times;": "\xd7",
+ "timesb;": "\u22a0",
+ "timesbar;": "\u2a31",
+ "timesd;": "\u2a30",
+ "tint;": "\u222d",
+ "toea;": "\u2928",
+ "top;": "\u22a4",
+ "topbot;": "\u2336",
+ "topcir;": "\u2af1",
+ "topf;": "\U0001d565",
+ "topfork;": "\u2ada",
+ "tosa;": "\u2929",
+ "tprime;": "\u2034",
+ "trade;": "\u2122",
+ "triangle;": "\u25b5",
+ "triangledown;": "\u25bf",
+ "triangleleft;": "\u25c3",
+ "trianglelefteq;": "\u22b4",
+ "triangleq;": "\u225c",
+ "triangleright;": "\u25b9",
+ "trianglerighteq;": "\u22b5",
+ "tridot;": "\u25ec",
+ "trie;": "\u225c",
+ "triminus;": "\u2a3a",
+ "triplus;": "\u2a39",
+ "trisb;": "\u29cd",
+ "tritime;": "\u2a3b",
+ "trpezium;": "\u23e2",
+ "tscr;": "\U0001d4c9",
+ "tscy;": "\u0446",
+ "tshcy;": "\u045b",
+ "tstrok;": "\u0167",
+ "twixt;": "\u226c",
+ "twoheadleftarrow;": "\u219e",
+ "twoheadrightarrow;": "\u21a0",
+ "uArr;": "\u21d1",
+ "uHar;": "\u2963",
+ "uacute": "\xfa",
+ "uacute;": "\xfa",
+ "uarr;": "\u2191",
+ "ubrcy;": "\u045e",
+ "ubreve;": "\u016d",
+ "ucirc": "\xfb",
+ "ucirc;": "\xfb",
+ "ucy;": "\u0443",
+ "udarr;": "\u21c5",
+ "udblac;": "\u0171",
+ "udhar;": "\u296e",
+ "ufisht;": "\u297e",
+ "ufr;": "\U0001d532",
+ "ugrave": "\xf9",
+ "ugrave;": "\xf9",
+ "uharl;": "\u21bf",
+ "uharr;": "\u21be",
+ "uhblk;": "\u2580",
+ "ulcorn;": "\u231c",
+ "ulcorner;": "\u231c",
+ "ulcrop;": "\u230f",
+ "ultri;": "\u25f8",
+ "umacr;": "\u016b",
+ "uml": "\xa8",
+ "uml;": "\xa8",
+ "uogon;": "\u0173",
+ "uopf;": "\U0001d566",
+ "uparrow;": "\u2191",
+ "updownarrow;": "\u2195",
+ "upharpoonleft;": "\u21bf",
+ "upharpoonright;": "\u21be",
+ "uplus;": "\u228e",
+ "upsi;": "\u03c5",
+ "upsih;": "\u03d2",
+ "upsilon;": "\u03c5",
+ "upuparrows;": "\u21c8",
+ "urcorn;": "\u231d",
+ "urcorner;": "\u231d",
+ "urcrop;": "\u230e",
+ "uring;": "\u016f",
+ "urtri;": "\u25f9",
+ "uscr;": "\U0001d4ca",
+ "utdot;": "\u22f0",
+ "utilde;": "\u0169",
+ "utri;": "\u25b5",
+ "utrif;": "\u25b4",
+ "uuarr;": "\u21c8",
+ "uuml": "\xfc",
+ "uuml;": "\xfc",
+ "uwangle;": "\u29a7",
+ "vArr;": "\u21d5",
+ "vBar;": "\u2ae8",
+ "vBarv;": "\u2ae9",
+ "vDash;": "\u22a8",
+ "vangrt;": "\u299c",
+ "varepsilon;": "\u03f5",
+ "varkappa;": "\u03f0",
+ "varnothing;": "\u2205",
+ "varphi;": "\u03d5",
+ "varpi;": "\u03d6",
+ "varpropto;": "\u221d",
+ "varr;": "\u2195",
+ "varrho;": "\u03f1",
+ "varsigma;": "\u03c2",
+ "varsubsetneq;": "\u228a\ufe00",
+ "varsubsetneqq;": "\u2acb\ufe00",
+ "varsupsetneq;": "\u228b\ufe00",
+ "varsupsetneqq;": "\u2acc\ufe00",
+ "vartheta;": "\u03d1",
+ "vartriangleleft;": "\u22b2",
+ "vartriangleright;": "\u22b3",
+ "vcy;": "\u0432",
+ "vdash;": "\u22a2",
+ "vee;": "\u2228",
+ "veebar;": "\u22bb",
+ "veeeq;": "\u225a",
+ "vellip;": "\u22ee",
+ "verbar;": "|",
+ "vert;": "|",
+ "vfr;": "\U0001d533",
+ "vltri;": "\u22b2",
+ "vnsub;": "\u2282\u20d2",
+ "vnsup;": "\u2283\u20d2",
+ "vopf;": "\U0001d567",
+ "vprop;": "\u221d",
+ "vrtri;": "\u22b3",
+ "vscr;": "\U0001d4cb",
+ "vsubnE;": "\u2acb\ufe00",
+ "vsubne;": "\u228a\ufe00",
+ "vsupnE;": "\u2acc\ufe00",
+ "vsupne;": "\u228b\ufe00",
+ "vzigzag;": "\u299a",
+ "wcirc;": "\u0175",
+ "wedbar;": "\u2a5f",
+ "wedge;": "\u2227",
+ "wedgeq;": "\u2259",
+ "weierp;": "\u2118",
+ "wfr;": "\U0001d534",
+ "wopf;": "\U0001d568",
+ "wp;": "\u2118",
+ "wr;": "\u2240",
+ "wreath;": "\u2240",
+ "wscr;": "\U0001d4cc",
+ "xcap;": "\u22c2",
+ "xcirc;": "\u25ef",
+ "xcup;": "\u22c3",
+ "xdtri;": "\u25bd",
+ "xfr;": "\U0001d535",
+ "xhArr;": "\u27fa",
+ "xharr;": "\u27f7",
+ "xi;": "\u03be",
+ "xlArr;": "\u27f8",
+ "xlarr;": "\u27f5",
+ "xmap;": "\u27fc",
+ "xnis;": "\u22fb",
+ "xodot;": "\u2a00",
+ "xopf;": "\U0001d569",
+ "xoplus;": "\u2a01",
+ "xotime;": "\u2a02",
+ "xrArr;": "\u27f9",
+ "xrarr;": "\u27f6",
+ "xscr;": "\U0001d4cd",
+ "xsqcup;": "\u2a06",
+ "xuplus;": "\u2a04",
+ "xutri;": "\u25b3",
+ "xvee;": "\u22c1",
+ "xwedge;": "\u22c0",
+ "yacute": "\xfd",
+ "yacute;": "\xfd",
+ "yacy;": "\u044f",
+ "ycirc;": "\u0177",
+ "ycy;": "\u044b",
+ "yen": "\xa5",
+ "yen;": "\xa5",
+ "yfr;": "\U0001d536",
+ "yicy;": "\u0457",
+ "yopf;": "\U0001d56a",
+ "yscr;": "\U0001d4ce",
+ "yucy;": "\u044e",
+ "yuml": "\xff",
+ "yuml;": "\xff",
+ "zacute;": "\u017a",
+ "zcaron;": "\u017e",
+ "zcy;": "\u0437",
+ "zdot;": "\u017c",
+ "zeetrf;": "\u2128",
+ "zeta;": "\u03b6",
+ "zfr;": "\U0001d537",
+ "zhcy;": "\u0436",
+ "zigrarr;": "\u21dd",
+ "zopf;": "\U0001d56b",
+ "zscr;": "\U0001d4cf",
+ "zwj;": "\u200d",
+ "zwnj;": "\u200c",
+}
+
+replacementCharacters = {
+ 0x0: "\uFFFD",
+ 0x0d: "\u000D",
+ 0x80: "\u20AC",
+ 0x81: "\u0081",
+ 0x82: "\u201A",
+ 0x83: "\u0192",
+ 0x84: "\u201E",
+ 0x85: "\u2026",
+ 0x86: "\u2020",
+ 0x87: "\u2021",
+ 0x88: "\u02C6",
+ 0x89: "\u2030",
+ 0x8A: "\u0160",
+ 0x8B: "\u2039",
+ 0x8C: "\u0152",
+ 0x8D: "\u008D",
+ 0x8E: "\u017D",
+ 0x8F: "\u008F",
+ 0x90: "\u0090",
+ 0x91: "\u2018",
+ 0x92: "\u2019",
+ 0x93: "\u201C",
+ 0x94: "\u201D",
+ 0x95: "\u2022",
+ 0x96: "\u2013",
+ 0x97: "\u2014",
+ 0x98: "\u02DC",
+ 0x99: "\u2122",
+ 0x9A: "\u0161",
+ 0x9B: "\u203A",
+ 0x9C: "\u0153",
+ 0x9D: "\u009D",
+ 0x9E: "\u017E",
+ 0x9F: "\u0178",
+}
+
+encodings = {
+ '437': 'cp437',
+ '850': 'cp850',
+ '852': 'cp852',
+ '855': 'cp855',
+ '857': 'cp857',
+ '860': 'cp860',
+ '861': 'cp861',
+ '862': 'cp862',
+ '863': 'cp863',
+ '865': 'cp865',
+ '866': 'cp866',
+ '869': 'cp869',
+ 'ansix341968': 'ascii',
+ 'ansix341986': 'ascii',
+ 'arabic': 'iso8859-6',
+ 'ascii': 'ascii',
+ 'asmo708': 'iso8859-6',
+ 'big5': 'big5',
+ 'big5hkscs': 'big5hkscs',
+ 'chinese': 'gbk',
+ 'cp037': 'cp037',
+ 'cp1026': 'cp1026',
+ 'cp154': 'ptcp154',
+ 'cp367': 'ascii',
+ 'cp424': 'cp424',
+ 'cp437': 'cp437',
+ 'cp500': 'cp500',
+ 'cp775': 'cp775',
+ 'cp819': 'windows-1252',
+ 'cp850': 'cp850',
+ 'cp852': 'cp852',
+ 'cp855': 'cp855',
+ 'cp857': 'cp857',
+ 'cp860': 'cp860',
+ 'cp861': 'cp861',
+ 'cp862': 'cp862',
+ 'cp863': 'cp863',
+ 'cp864': 'cp864',
+ 'cp865': 'cp865',
+ 'cp866': 'cp866',
+ 'cp869': 'cp869',
+ 'cp936': 'gbk',
+ 'cpgr': 'cp869',
+ 'cpis': 'cp861',
+ 'csascii': 'ascii',
+ 'csbig5': 'big5',
+ 'cseuckr': 'cp949',
+ 'cseucpkdfmtjapanese': 'euc_jp',
+ 'csgb2312': 'gbk',
+ 'cshproman8': 'hp-roman8',
+ 'csibm037': 'cp037',
+ 'csibm1026': 'cp1026',
+ 'csibm424': 'cp424',
+ 'csibm500': 'cp500',
+ 'csibm855': 'cp855',
+ 'csibm857': 'cp857',
+ 'csibm860': 'cp860',
+ 'csibm861': 'cp861',
+ 'csibm863': 'cp863',
+ 'csibm864': 'cp864',
+ 'csibm865': 'cp865',
+ 'csibm866': 'cp866',
+ 'csibm869': 'cp869',
+ 'csiso2022jp': 'iso2022_jp',
+ 'csiso2022jp2': 'iso2022_jp_2',
+ 'csiso2022kr': 'iso2022_kr',
+ 'csiso58gb231280': 'gbk',
+ 'csisolatin1': 'windows-1252',
+ 'csisolatin2': 'iso8859-2',
+ 'csisolatin3': 'iso8859-3',
+ 'csisolatin4': 'iso8859-4',
+ 'csisolatin5': 'windows-1254',
+ 'csisolatin6': 'iso8859-10',
+ 'csisolatinarabic': 'iso8859-6',
+ 'csisolatincyrillic': 'iso8859-5',
+ 'csisolatingreek': 'iso8859-7',
+ 'csisolatinhebrew': 'iso8859-8',
+ 'cskoi8r': 'koi8-r',
+ 'csksc56011987': 'cp949',
+ 'cspc775baltic': 'cp775',
+ 'cspc850multilingual': 'cp850',
+ 'cspc862latinhebrew': 'cp862',
+ 'cspc8codepage437': 'cp437',
+ 'cspcp852': 'cp852',
+ 'csptcp154': 'ptcp154',
+ 'csshiftjis': 'shift_jis',
+ 'csunicode11utf7': 'utf-7',
+ 'cyrillic': 'iso8859-5',
+ 'cyrillicasian': 'ptcp154',
+ 'ebcdiccpbe': 'cp500',
+ 'ebcdiccpca': 'cp037',
+ 'ebcdiccpch': 'cp500',
+ 'ebcdiccphe': 'cp424',
+ 'ebcdiccpnl': 'cp037',
+ 'ebcdiccpus': 'cp037',
+ 'ebcdiccpwt': 'cp037',
+ 'ecma114': 'iso8859-6',
+ 'ecma118': 'iso8859-7',
+ 'elot928': 'iso8859-7',
+ 'eucjp': 'euc_jp',
+ 'euckr': 'cp949',
+ 'extendedunixcodepackedformatforjapanese': 'euc_jp',
+ 'gb18030': 'gb18030',
+ 'gb2312': 'gbk',
+ 'gb231280': 'gbk',
+ 'gbk': 'gbk',
+ 'greek': 'iso8859-7',
+ 'greek8': 'iso8859-7',
+ 'hebrew': 'iso8859-8',
+ 'hproman8': 'hp-roman8',
+ 'hzgb2312': 'hz',
+ 'ibm037': 'cp037',
+ 'ibm1026': 'cp1026',
+ 'ibm367': 'ascii',
+ 'ibm424': 'cp424',
+ 'ibm437': 'cp437',
+ 'ibm500': 'cp500',
+ 'ibm775': 'cp775',
+ 'ibm819': 'windows-1252',
+ 'ibm850': 'cp850',
+ 'ibm852': 'cp852',
+ 'ibm855': 'cp855',
+ 'ibm857': 'cp857',
+ 'ibm860': 'cp860',
+ 'ibm861': 'cp861',
+ 'ibm862': 'cp862',
+ 'ibm863': 'cp863',
+ 'ibm864': 'cp864',
+ 'ibm865': 'cp865',
+ 'ibm866': 'cp866',
+ 'ibm869': 'cp869',
+ 'iso2022jp': 'iso2022_jp',
+ 'iso2022jp2': 'iso2022_jp_2',
+ 'iso2022kr': 'iso2022_kr',
+ 'iso646irv1991': 'ascii',
+ 'iso646us': 'ascii',
+ 'iso88591': 'windows-1252',
+ 'iso885910': 'iso8859-10',
+ 'iso8859101992': 'iso8859-10',
+ 'iso885911987': 'windows-1252',
+ 'iso885913': 'iso8859-13',
+ 'iso885914': 'iso8859-14',
+ 'iso8859141998': 'iso8859-14',
+ 'iso885915': 'iso8859-15',
+ 'iso885916': 'iso8859-16',
+ 'iso8859162001': 'iso8859-16',
+ 'iso88592': 'iso8859-2',
+ 'iso885921987': 'iso8859-2',
+ 'iso88593': 'iso8859-3',
+ 'iso885931988': 'iso8859-3',
+ 'iso88594': 'iso8859-4',
+ 'iso885941988': 'iso8859-4',
+ 'iso88595': 'iso8859-5',
+ 'iso885951988': 'iso8859-5',
+ 'iso88596': 'iso8859-6',
+ 'iso885961987': 'iso8859-6',
+ 'iso88597': 'iso8859-7',
+ 'iso885971987': 'iso8859-7',
+ 'iso88598': 'iso8859-8',
+ 'iso885981988': 'iso8859-8',
+ 'iso88599': 'windows-1254',
+ 'iso885991989': 'windows-1254',
+ 'isoceltic': 'iso8859-14',
+ 'isoir100': 'windows-1252',
+ 'isoir101': 'iso8859-2',
+ 'isoir109': 'iso8859-3',
+ 'isoir110': 'iso8859-4',
+ 'isoir126': 'iso8859-7',
+ 'isoir127': 'iso8859-6',
+ 'isoir138': 'iso8859-8',
+ 'isoir144': 'iso8859-5',
+ 'isoir148': 'windows-1254',
+ 'isoir149': 'cp949',
+ 'isoir157': 'iso8859-10',
+ 'isoir199': 'iso8859-14',
+ 'isoir226': 'iso8859-16',
+ 'isoir58': 'gbk',
+ 'isoir6': 'ascii',
+ 'koi8r': 'koi8-r',
+ 'koi8u': 'koi8-u',
+ 'korean': 'cp949',
+ 'ksc5601': 'cp949',
+ 'ksc56011987': 'cp949',
+ 'ksc56011989': 'cp949',
+ 'l1': 'windows-1252',
+ 'l10': 'iso8859-16',
+ 'l2': 'iso8859-2',
+ 'l3': 'iso8859-3',
+ 'l4': 'iso8859-4',
+ 'l5': 'windows-1254',
+ 'l6': 'iso8859-10',
+ 'l8': 'iso8859-14',
+ 'latin1': 'windows-1252',
+ 'latin10': 'iso8859-16',
+ 'latin2': 'iso8859-2',
+ 'latin3': 'iso8859-3',
+ 'latin4': 'iso8859-4',
+ 'latin5': 'windows-1254',
+ 'latin6': 'iso8859-10',
+ 'latin8': 'iso8859-14',
+ 'latin9': 'iso8859-15',
+ 'ms936': 'gbk',
+ 'mskanji': 'shift_jis',
+ 'pt154': 'ptcp154',
+ 'ptcp154': 'ptcp154',
+ 'r8': 'hp-roman8',
+ 'roman8': 'hp-roman8',
+ 'shiftjis': 'shift_jis',
+ 'tis620': 'cp874',
+ 'unicode11utf7': 'utf-7',
+ 'us': 'ascii',
+ 'usascii': 'ascii',
+ 'utf16': 'utf-16',
+ 'utf16be': 'utf-16-be',
+ 'utf16le': 'utf-16-le',
+ 'utf8': 'utf-8',
+ 'windows1250': 'cp1250',
+ 'windows1251': 'cp1251',
+ 'windows1252': 'cp1252',
+ 'windows1253': 'cp1253',
+ 'windows1254': 'cp1254',
+ 'windows1255': 'cp1255',
+ 'windows1256': 'cp1256',
+ 'windows1257': 'cp1257',
+ 'windows1258': 'cp1258',
+ 'windows936': 'gbk',
+ 'x-x-big5': 'big5'}
+
+tokenTypes = {
+ "Doctype": 0,
+ "Characters": 1,
+ "SpaceCharacters": 2,
+ "StartTag": 3,
+ "EndTag": 4,
+ "EmptyTag": 5,
+ "Comment": 6,
+ "ParseError": 7
+}
+
+tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
+ tokenTypes["EmptyTag"]))
+
+
+prefixes = dict([(v, k) for k, v in namespaces.items()])
+prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
+
+
+class DataLossWarning(UserWarning):
+ pass
+
+
+class ReparseException(Exception):
+ pass
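
These tables are consumed as plain dictionaries elsewhere in the package, so their behaviour can be sanity-checked with ordinary lookups. A minimal sketch, assuming the entity table that closes above is exposed as constants.entities (the name upstream html5lib uses for it) and that the package is importable:

    from html5lib import constants

    # Entities that may legally drop the trailing semicolon appear under two keys.
    assert constants.entities["copy"] == constants.entities["copy;"] == "\xa9"
    # Some entities expand to more than one code point.
    assert constants.entities["bne;"] == "=\u20e5"
    # Windows-1252 fallback for numeric character references in the C1 range.
    assert constants.replacementCharacters[0x80] == "\u20ac"
    # Encoding labels are normalised to Python codec names.
    assert constants.encodings["latin1"] == "windows-1252"
    # Integer token types, and the frozen subset that denotes tags.
    assert constants.tokenTypes["StartTag"] in constants.tagTokenTypes
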
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/filters/__init__.py b/testing/web-platform/tests/tools/html5lib/html5lib/filters/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/filters/__init__.py
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/filters/_base.py b/testing/web-platform/tests/tools/html5lib/html5lib/filters/_base.py
new file mode 100644
index 000000000..c7dbaed0f
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/filters/_base.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import, division, unicode_literals
+
+
+class Filter(object):
+ def __init__(self, source):
+ self.source = source
+
+ def __iter__(self):
+ return iter(self.source)
+
+ def __getattr__(self, name):
+ return getattr(self.source, name)
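
Filter here is a thin wrapper: it stores the source token stream, iterates over it unchanged, and forwards any other attribute access to the source. A minimal sketch of the intended subclassing pattern, using a hand-built token list in the dict shape the other filters expect (the subclass name is hypothetical):

    from html5lib.filters import _base

    class UppercaseText(_base.Filter):
        # Toy filter: upper-case the text of Characters tokens.
        def __iter__(self):
            for token in _base.Filter.__iter__(self):
                if token["type"] == "Characters":
                    token = dict(token, data=token["data"].upper())
                yield token

    tokens = [{"type": "Characters", "data": "hello"}]
    assert [t["data"] for t in UppercaseText(tokens)] == ["HELLO"]
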
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/filters/alphabeticalattributes.py b/testing/web-platform/tests/tools/html5lib/html5lib/filters/alphabeticalattributes.py
new file mode 100644
index 000000000..fed6996c1
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/filters/alphabeticalattributes.py
@@ -0,0 +1,20 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import _base
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict
+
+
+class Filter(_base.Filter):
+ def __iter__(self):
+ for token in _base.Filter.__iter__(self):
+ if token["type"] in ("StartTag", "EmptyTag"):
+ attrs = OrderedDict()
+ for name, value in sorted(token["data"].items(),
+ key=lambda x: x[0]):
+ attrs[name] = value
+ token["data"] = attrs
+ yield token
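
The filter rewrites each StartTag/EmptyTag token's attribute mapping into an OrderedDict sorted by its (namespace, name) key, which makes serializer output deterministic. A small sketch with a hand-built token:

    from collections import OrderedDict
    from html5lib.filters import alphabeticalattributes

    token = {"type": "StartTag", "name": "a",
             "data": OrderedDict([((None, "title"), "t"),
                                  ((None, "class"), "c")])}
    out = next(iter(alphabeticalattributes.Filter([token])))
    assert list(out["data"]) == [(None, "class"), (None, "title")]
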
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/filters/inject_meta_charset.py b/testing/web-platform/tests/tools/html5lib/html5lib/filters/inject_meta_charset.py
new file mode 100644
index 000000000..ca33b70b5
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/filters/inject_meta_charset.py
@@ -0,0 +1,65 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import _base
+
+
+class Filter(_base.Filter):
+ def __init__(self, source, encoding):
+ _base.Filter.__init__(self, source)
+ self.encoding = encoding
+
+ def __iter__(self):
+ state = "pre_head"
+ meta_found = (self.encoding is None)
+ pending = []
+
+ for token in _base.Filter.__iter__(self):
+ type = token["type"]
+ if type == "StartTag":
+ if token["name"].lower() == "head":
+ state = "in_head"
+
+ elif type == "EmptyTag":
+ if token["name"].lower() == "meta":
+ # replace charset with actual encoding
+ has_http_equiv_content_type = False
+ for (namespace, name), value in token["data"].items():
+ if namespace is not None:
+ continue
+ elif name.lower() == 'charset':
+ token["data"][(namespace, name)] = self.encoding
+ meta_found = True
+ break
+ elif name == 'http-equiv' and value.lower() == 'content-type':
+ has_http_equiv_content_type = True
+ else:
+ if has_http_equiv_content_type and (None, "content") in token["data"]:
+ token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
+ meta_found = True
+
+ elif token["name"].lower() == "head" and not meta_found:
+ # insert meta into empty head
+ yield {"type": "StartTag", "name": "head",
+ "data": token["data"]}
+ yield {"type": "EmptyTag", "name": "meta",
+ "data": {(None, "charset"): self.encoding}}
+ yield {"type": "EndTag", "name": "head"}
+ meta_found = True
+ continue
+
+ elif type == "EndTag":
+ if token["name"].lower() == "head" and pending:
+ # insert meta into head (if necessary) and flush pending queue
+ yield pending.pop(0)
+ if not meta_found:
+ yield {"type": "EmptyTag", "name": "meta",
+ "data": {(None, "charset"): self.encoding}}
+ while pending:
+ yield pending.pop(0)
+ meta_found = True
+ state = "post_head"
+
+ if state == "in_head":
+ pending.append(token)
+ else:
+ yield token
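
Walking through the state machine above: tokens seen between the head start and end tags are buffered in pending, an existing meta charset or http-equiv content-type declaration is rewritten to the requested encoding, and if none is found a meta charset tag is emitted when the head closes. A minimal sketch with a hand-built head:

    from html5lib.filters import inject_meta_charset

    tokens = [{"type": "StartTag", "name": "head", "data": {}},
              {"type": "EndTag", "name": "head", "data": {}}]
    out = list(inject_meta_charset.Filter(tokens, "utf-8"))
    assert {"type": "EmptyTag", "name": "meta",
            "data": {(None, "charset"): "utf-8"}} in out
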
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/filters/lint.py b/testing/web-platform/tests/tools/html5lib/html5lib/filters/lint.py
new file mode 100644
index 000000000..7cc99a4ba
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/filters/lint.py
@@ -0,0 +1,93 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from gettext import gettext
+_ = gettext
+
+from . import _base
+from ..constants import cdataElements, rcdataElements, voidElements
+
+from ..constants import spaceCharacters
+spaceCharacters = "".join(spaceCharacters)
+
+
+class LintError(Exception):
+ pass
+
+
+class Filter(_base.Filter):
+ def __iter__(self):
+ open_elements = []
+ contentModelFlag = "PCDATA"
+ for token in _base.Filter.__iter__(self):
+ type = token["type"]
+ if type in ("StartTag", "EmptyTag"):
+ name = token["name"]
+ if contentModelFlag != "PCDATA":
+ raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
+ if not isinstance(name, str):
+ raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
+ if not name:
+ raise LintError(_("Empty tag name"))
+ if type == "StartTag" and name in voidElements:
+ raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
+ elif type == "EmptyTag" and name not in voidElements:
+ raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
+ if type == "StartTag":
+ open_elements.append(name)
+ for name, value in token["data"]:
+ if not isinstance(name, str):
+ raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
+ if not name:
+ raise LintError(_("Empty attribute name"))
+ if not isinstance(value, str):
+ raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
+ if name in cdataElements:
+ contentModelFlag = "CDATA"
+ elif name in rcdataElements:
+ contentModelFlag = "RCDATA"
+ elif name == "plaintext":
+ contentModelFlag = "PLAINTEXT"
+
+ elif type == "EndTag":
+ name = token["name"]
+ if not isinstance(name, str):
+ raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
+ if not name:
+ raise LintError(_("Empty tag name"))
+ if name in voidElements:
+ raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
+ start_name = open_elements.pop()
+ if start_name != name:
+ raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
+ contentModelFlag = "PCDATA"
+
+ elif type == "Comment":
+ if contentModelFlag != "PCDATA":
+ raise LintError(_("Comment not in PCDATA content model flag"))
+
+ elif type in ("Characters", "SpaceCharacters"):
+ data = token["data"]
+ if not isinstance(data, str):
+ raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
+ if not data:
+ raise LintError(_("%(type)s token with empty data") % {"type": type})
+ if type == "SpaceCharacters":
+ data = data.strip(spaceCharacters)
+ if data:
+ raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})
+
+ elif type == "Doctype":
+ name = token["name"]
+ if contentModelFlag != "PCDATA":
+ raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
+ if not isinstance(name, str):
+ raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
+ # XXX: what to do with token["data"] ?
+
+ elif type in ("ParseError", "SerializeError"):
+ pass
+
+ else:
+ raise LintError(_("Unknown token type: %(type)s") % {"type": type})
+
+ yield token
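
The lint filter passes tokens through untouched but raises LintError as soon as the stream violates one of the invariants checked above (wrong types, empty names, mismatched end tags, and so on). A minimal sketch under Python 3, where token strings are plain str:

    from html5lib.filters import lint

    good = [{"type": "Characters", "data": "hi"}]
    assert list(lint.Filter(good)) == good

    bad = [{"type": "Characters", "data": ""}]   # empty data is rejected
    try:
        list(lint.Filter(bad))
    except lint.LintError:
        pass
    else:
        raise AssertionError("expected a LintError")
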
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/filters/optionaltags.py b/testing/web-platform/tests/tools/html5lib/html5lib/filters/optionaltags.py
new file mode 100644
index 000000000..fefe0b309
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/filters/optionaltags.py
@@ -0,0 +1,205 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import _base
+
+
+class Filter(_base.Filter):
+ def slider(self):
+ previous1 = previous2 = None
+ for token in self.source:
+ if previous1 is not None:
+ yield previous2, previous1, token
+ previous2 = previous1
+ previous1 = token
+ yield previous2, previous1, None
+
+ def __iter__(self):
+ for previous, token, next in self.slider():
+ type = token["type"]
+ if type == "StartTag":
+ if (token["data"] or
+ not self.is_optional_start(token["name"], previous, next)):
+ yield token
+ elif type == "EndTag":
+ if not self.is_optional_end(token["name"], next):
+ yield token
+ else:
+ yield token
+
+ def is_optional_start(self, tagname, previous, next):
+ type = next and next["type"] or None
+ if tagname == 'html':
+ # An html element's start tag may be omitted if the first thing
+ # inside the html element is not a space character or a comment.
+ return type not in ("Comment", "SpaceCharacters")
+ elif tagname == 'head':
+ # A head element's start tag may be omitted if the first thing
+ # inside the head element is an element.
+ # XXX: we also omit the start tag if the head element is empty
+ if type in ("StartTag", "EmptyTag"):
+ return True
+ elif type == "EndTag":
+ return next["name"] == "head"
+ elif tagname == 'body':
+ # A body element's start tag may be omitted if the first thing
+ # inside the body element is not a space character or a comment,
+ # except if the first thing inside the body element is a script
+ # or style element and the node immediately preceding the body
+ # element is a head element whose end tag has been omitted.
+ if type in ("Comment", "SpaceCharacters"):
+ return False
+ elif type == "StartTag":
+ # XXX: we do not look at the preceding event, so we never omit
+ # the body element's start tag if it's followed by a script or
+ # a style element.
+ return next["name"] not in ('script', 'style')
+ else:
+ return True
+ elif tagname == 'colgroup':
+ # A colgroup element's start tag may be omitted if the first thing
+ # inside the colgroup element is a col element, and if the element
+ # is not immediately preceded by another colgroup element whose
+ # end tag has been omitted.
+ if type in ("StartTag", "EmptyTag"):
+ # XXX: we do not look at the preceding event, so instead we never
+ # omit the colgroup element's end tag when it is immediately
+ # followed by another colgroup element. See is_optional_end.
+ return next["name"] == "col"
+ else:
+ return False
+ elif tagname == 'tbody':
+ # A tbody element's start tag may be omitted if the first thing
+ # inside the tbody element is a tr element, and if the element is
+ # not immediately preceded by a tbody, thead, or tfoot element
+ # whose end tag has been omitted.
+ if type == "StartTag":
+ # omit the thead and tfoot elements' end tag when they are
+ # immediately followed by a tbody element. See is_optional_end.
+ if previous and previous['type'] == 'EndTag' and \
+ previous['name'] in ('tbody', 'thead', 'tfoot'):
+ return False
+ return next["name"] == 'tr'
+ else:
+ return False
+ return False
+
+ def is_optional_end(self, tagname, next):
+ type = next and next["type"] or None
+ if tagname in ('html', 'head', 'body'):
+ # An html element's end tag may be omitted if the html element
+ # is not immediately followed by a space character or a comment.
+ return type not in ("Comment", "SpaceCharacters")
+ elif tagname in ('li', 'optgroup', 'tr'):
+ # A li element's end tag may be omitted if the li element is
+ # immediately followed by another li element or if there is
+ # no more content in the parent element.
+ # An optgroup element's end tag may be omitted if the optgroup
+ # element is immediately followed by another optgroup element,
+ # or if there is no more content in the parent element.
+ # A tr element's end tag may be omitted if the tr element is
+ # immediately followed by another tr element, or if there is
+ # no more content in the parent element.
+ if type == "StartTag":
+ return next["name"] == tagname
+ else:
+ return type == "EndTag" or type is None
+ elif tagname in ('dt', 'dd'):
+ # A dt element's end tag may be omitted if the dt element is
+ # immediately followed by another dt element or a dd element.
+ # A dd element's end tag may be omitted if the dd element is
+ # immediately followed by another dd element or a dt element,
+ # or if there is no more content in the parent element.
+ if type == "StartTag":
+ return next["name"] in ('dt', 'dd')
+ elif tagname == 'dd':
+ return type == "EndTag" or type is None
+ else:
+ return False
+ elif tagname == 'p':
+ # A p element's end tag may be omitted if the p element is
+ # immediately followed by an address, article, aside,
+ # blockquote, datagrid, dialog, dir, div, dl, fieldset,
+ # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
+ # nav, ol, p, pre, section, table, or ul, element, or if
+ # there is no more content in the parent element.
+ if type in ("StartTag", "EmptyTag"):
+ return next["name"] in ('address', 'article', 'aside',
+ 'blockquote', 'datagrid', 'dialog',
+ 'dir', 'div', 'dl', 'fieldset', 'footer',
+ 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
+ 'header', 'hr', 'menu', 'nav', 'ol',
+ 'p', 'pre', 'section', 'table', 'ul')
+ else:
+ return type == "EndTag" or type is None
+ elif tagname == 'option':
+ # An option element's end tag may be omitted if the option
+ # element is immediately followed by another option element,
+ # or if it is immediately followed by an <code>optgroup</code>
+ # element, or if there is no more content in the parent
+ # element.
+ if type == "StartTag":
+ return next["name"] in ('option', 'optgroup')
+ else:
+ return type == "EndTag" or type is None
+ elif tagname in ('rt', 'rp'):
+ # An rt element's end tag may be omitted if the rt element is
+ # immediately followed by an rt or rp element, or if there is
+ # no more content in the parent element.
+ # An rp element's end tag may be omitted if the rp element is
+ # immediately followed by an rt or rp element, or if there is
+ # no more content in the parent element.
+ if type == "StartTag":
+ return next["name"] in ('rt', 'rp')
+ else:
+ return type == "EndTag" or type is None
+ elif tagname == 'colgroup':
+ # A colgroup element's end tag may be omitted if the colgroup
+ # element is not immediately followed by a space character or
+ # a comment.
+ if type in ("Comment", "SpaceCharacters"):
+ return False
+ elif type == "StartTag":
+ # XXX: we also look for an immediately following colgroup
+ # element. See is_optional_start.
+ return next["name"] != 'colgroup'
+ else:
+ return True
+ elif tagname in ('thead', 'tbody'):
+ # A thead element's end tag may be omitted if the thead element
+ # is immediately followed by a tbody or tfoot element.
+ # A tbody element's end tag may be omitted if the tbody element
+ # is immediately followed by a tbody or tfoot element, or if
+ # there is no more content in the parent element.
+ # A tfoot element's end tag may be omitted if the tfoot element
+ # is immediately followed by a tbody element, or if there is no
+ # more content in the parent element.
+ # XXX: we never omit the end tag when the following element is
+ # a tbody. See is_optional_start.
+ if type == "StartTag":
+ return next["name"] in ['tbody', 'tfoot']
+ elif tagname == 'tbody':
+ return type == "EndTag" or type is None
+ else:
+ return False
+ elif tagname == 'tfoot':
+ # A tfoot element's end tag may be omitted if the tfoot element
+ # is immediately followed by a tbody element, or if there is no
+ # more content in the parent element.
+ # XXX: we never omit the end tag when the following element is
+ # a tbody. See is_optional_start.
+ if type == "StartTag":
+ return next["name"] == 'tbody'
+ else:
+ return type == "EndTag" or type is None
+ elif tagname in ('td', 'th'):
+ # A td element's end tag may be omitted if the td element is
+ # immediately followed by a td or th element, or if there is
+ # no more content in the parent element.
+ # A th element's end tag may be omitted if the th element is
+ # immediately followed by a td or th element, or if there is
+ # no more content in the parent element.
+ if type == "StartTag":
+ return next["name"] in ('td', 'th')
+ else:
+ return type == "EndTag" or type is None
+ return False
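
Taken together, slider() gives each token its neighbours and the two predicates decide, per the HTML spec rules quoted in the comments above, whether a start or end tag can be dropped from the serialized output. A small sketch showing both li end tags being omitted (the first because another li follows, the second because the stream ends):

    from html5lib.filters import optionaltags

    tokens = [{"type": "StartTag", "name": "li", "data": {}},
              {"type": "Characters", "data": "one"},
              {"type": "EndTag", "name": "li", "data": {}},
              {"type": "StartTag", "name": "li", "data": {}},
              {"type": "Characters", "data": "two"},
              {"type": "EndTag", "name": "li", "data": {}}]
    kept = [(t["type"], t.get("name")) for t in optionaltags.Filter(tokens)]
    assert ("EndTag", "li") not in kept
    assert kept.count(("StartTag", "li")) == 2
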
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/filters/sanitizer.py b/testing/web-platform/tests/tools/html5lib/html5lib/filters/sanitizer.py
new file mode 100644
index 000000000..b206b54e7
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/filters/sanitizer.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import _base
+from ..sanitizer import HTMLSanitizerMixin
+
+
+class Filter(_base.Filter, HTMLSanitizerMixin):
+ def __iter__(self):
+ for token in _base.Filter.__iter__(self):
+ token = self.sanitize_token(token)
+ if token:
+ yield token
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/filters/whitespace.py b/testing/web-platform/tests/tools/html5lib/html5lib/filters/whitespace.py
new file mode 100644
index 000000000..dfc60eebd
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/filters/whitespace.py
@@ -0,0 +1,38 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import re
+
+from . import _base
+from ..constants import rcdataElements, spaceCharacters
+spaceCharacters = "".join(spaceCharacters)
+
+SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
+
+
+class Filter(_base.Filter):
+
+ spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
+
+ def __iter__(self):
+ preserve = 0
+ for token in _base.Filter.__iter__(self):
+ type = token["type"]
+ if type == "StartTag" \
+ and (preserve or token["name"] in self.spacePreserveElements):
+ preserve += 1
+
+ elif type == "EndTag" and preserve:
+ preserve -= 1
+
+ elif not preserve and type == "SpaceCharacters" and token["data"]:
+ # The check on token["data"] above avoids introducing spaces where there were none
+ token["data"] = " "
+
+ elif not preserve and type == "Characters":
+ token["data"] = collapse_spaces(token["data"])
+
+ yield token
+
+
+def collapse_spaces(text):
+ return SPACES_REGEX.sub(' ', text)
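
Outside the space-preserving elements, runs of HTML whitespace in Characters tokens are collapsed to a single space and SpaceCharacters tokens shrink to " "; inside pre, textarea and the RCDATA elements the text is left alone. A small sketch:

    from html5lib.filters import whitespace

    tokens = [{"type": "Characters", "data": "a   b"},
              {"type": "StartTag", "name": "pre", "data": {}},
              {"type": "Characters", "data": "c   d"},
              {"type": "EndTag", "name": "pre", "data": {}}]
    out = [t.get("data") for t in whitespace.Filter(tokens)]
    assert out[0] == "a b"      # collapsed outside pre
    assert out[2] == "c   d"    # preserved inside pre
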
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/html5parser.py b/testing/web-platform/tests/tools/html5lib/html5lib/html5parser.py
new file mode 100644
index 000000000..5b9ce7d72
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/html5parser.py
@@ -0,0 +1,2723 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import with_metaclass
+
+import types
+
+from . import inputstream
+from . import tokenizer
+
+from . import treebuilders
+from .treebuilders._base import Marker
+
+from . import utils
+from . import constants
+from .constants import spaceCharacters, asciiUpper2Lower
+from .constants import specialElements
+from .constants import headingElements
+from .constants import cdataElements, rcdataElements
+from .constants import tokenTypes, ReparseException, namespaces
+from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
+from .constants import adjustForeignAttributes as adjustForeignAttributesMap
+
+
+def parse(doc, treebuilder="etree", encoding=None,
+ namespaceHTMLElements=True):
+ """Parse a string or file-like object into a tree"""
+ tb = treebuilders.getTreeBuilder(treebuilder)
+ p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
+ return p.parse(doc, encoding=encoding)
+
+
+def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
+ namespaceHTMLElements=True):
+ tb = treebuilders.getTreeBuilder(treebuilder)
+ p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
+ return p.parseFragment(doc, container=container, encoding=encoding)
+
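+# A minimal usage sketch of the two convenience helpers above; with the
+# default "etree" treebuilder the results are xml.etree.ElementTree elements.
+#
+#     import html5lib
+#     document = html5lib.parse("<p>Hello<table>")
+#     fragment = html5lib.parseFragment("<td>cell</td>", container="tr")
+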
+
+def method_decorator_metaclass(function):
+ class Decorated(type):
+ def __new__(meta, classname, bases, classDict):
+ for attributeName, attribute in classDict.items():
+ if isinstance(attribute, types.FunctionType):
+ attribute = function(attribute)
+
+ classDict[attributeName] = attribute
+ return type.__new__(meta, classname, bases, classDict)
+ return Decorated
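+
+# A sketch of what the factory above produces, assuming some decorator
+# trace_fn: every plain function in the class body is wrapped before the class
+# object is created. getPhases() below uses exactly this with its log()
+# decorator when debug is enabled.
+#
+#     class TracedPhase(with_metaclass(method_decorator_metaclass(trace_fn))):
+#         def processStartTag(self, token):
+#             return token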
+
+
+class HTMLParser(object):
+ """HTML parser. Generates a tree structure from a stream of (possibly
+ malformed) HTML"""
+
+ def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
+ strict=False, namespaceHTMLElements=True, debug=False):
+ """
+ strict - raise an exception when a parse error is encountered
+
+        tree - a treebuilder class controlling the type of tree that will be
+        returned. Built-in treebuilders can be accessed through
+        html5lib.treebuilders.getTreeBuilder(treeType)
+
+        tokenizer - a class that provides a stream of tokens to the treebuilder.
+        This may be replaced with, e.g., a sanitizer that converts some tags to
+        text
+ """
+
+ # Raise an exception on the first error encountered
+ self.strict = strict
+
+ if tree is None:
+ tree = treebuilders.getTreeBuilder("etree")
+ self.tree = tree(namespaceHTMLElements)
+ self.tokenizer_class = tokenizer
+ self.errors = []
+
+ self.phases = dict([(name, cls(self, self.tree)) for name, cls in
+ getPhases(debug).items()])
+
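+    # A minimal construction sketch; "dom" and "lxml" are other built-in
+    # treebuilder names besides the default "etree".
+    #
+    #     from html5lib import treebuilders
+    #     parser = HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
+    #     document = parser.parse("<!DOCTYPE html><p>text")
+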
+ def _parse(self, stream, innerHTML=False, container="div",
+ encoding=None, parseMeta=True, useChardet=True, **kwargs):
+
+ self.innerHTMLMode = innerHTML
+ self.container = container
+ self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
+ parseMeta=parseMeta,
+ useChardet=useChardet,
+ parser=self, **kwargs)
+ self.reset()
+
+ while True:
+ try:
+ self.mainLoop()
+ break
+ except ReparseException:
+ self.reset()
+
+ def reset(self):
+ self.tree.reset()
+ self.firstStartTag = False
+ self.errors = []
+ self.log = [] # only used with debug mode
+ # "quirks" / "limited quirks" / "no quirks"
+ self.compatMode = "no quirks"
+
+ if self.innerHTMLMode:
+ self.innerHTML = self.container.lower()
+
+ if self.innerHTML in cdataElements:
+ self.tokenizer.state = self.tokenizer.rcdataState
+ elif self.innerHTML in rcdataElements:
+ self.tokenizer.state = self.tokenizer.rawtextState
+ elif self.innerHTML == 'plaintext':
+ self.tokenizer.state = self.tokenizer.plaintextState
+ else:
+ # state already is data state
+ # self.tokenizer.state = self.tokenizer.dataState
+ pass
+ self.phase = self.phases["beforeHtml"]
+ self.phase.insertHtmlElement()
+ self.resetInsertionMode()
+ else:
+ self.innerHTML = False
+ self.phase = self.phases["initial"]
+
+ self.lastPhase = None
+
+ self.beforeRCDataPhase = None
+
+ self.framesetOK = True
+
+ @property
+ def documentEncoding(self):
+        """The name of the character encoding used to decode the input stream,
+        or :obj:`None` if it has not been determined yet.
+
+ """
+ if not hasattr(self, 'tokenizer'):
+ return None
+ return self.tokenizer.stream.charEncoding[0]
+
+ def isHTMLIntegrationPoint(self, element):
+ if (element.name == "annotation-xml" and
+ element.namespace == namespaces["mathml"]):
+ return ("encoding" in element.attributes and
+ element.attributes["encoding"].translate(
+ asciiUpper2Lower) in
+ ("text/html", "application/xhtml+xml"))
+ else:
+ return (element.namespace, element.name) in htmlIntegrationPointElements
+
+ def isMathMLTextIntegrationPoint(self, element):
+ return (element.namespace, element.name) in mathmlTextIntegrationPointElements
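+
+    # Integration points decide when the dispatcher in mainLoop() below leaves
+    # foreign-content handling: per the HTML spec tables these constants
+    # encode, SVG <foreignObject>, <desc> and <title> are HTML integration
+    # points, <annotation-xml> is one when its encoding is text/html or
+    # application/xhtml+xml (the special case above), and MathML <mi>, <mo>,
+    # <mn>, <ms> and <mtext> are MathML text integration points.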
+
+ def mainLoop(self):
+ CharactersToken = tokenTypes["Characters"]
+ SpaceCharactersToken = tokenTypes["SpaceCharacters"]
+ StartTagToken = tokenTypes["StartTag"]
+ EndTagToken = tokenTypes["EndTag"]
+ CommentToken = tokenTypes["Comment"]
+ DoctypeToken = tokenTypes["Doctype"]
+ ParseErrorToken = tokenTypes["ParseError"]
+
+ for token in self.normalizedTokens():
+ new_token = token
+ while new_token is not None:
+ currentNode = self.tree.openElements[-1] if self.tree.openElements else None
+ currentNodeNamespace = currentNode.namespace if currentNode else None
+ currentNodeName = currentNode.name if currentNode else None
+
+ type = new_token["type"]
+
+ if type == ParseErrorToken:
+ self.parseError(new_token["data"], new_token.get("datavars", {}))
+ new_token = None
+ else:
+ if (len(self.tree.openElements) == 0 or
+ currentNodeNamespace == self.tree.defaultNamespace or
+ (self.isMathMLTextIntegrationPoint(currentNode) and
+ ((type == StartTagToken and
+ token["name"] not in frozenset(["mglyph", "malignmark"])) or
+ type in (CharactersToken, SpaceCharactersToken))) or
+ (currentNodeNamespace == namespaces["mathml"] and
+ currentNodeName == "annotation-xml" and
+ token["name"] == "svg") or
+ (self.isHTMLIntegrationPoint(currentNode) and
+ type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
+ phase = self.phase
+ else:
+ phase = self.phases["inForeignContent"]
+
+ if type == CharactersToken:
+ new_token = phase.processCharacters(new_token)
+ elif type == SpaceCharactersToken:
+ new_token = phase.processSpaceCharacters(new_token)
+ elif type == StartTagToken:
+ new_token = phase.processStartTag(new_token)
+ elif type == EndTagToken:
+ new_token = phase.processEndTag(new_token)
+ elif type == CommentToken:
+ new_token = phase.processComment(new_token)
+ elif type == DoctypeToken:
+ new_token = phase.processDoctype(new_token)
+
+ if (type == StartTagToken and token["selfClosing"]
+ and not token["selfClosingAcknowledged"]):
+ self.parseError("non-void-element-with-trailing-solidus",
+ {"name": token["name"]})
+
+ # When the loop finishes it's EOF
+ reprocess = True
+ phases = []
+ while reprocess:
+ phases.append(self.phase)
+ reprocess = self.phase.processEOF()
+ if reprocess:
+ assert self.phase not in phases
+
+ def normalizedTokens(self):
+ for token in self.tokenizer:
+ yield self.normalizeToken(token)
+
+ def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
+        """Parse an HTML document into a well-formed tree
+
+ stream - a filelike object or string containing the HTML to be parsed
+
+ The optional encoding parameter must be a string that indicates
+ the encoding. If specified, that encoding will be used,
+ regardless of any BOM or later declaration (such as in a meta
+ element)
+ """
+ self._parse(stream, innerHTML=False, encoding=encoding,
+ parseMeta=parseMeta, useChardet=useChardet)
+ return self.tree.getDocument()
+
+ def parseFragment(self, stream, container="div", encoding=None,
+ parseMeta=False, useChardet=True):
+        """Parse an HTML fragment into a well-formed tree fragment
+
+        container - name of the element whose innerHTML is effectively being
+        set; if None, defaults to 'div'
+
+ stream - a filelike object or string containing the HTML to be parsed
+
+ The optional encoding parameter must be a string that indicates
+ the encoding. If specified, that encoding will be used,
+ regardless of any BOM or later declaration (such as in a meta
+ element)
+ """
+ self._parse(stream, True, container=container, encoding=encoding)
+ return self.tree.getFragment()
+
+ def parseError(self, errorcode="XXX-undefined-error", datavars={}):
+ # XXX The idea is to make errorcode mandatory.
+ self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
+ if self.strict:
+ raise ParseError
+
+ def normalizeToken(self, token):
+ """ HTML5 specific normalizations to the token stream """
+
+ if token["type"] == tokenTypes["StartTag"]:
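+            # The tokenizer emits StartTag attributes as a list of (name, value)
+            # pairs; reversing before building the dict makes the first
+            # occurrence of a duplicated attribute win, as the spec requires.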
+ token["data"] = dict(token["data"][::-1])
+
+ return token
+
+ def adjustMathMLAttributes(self, token):
+ replacements = {"definitionurl": "definitionURL"}
+ for k, v in replacements.items():
+ if k in token["data"]:
+ token["data"][v] = token["data"][k]
+ del token["data"][k]
+
+ def adjustSVGAttributes(self, token):
+ replacements = {
+ "attributename": "attributeName",
+ "attributetype": "attributeType",
+ "basefrequency": "baseFrequency",
+ "baseprofile": "baseProfile",
+ "calcmode": "calcMode",
+ "clippathunits": "clipPathUnits",
+ "contentscripttype": "contentScriptType",
+ "contentstyletype": "contentStyleType",
+ "diffuseconstant": "diffuseConstant",
+ "edgemode": "edgeMode",
+ "externalresourcesrequired": "externalResourcesRequired",
+ "filterres": "filterRes",
+ "filterunits": "filterUnits",
+ "glyphref": "glyphRef",
+ "gradienttransform": "gradientTransform",
+ "gradientunits": "gradientUnits",
+ "kernelmatrix": "kernelMatrix",
+ "kernelunitlength": "kernelUnitLength",
+ "keypoints": "keyPoints",
+ "keysplines": "keySplines",
+ "keytimes": "keyTimes",
+ "lengthadjust": "lengthAdjust",
+ "limitingconeangle": "limitingConeAngle",
+ "markerheight": "markerHeight",
+ "markerunits": "markerUnits",
+ "markerwidth": "markerWidth",
+ "maskcontentunits": "maskContentUnits",
+ "maskunits": "maskUnits",
+ "numoctaves": "numOctaves",
+ "pathlength": "pathLength",
+ "patterncontentunits": "patternContentUnits",
+ "patterntransform": "patternTransform",
+ "patternunits": "patternUnits",
+ "pointsatx": "pointsAtX",
+ "pointsaty": "pointsAtY",
+ "pointsatz": "pointsAtZ",
+ "preservealpha": "preserveAlpha",
+ "preserveaspectratio": "preserveAspectRatio",
+ "primitiveunits": "primitiveUnits",
+ "refx": "refX",
+ "refy": "refY",
+ "repeatcount": "repeatCount",
+ "repeatdur": "repeatDur",
+ "requiredextensions": "requiredExtensions",
+ "requiredfeatures": "requiredFeatures",
+ "specularconstant": "specularConstant",
+ "specularexponent": "specularExponent",
+ "spreadmethod": "spreadMethod",
+ "startoffset": "startOffset",
+ "stddeviation": "stdDeviation",
+ "stitchtiles": "stitchTiles",
+ "surfacescale": "surfaceScale",
+ "systemlanguage": "systemLanguage",
+ "tablevalues": "tableValues",
+ "targetx": "targetX",
+ "targety": "targetY",
+ "textlength": "textLength",
+ "viewbox": "viewBox",
+ "viewtarget": "viewTarget",
+ "xchannelselector": "xChannelSelector",
+ "ychannelselector": "yChannelSelector",
+ "zoomandpan": "zoomAndPan"
+ }
+ for originalName in list(token["data"].keys()):
+ if originalName in replacements:
+ svgName = replacements[originalName]
+ token["data"][svgName] = token["data"][originalName]
+ del token["data"][originalName]
+
+ def adjustForeignAttributes(self, token):
+ replacements = adjustForeignAttributesMap
+
+        for originalName in list(token["data"].keys()):
+ if originalName in replacements:
+ foreignName = replacements[originalName]
+ token["data"][foreignName] = token["data"][originalName]
+ del token["data"][originalName]
+
+ def reparseTokenNormal(self, token):
+ self.parser.phase()
+
+ def resetInsertionMode(self):
+ # The name of this method is mostly historical. (It's also used in the
+ # specification.)
+ last = False
+ newModes = {
+ "select": "inSelect",
+ "td": "inCell",
+ "th": "inCell",
+ "tr": "inRow",
+ "tbody": "inTableBody",
+ "thead": "inTableBody",
+ "tfoot": "inTableBody",
+ "caption": "inCaption",
+ "colgroup": "inColumnGroup",
+ "table": "inTable",
+ "head": "inBody",
+ "body": "inBody",
+ "frameset": "inFrameset",
+ "html": "beforeHead"
+ }
+ for node in self.tree.openElements[::-1]:
+ nodeName = node.name
+ new_phase = None
+ if node == self.tree.openElements[0]:
+ assert self.innerHTML
+ last = True
+ nodeName = self.innerHTML
+ # Check for conditions that should only happen in the innerHTML
+ # case
+ if nodeName in ("select", "colgroup", "head", "html"):
+ assert self.innerHTML
+
+ if not last and node.namespace != self.tree.defaultNamespace:
+ continue
+
+ if nodeName in newModes:
+ new_phase = self.phases[newModes[nodeName]]
+ break
+ elif last:
+ new_phase = self.phases["inBody"]
+ break
+
+ self.phase = new_phase
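+
+        # For example, during fragment parsing with container="td" the loop
+        # above hits the root node with last=True, substitutes the innerHTML
+        # container name, and selects the "inCell" phase.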
+
+ def parseRCDataRawtext(self, token, contentType):
+ """Generic RCDATA/RAWTEXT Parsing algorithm
+ contentType - RCDATA or RAWTEXT
+ """
+ assert contentType in ("RAWTEXT", "RCDATA")
+
+ self.tree.insertElement(token)
+
+ if contentType == "RAWTEXT":
+ self.tokenizer.state = self.tokenizer.rawtextState
+ else:
+ self.tokenizer.state = self.tokenizer.rcdataState
+
+ self.originalPhase = self.phase
+
+ self.phase = self.phases["text"]
+
+
+def getPhases(debug):
+ def log(function):
+ """Logger that records which phase processes each token"""
+ type_names = dict((value, key) for key, value in
+ constants.tokenTypes.items())
+
+ def wrapped(self, *args, **kwargs):
+ if function.__name__.startswith("process") and len(args) > 0:
+ token = args[0]
+                info = {"type": type_names[token['type']]}
+ if token['type'] in constants.tagTokenTypes:
+ info["name"] = token['name']
+
+ self.parser.log.append((self.parser.tokenizer.state.__name__,
+ self.parser.phase.__class__.__name__,
+ self.__class__.__name__,
+ function.__name__,
+ info))
+ return function(self, *args, **kwargs)
+ else:
+ return function(self, *args, **kwargs)
+ return wrapped
+
+ def getMetaclass(use_metaclass, metaclass_func):
+ if use_metaclass:
+ return method_decorator_metaclass(metaclass_func)
+ else:
+ return type
+
+ class Phase(with_metaclass(getMetaclass(debug, log))):
+ """Base class for helper object that implements each phase of processing
+ """
+
+ def __init__(self, parser, tree):
+ self.parser = parser
+ self.tree = tree
+
+ def processEOF(self):
+ raise NotImplementedError
+
+ def processComment(self, token):
+ # For most phases the following is correct. Where it's not it will be
+ # overridden.
+ self.tree.insertComment(token, self.tree.openElements[-1])
+
+ def processDoctype(self, token):
+ self.parser.parseError("unexpected-doctype")
+
+ def processCharacters(self, token):
+ self.tree.insertText(token["data"])
+
+ def processSpaceCharacters(self, token):
+ self.tree.insertText(token["data"])
+
+ def processStartTag(self, token):
+ return self.startTagHandler[token["name"]](token)
+
+ def startTagHtml(self, token):
+ if not self.parser.firstStartTag and token["name"] == "html":
+ self.parser.parseError("non-html-root")
+ # XXX Need a check here to see if the first start tag token emitted is
+ # this token... If it's not, invoke self.parser.parseError().
+ for attr, value in token["data"].items():
+ if attr not in self.tree.openElements[0].attributes:
+ self.tree.openElements[0].attributes[attr] = value
+ self.parser.firstStartTag = False
+
+ def processEndTag(self, token):
+ return self.endTagHandler[token["name"]](token)
+
+ class InitialPhase(Phase):
+ def processSpaceCharacters(self, token):
+ pass
+
+ def processComment(self, token):
+ self.tree.insertComment(token, self.tree.document)
+
+ def processDoctype(self, token):
+ name = token["name"]
+ publicId = token["publicId"]
+ systemId = token["systemId"]
+ correct = token["correct"]
+
+ if (name != "html" or publicId is not None or
+ systemId is not None and systemId != "about:legacy-compat"):
+ self.parser.parseError("unknown-doctype")
+
+ if publicId is None:
+ publicId = ""
+
+ self.tree.insertDoctype(token)
+
+ if publicId != "":
+ publicId = publicId.translate(asciiUpper2Lower)
+
+ if (not correct or token["name"] != "html"
+ or publicId.startswith(
+ ("+//silmaril//dtd html pro v0r11 19970101//",
+ "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+ "-//as//dtd html 3.0 aswedit + extensions//",
+ "-//ietf//dtd html 2.0 level 1//",
+ "-//ietf//dtd html 2.0 level 2//",
+ "-//ietf//dtd html 2.0 strict level 1//",
+ "-//ietf//dtd html 2.0 strict level 2//",
+ "-//ietf//dtd html 2.0 strict//",
+ "-//ietf//dtd html 2.0//",
+ "-//ietf//dtd html 2.1e//",
+ "-//ietf//dtd html 3.0//",
+ "-//ietf//dtd html 3.2 final//",
+ "-//ietf//dtd html 3.2//",
+ "-//ietf//dtd html 3//",
+ "-//ietf//dtd html level 0//",
+ "-//ietf//dtd html level 1//",
+ "-//ietf//dtd html level 2//",
+ "-//ietf//dtd html level 3//",
+ "-//ietf//dtd html strict level 0//",
+ "-//ietf//dtd html strict level 1//",
+ "-//ietf//dtd html strict level 2//",
+ "-//ietf//dtd html strict level 3//",
+ "-//ietf//dtd html strict//",
+ "-//ietf//dtd html//",
+ "-//metrius//dtd metrius presentational//",
+ "-//microsoft//dtd internet explorer 2.0 html strict//",
+ "-//microsoft//dtd internet explorer 2.0 html//",
+ "-//microsoft//dtd internet explorer 2.0 tables//",
+ "-//microsoft//dtd internet explorer 3.0 html strict//",
+ "-//microsoft//dtd internet explorer 3.0 html//",
+ "-//microsoft//dtd internet explorer 3.0 tables//",
+ "-//netscape comm. corp.//dtd html//",
+ "-//netscape comm. corp.//dtd strict html//",
+ "-//o'reilly and associates//dtd html 2.0//",
+ "-//o'reilly and associates//dtd html extended 1.0//",
+ "-//o'reilly and associates//dtd html extended relaxed 1.0//",
+ "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
+ "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
+ "-//spyglass//dtd html 2.0 extended//",
+ "-//sq//dtd html 2.0 hotmetal + extensions//",
+ "-//sun microsystems corp.//dtd hotjava html//",
+ "-//sun microsystems corp.//dtd hotjava strict html//",
+ "-//w3c//dtd html 3 1995-03-24//",
+ "-//w3c//dtd html 3.2 draft//",
+ "-//w3c//dtd html 3.2 final//",
+ "-//w3c//dtd html 3.2//",
+ "-//w3c//dtd html 3.2s draft//",
+ "-//w3c//dtd html 4.0 frameset//",
+ "-//w3c//dtd html 4.0 transitional//",
+ "-//w3c//dtd html experimental 19960712//",
+ "-//w3c//dtd html experimental 970421//",
+ "-//w3c//dtd w3 html//",
+ "-//w3o//dtd w3 html 3.0//",
+ "-//webtechs//dtd mozilla html 2.0//",
+ "-//webtechs//dtd mozilla html//"))
+ or publicId in
+ ("-//w3o//dtd w3 html strict 3.0//en//",
+ "-/w3c/dtd html 4.0 transitional/en",
+ "html")
+ or publicId.startswith(
+ ("-//w3c//dtd html 4.01 frameset//",
+ "-//w3c//dtd html 4.01 transitional//")) and
+ systemId is None
+ or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
+ self.parser.compatMode = "quirks"
+ elif (publicId.startswith(
+ ("-//w3c//dtd xhtml 1.0 frameset//",
+ "-//w3c//dtd xhtml 1.0 transitional//"))
+ or publicId.startswith(
+ ("-//w3c//dtd html 4.01 frameset//",
+ "-//w3c//dtd html 4.01 transitional//")) and
+ systemId is not None):
+ self.parser.compatMode = "limited quirks"
+
+ self.parser.phase = self.parser.phases["beforeHtml"]
+
+ def anythingElse(self):
+ self.parser.compatMode = "quirks"
+ self.parser.phase = self.parser.phases["beforeHtml"]
+
+ def processCharacters(self, token):
+ self.parser.parseError("expected-doctype-but-got-chars")
+ self.anythingElse()
+ return token
+
+ def processStartTag(self, token):
+ self.parser.parseError("expected-doctype-but-got-start-tag",
+ {"name": token["name"]})
+ self.anythingElse()
+ return token
+
+ def processEndTag(self, token):
+ self.parser.parseError("expected-doctype-but-got-end-tag",
+ {"name": token["name"]})
+ self.anythingElse()
+ return token
+
+ def processEOF(self):
+ self.parser.parseError("expected-doctype-but-got-eof")
+ self.anythingElse()
+ return True
+
+ class BeforeHtmlPhase(Phase):
+ # helper methods
+ def insertHtmlElement(self):
+ self.tree.insertRoot(impliedTagToken("html", "StartTag"))
+ self.parser.phase = self.parser.phases["beforeHead"]
+
+ # other
+ def processEOF(self):
+ self.insertHtmlElement()
+ return True
+
+ def processComment(self, token):
+ self.tree.insertComment(token, self.tree.document)
+
+ def processSpaceCharacters(self, token):
+ pass
+
+ def processCharacters(self, token):
+ self.insertHtmlElement()
+ return token
+
+ def processStartTag(self, token):
+ if token["name"] == "html":
+ self.parser.firstStartTag = True
+ self.insertHtmlElement()
+ return token
+
+ def processEndTag(self, token):
+ if token["name"] not in ("head", "body", "html", "br"):
+ self.parser.parseError("unexpected-end-tag-before-html",
+ {"name": token["name"]})
+ else:
+ self.insertHtmlElement()
+ return token
+
+ class BeforeHeadPhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("head", self.startTagHead)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ (("head", "body", "html", "br"), self.endTagImplyHead)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ def processEOF(self):
+ self.startTagHead(impliedTagToken("head", "StartTag"))
+ return True
+
+ def processSpaceCharacters(self, token):
+ pass
+
+ def processCharacters(self, token):
+ self.startTagHead(impliedTagToken("head", "StartTag"))
+ return token
+
+ def startTagHtml(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def startTagHead(self, token):
+ self.tree.insertElement(token)
+ self.tree.headPointer = self.tree.openElements[-1]
+ self.parser.phase = self.parser.phases["inHead"]
+
+ def startTagOther(self, token):
+ self.startTagHead(impliedTagToken("head", "StartTag"))
+ return token
+
+ def endTagImplyHead(self, token):
+ self.startTagHead(impliedTagToken("head", "StartTag"))
+ return token
+
+ def endTagOther(self, token):
+ self.parser.parseError("end-tag-after-implied-root",
+ {"name": token["name"]})
+
+ class InHeadPhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("title", self.startTagTitle),
+ (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
+ ("script", self.startTagScript),
+ (("base", "basefont", "bgsound", "command", "link"),
+ self.startTagBaseLinkCommand),
+ ("meta", self.startTagMeta),
+ ("head", self.startTagHead)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = utils.MethodDispatcher([
+ ("head", self.endTagHead),
+ (("br", "html", "body"), self.endTagHtmlBodyBr)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ # the real thing
+ def processEOF(self):
+ self.anythingElse()
+ return True
+
+ def processCharacters(self, token):
+ self.anythingElse()
+ return token
+
+ def startTagHtml(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def startTagHead(self, token):
+ self.parser.parseError("two-heads-are-not-better-than-one")
+
+ def startTagBaseLinkCommand(self, token):
+ self.tree.insertElement(token)
+ self.tree.openElements.pop()
+ token["selfClosingAcknowledged"] = True
+
+ def startTagMeta(self, token):
+ self.tree.insertElement(token)
+ self.tree.openElements.pop()
+ token["selfClosingAcknowledged"] = True
+
+ attributes = token["data"]
+ if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
+ if "charset" in attributes:
+ self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
+ elif ("content" in attributes and
+ "http-equiv" in attributes and
+ attributes["http-equiv"].lower() == "content-type"):
+                    # Encoding it as UTF-8 here is a hack; really we should pass
+                    # the abstract Unicode string and run the ContentAttrParser
+                    # on that, but UTF-8 can encode every character and, as an
+                    # ASCII superset, still works here.
+ data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
+ parser = inputstream.ContentAttrParser(data)
+ codec = parser.parse()
+ self.parser.tokenizer.stream.changeEncoding(codec)
+
+ def startTagTitle(self, token):
+ self.parser.parseRCDataRawtext(token, "RCDATA")
+
+ def startTagNoScriptNoFramesStyle(self, token):
+ # Need to decide whether to implement the scripting-disabled case
+ self.parser.parseRCDataRawtext(token, "RAWTEXT")
+
+ def startTagScript(self, token):
+ self.tree.insertElement(token)
+ self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
+ self.parser.originalPhase = self.parser.phase
+ self.parser.phase = self.parser.phases["text"]
+
+ def startTagOther(self, token):
+ self.anythingElse()
+ return token
+
+ def endTagHead(self, token):
+ node = self.parser.tree.openElements.pop()
+ assert node.name == "head", "Expected head got %s" % node.name
+ self.parser.phase = self.parser.phases["afterHead"]
+
+ def endTagHtmlBodyBr(self, token):
+ self.anythingElse()
+ return token
+
+ def endTagOther(self, token):
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+ def anythingElse(self):
+ self.endTagHead(impliedTagToken("head"))
+
+ # XXX If we implement a parser for which scripting is disabled we need to
+ # implement this phase.
+ #
+ # class InHeadNoScriptPhase(Phase):
+ class AfterHeadPhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("body", self.startTagBody),
+ ("frameset", self.startTagFrameset),
+ (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
+ "style", "title"),
+ self.startTagFromHead),
+ ("head", self.startTagHead)
+ ])
+ self.startTagHandler.default = self.startTagOther
+ self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
+ self.endTagHtmlBodyBr)])
+ self.endTagHandler.default = self.endTagOther
+
+ def processEOF(self):
+ self.anythingElse()
+ return True
+
+ def processCharacters(self, token):
+ self.anythingElse()
+ return token
+
+ def startTagHtml(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def startTagBody(self, token):
+ self.parser.framesetOK = False
+ self.tree.insertElement(token)
+ self.parser.phase = self.parser.phases["inBody"]
+
+ def startTagFrameset(self, token):
+ self.tree.insertElement(token)
+ self.parser.phase = self.parser.phases["inFrameset"]
+
+ def startTagFromHead(self, token):
+ self.parser.parseError("unexpected-start-tag-out-of-my-head",
+ {"name": token["name"]})
+ self.tree.openElements.append(self.tree.headPointer)
+ self.parser.phases["inHead"].processStartTag(token)
+ for node in self.tree.openElements[::-1]:
+ if node.name == "head":
+ self.tree.openElements.remove(node)
+ break
+
+ def startTagHead(self, token):
+ self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
+
+ def startTagOther(self, token):
+ self.anythingElse()
+ return token
+
+ def endTagHtmlBodyBr(self, token):
+ self.anythingElse()
+ return token
+
+ def endTagOther(self, token):
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+ def anythingElse(self):
+ self.tree.insertElement(impliedTagToken("body", "StartTag"))
+ self.parser.phase = self.parser.phases["inBody"]
+ self.parser.framesetOK = True
+
+ class InBodyPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
+ # the really-really-really-very crazy mode
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ # Keep a ref to this for special handling of whitespace in <pre>
+ self.processSpaceCharactersNonPre = self.processSpaceCharacters
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ (("base", "basefont", "bgsound", "command", "link", "meta",
+ "noframes", "script", "style", "title"),
+ self.startTagProcessInHead),
+ ("body", self.startTagBody),
+ ("frameset", self.startTagFrameset),
+ (("address", "article", "aside", "blockquote", "center", "details",
+              "dir", "div", "dl", "fieldset", "figcaption", "figure",
+ "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
+ "section", "summary", "ul"),
+ self.startTagCloseP),
+ (headingElements, self.startTagHeading),
+ (("pre", "listing"), self.startTagPreListing),
+ ("form", self.startTagForm),
+ (("li", "dd", "dt"), self.startTagListItem),
+ ("plaintext", self.startTagPlaintext),
+ ("a", self.startTagA),
+ (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
+ "strong", "tt", "u"), self.startTagFormatting),
+ ("nobr", self.startTagNobr),
+ ("button", self.startTagButton),
+ (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
+ ("xmp", self.startTagXmp),
+ ("table", self.startTagTable),
+ (("area", "br", "embed", "img", "keygen", "wbr"),
+ self.startTagVoidFormatting),
+ (("param", "source", "track"), self.startTagParamSource),
+ ("input", self.startTagInput),
+ ("hr", self.startTagHr),
+ ("image", self.startTagImage),
+ ("isindex", self.startTagIsIndex),
+ ("textarea", self.startTagTextarea),
+ ("iframe", self.startTagIFrame),
+ (("noembed", "noframes", "noscript"), self.startTagRawtext),
+ ("select", self.startTagSelect),
+ (("rp", "rt"), self.startTagRpRt),
+ (("option", "optgroup"), self.startTagOpt),
+            ("math", self.startTagMath),
+            ("svg", self.startTagSvg),
+ (("caption", "col", "colgroup", "frame", "head",
+ "tbody", "td", "tfoot", "th", "thead",
+ "tr"), self.startTagMisplaced)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ ("body", self.endTagBody),
+ ("html", self.endTagHtml),
+ (("address", "article", "aside", "blockquote", "button", "center",
+ "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
+ "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
+ "section", "summary", "ul"), self.endTagBlock),
+ ("form", self.endTagForm),
+ ("p", self.endTagP),
+ (("dd", "dt", "li"), self.endTagListItem),
+ (headingElements, self.endTagHeading),
+ (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
+ "strike", "strong", "tt", "u"), self.endTagFormatting),
+ (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
+ ("br", self.endTagBr),
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ def isMatchingFormattingElement(self, node1, node2):
+ if node1.name != node2.name or node1.namespace != node2.namespace:
+ return False
+ elif len(node1.attributes) != len(node2.attributes):
+ return False
+ else:
+ attributes1 = sorted(node1.attributes.items())
+ attributes2 = sorted(node2.attributes.items())
+ for attr1, attr2 in zip(attributes1, attributes2):
+ if attr1 != attr2:
+ return False
+ return True
+
+ # helper
+ def addFormattingElement(self, token):
+ self.tree.insertElement(token)
+ element = self.tree.openElements[-1]
+
+ matchingElements = []
+ for node in self.tree.activeFormattingElements[::-1]:
+ if node is Marker:
+ break
+ elif self.isMatchingFormattingElement(node, element):
+ matchingElements.append(node)
+
+ assert len(matchingElements) <= 3
+ if len(matchingElements) == 3:
+ self.tree.activeFormattingElements.remove(matchingElements[-1])
+ self.tree.activeFormattingElements.append(element)
+
+ # the real deal
+ def processEOF(self):
+ allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
+ "tfoot", "th", "thead", "tr", "body",
+ "html"))
+ for node in self.tree.openElements[::-1]:
+ if node.name not in allowed_elements:
+ self.parser.parseError("expected-closing-tag-but-got-eof")
+ break
+ # Stop parsing
+
+ def processSpaceCharactersDropNewline(self, token):
+ # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
+ # want to drop leading newlines
+ data = token["data"]
+ self.processSpaceCharacters = self.processSpaceCharactersNonPre
+ if (data.startswith("\n") and
+ self.tree.openElements[-1].name in ("pre", "listing", "textarea")
+ and not self.tree.openElements[-1].hasContent()):
+ data = data[1:]
+ if data:
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertText(data)
+
+ def processCharacters(self, token):
+ if token["data"] == "\u0000":
+ # The tokenizer should always emit null on its own
+ return
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertText(token["data"])
+ # This must be bad for performance
+ if (self.parser.framesetOK and
+ any([char not in spaceCharacters
+ for char in token["data"]])):
+ self.parser.framesetOK = False
+
+ def processSpaceCharacters(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertText(token["data"])
+
+ def startTagProcessInHead(self, token):
+ return self.parser.phases["inHead"].processStartTag(token)
+
+ def startTagBody(self, token):
+ self.parser.parseError("unexpected-start-tag", {"name": "body"})
+ if (len(self.tree.openElements) == 1
+ or self.tree.openElements[1].name != "body"):
+ assert self.parser.innerHTML
+ else:
+ self.parser.framesetOK = False
+ for attr, value in token["data"].items():
+ if attr not in self.tree.openElements[1].attributes:
+ self.tree.openElements[1].attributes[attr] = value
+
+ def startTagFrameset(self, token):
+ self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
+ if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
+ assert self.parser.innerHTML
+ elif not self.parser.framesetOK:
+ pass
+ else:
+ if self.tree.openElements[1].parent:
+ self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
+ while self.tree.openElements[-1].name != "html":
+ self.tree.openElements.pop()
+ self.tree.insertElement(token)
+ self.parser.phase = self.parser.phases["inFrameset"]
+
+ def startTagCloseP(self, token):
+ if self.tree.elementInScope("p", variant="button"):
+ self.endTagP(impliedTagToken("p"))
+ self.tree.insertElement(token)
+
+ def startTagPreListing(self, token):
+ if self.tree.elementInScope("p", variant="button"):
+ self.endTagP(impliedTagToken("p"))
+ self.tree.insertElement(token)
+ self.parser.framesetOK = False
+ self.processSpaceCharacters = self.processSpaceCharactersDropNewline
+
+ def startTagForm(self, token):
+ if self.tree.formPointer:
+ self.parser.parseError("unexpected-start-tag", {"name": "form"})
+ else:
+ if self.tree.elementInScope("p", variant="button"):
+ self.endTagP(impliedTagToken("p"))
+ self.tree.insertElement(token)
+ self.tree.formPointer = self.tree.openElements[-1]
+
+ def startTagListItem(self, token):
+ self.parser.framesetOK = False
+
+ stopNamesMap = {"li": ["li"],
+ "dt": ["dt", "dd"],
+ "dd": ["dt", "dd"]}
+ stopNames = stopNamesMap[token["name"]]
+ for node in reversed(self.tree.openElements):
+ if node.name in stopNames:
+ self.parser.phase.processEndTag(
+ impliedTagToken(node.name, "EndTag"))
+ break
+ if (node.nameTuple in specialElements and
+ node.name not in ("address", "div", "p")):
+ break
+
+ if self.tree.elementInScope("p", variant="button"):
+ self.parser.phase.processEndTag(
+ impliedTagToken("p", "EndTag"))
+
+ self.tree.insertElement(token)
+
+ def startTagPlaintext(self, token):
+ if self.tree.elementInScope("p", variant="button"):
+ self.endTagP(impliedTagToken("p"))
+ self.tree.insertElement(token)
+ self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
+
+ def startTagHeading(self, token):
+ if self.tree.elementInScope("p", variant="button"):
+ self.endTagP(impliedTagToken("p"))
+ if self.tree.openElements[-1].name in headingElements:
+ self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
+ self.tree.openElements.pop()
+ self.tree.insertElement(token)
+
+ def startTagA(self, token):
+ afeAElement = self.tree.elementInActiveFormattingElements("a")
+ if afeAElement:
+ self.parser.parseError("unexpected-start-tag-implies-end-tag",
+ {"startName": "a", "endName": "a"})
+ self.endTagFormatting(impliedTagToken("a"))
+ if afeAElement in self.tree.openElements:
+ self.tree.openElements.remove(afeAElement)
+ if afeAElement in self.tree.activeFormattingElements:
+ self.tree.activeFormattingElements.remove(afeAElement)
+ self.tree.reconstructActiveFormattingElements()
+ self.addFormattingElement(token)
+
+ def startTagFormatting(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ self.addFormattingElement(token)
+
+ def startTagNobr(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ if self.tree.elementInScope("nobr"):
+ self.parser.parseError("unexpected-start-tag-implies-end-tag",
+ {"startName": "nobr", "endName": "nobr"})
+ self.processEndTag(impliedTagToken("nobr"))
+ # XXX Need tests that trigger the following
+ self.tree.reconstructActiveFormattingElements()
+ self.addFormattingElement(token)
+
+ def startTagButton(self, token):
+ if self.tree.elementInScope("button"):
+ self.parser.parseError("unexpected-start-tag-implies-end-tag",
+ {"startName": "button", "endName": "button"})
+ self.processEndTag(impliedTagToken("button"))
+ return token
+ else:
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertElement(token)
+ self.parser.framesetOK = False
+
+ def startTagAppletMarqueeObject(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertElement(token)
+ self.tree.activeFormattingElements.append(Marker)
+ self.parser.framesetOK = False
+
+ def startTagXmp(self, token):
+ if self.tree.elementInScope("p", variant="button"):
+ self.endTagP(impliedTagToken("p"))
+ self.tree.reconstructActiveFormattingElements()
+ self.parser.framesetOK = False
+ self.parser.parseRCDataRawtext(token, "RAWTEXT")
+
+ def startTagTable(self, token):
+ if self.parser.compatMode != "quirks":
+ if self.tree.elementInScope("p", variant="button"):
+ self.processEndTag(impliedTagToken("p"))
+ self.tree.insertElement(token)
+ self.parser.framesetOK = False
+ self.parser.phase = self.parser.phases["inTable"]
+
+ def startTagVoidFormatting(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertElement(token)
+ self.tree.openElements.pop()
+ token["selfClosingAcknowledged"] = True
+ self.parser.framesetOK = False
+
+ def startTagInput(self, token):
+ framesetOK = self.parser.framesetOK
+ self.startTagVoidFormatting(token)
+ if ("type" in token["data"] and
+ token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
+ # input type=hidden doesn't change framesetOK
+ self.parser.framesetOK = framesetOK
+
+ def startTagParamSource(self, token):
+ self.tree.insertElement(token)
+ self.tree.openElements.pop()
+ token["selfClosingAcknowledged"] = True
+
+ def startTagHr(self, token):
+ if self.tree.elementInScope("p", variant="button"):
+ self.endTagP(impliedTagToken("p"))
+ self.tree.insertElement(token)
+ self.tree.openElements.pop()
+ token["selfClosingAcknowledged"] = True
+ self.parser.framesetOK = False
+
+ def startTagImage(self, token):
+ # No really...
+ self.parser.parseError("unexpected-start-tag-treated-as",
+ {"originalName": "image", "newName": "img"})
+ self.processStartTag(impliedTagToken("img", "StartTag",
+ attributes=token["data"],
+ selfClosing=token["selfClosing"]))
+
+ def startTagIsIndex(self, token):
+ self.parser.parseError("deprecated-tag", {"name": "isindex"})
+ if self.tree.formPointer:
+ return
+ form_attrs = {}
+ if "action" in token["data"]:
+ form_attrs["action"] = token["data"]["action"]
+ self.processStartTag(impliedTagToken("form", "StartTag",
+ attributes=form_attrs))
+ self.processStartTag(impliedTagToken("hr", "StartTag"))
+ self.processStartTag(impliedTagToken("label", "StartTag"))
+ # XXX Localization ...
+ if "prompt" in token["data"]:
+ prompt = token["data"]["prompt"]
+ else:
+ prompt = "This is a searchable index. Enter search keywords: "
+ self.processCharacters(
+ {"type": tokenTypes["Characters"], "data": prompt})
+ attributes = token["data"].copy()
+ if "action" in attributes:
+ del attributes["action"]
+ if "prompt" in attributes:
+ del attributes["prompt"]
+ attributes["name"] = "isindex"
+ self.processStartTag(impliedTagToken("input", "StartTag",
+ attributes=attributes,
+ selfClosing=token["selfClosing"]))
+ self.processEndTag(impliedTagToken("label"))
+ self.processStartTag(impliedTagToken("hr", "StartTag"))
+ self.processEndTag(impliedTagToken("form"))
+
+ def startTagTextarea(self, token):
+ self.tree.insertElement(token)
+ self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
+ self.processSpaceCharacters = self.processSpaceCharactersDropNewline
+ self.parser.framesetOK = False
+
+ def startTagIFrame(self, token):
+ self.parser.framesetOK = False
+ self.startTagRawtext(token)
+
+ def startTagRawtext(self, token):
+            """iframe, noembed, noframes, noscript (if scripting enabled)"""
+ self.parser.parseRCDataRawtext(token, "RAWTEXT")
+
+ def startTagOpt(self, token):
+ if self.tree.openElements[-1].name == "option":
+ self.parser.phase.processEndTag(impliedTagToken("option"))
+ self.tree.reconstructActiveFormattingElements()
+ self.parser.tree.insertElement(token)
+
+ def startTagSelect(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertElement(token)
+ self.parser.framesetOK = False
+ if self.parser.phase in (self.parser.phases["inTable"],
+ self.parser.phases["inCaption"],
+ self.parser.phases["inColumnGroup"],
+ self.parser.phases["inTableBody"],
+ self.parser.phases["inRow"],
+ self.parser.phases["inCell"]):
+ self.parser.phase = self.parser.phases["inSelectInTable"]
+ else:
+ self.parser.phase = self.parser.phases["inSelect"]
+
+ def startTagRpRt(self, token):
+ if self.tree.elementInScope("ruby"):
+ self.tree.generateImpliedEndTags()
+ if self.tree.openElements[-1].name != "ruby":
+ self.parser.parseError()
+ self.tree.insertElement(token)
+
+ def startTagMath(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ self.parser.adjustMathMLAttributes(token)
+ self.parser.adjustForeignAttributes(token)
+ token["namespace"] = namespaces["mathml"]
+ self.tree.insertElement(token)
+ # Need to get the parse error right for the case where the token
+ # has a namespace not equal to the xmlns attribute
+ if token["selfClosing"]:
+ self.tree.openElements.pop()
+ token["selfClosingAcknowledged"] = True
+
+ def startTagSvg(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ self.parser.adjustSVGAttributes(token)
+ self.parser.adjustForeignAttributes(token)
+ token["namespace"] = namespaces["svg"]
+ self.tree.insertElement(token)
+ # Need to get the parse error right for the case where the token
+ # has a namespace not equal to the xmlns attribute
+ if token["selfClosing"]:
+ self.tree.openElements.pop()
+ token["selfClosingAcknowledged"] = True
+
+ def startTagMisplaced(self, token):
+ """ Elements that should be children of other elements that have a
+ different insertion mode; here they are ignored
+ "caption", "col", "colgroup", "frame", "frameset", "head",
+ "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
+ "tr", "noscript"
+ """
+ self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
+
+ def startTagOther(self, token):
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertElement(token)
+
+ def endTagP(self, token):
+ if not self.tree.elementInScope("p", variant="button"):
+ self.startTagCloseP(impliedTagToken("p", "StartTag"))
+ self.parser.parseError("unexpected-end-tag", {"name": "p"})
+ self.endTagP(impliedTagToken("p", "EndTag"))
+ else:
+ self.tree.generateImpliedEndTags("p")
+ if self.tree.openElements[-1].name != "p":
+ self.parser.parseError("unexpected-end-tag", {"name": "p"})
+ node = self.tree.openElements.pop()
+ while node.name != "p":
+ node = self.tree.openElements.pop()
+
+ def endTagBody(self, token):
+ if not self.tree.elementInScope("body"):
+ self.parser.parseError()
+ return
+ elif self.tree.openElements[-1].name != "body":
+ for node in self.tree.openElements[2:]:
+ if node.name not in frozenset(("dd", "dt", "li", "optgroup",
+ "option", "p", "rp", "rt",
+ "tbody", "td", "tfoot",
+ "th", "thead", "tr", "body",
+ "html")):
+ # Not sure this is the correct name for the parse error
+ self.parser.parseError(
+ "expected-one-end-tag-but-got-another",
+ {"expectedName": "body", "gotName": node.name})
+ break
+ self.parser.phase = self.parser.phases["afterBody"]
+
+ def endTagHtml(self, token):
+ # We repeat the test for the body end tag token being ignored here
+ if self.tree.elementInScope("body"):
+ self.endTagBody(impliedTagToken("body"))
+ return token
+
+ def endTagBlock(self, token):
+ # Put us back in the right whitespace handling mode
+ if token["name"] == "pre":
+ self.processSpaceCharacters = self.processSpaceCharactersNonPre
+ inScope = self.tree.elementInScope(token["name"])
+ if inScope:
+ self.tree.generateImpliedEndTags()
+ if self.tree.openElements[-1].name != token["name"]:
+ self.parser.parseError("end-tag-too-early", {"name": token["name"]})
+ if inScope:
+ node = self.tree.openElements.pop()
+ while node.name != token["name"]:
+ node = self.tree.openElements.pop()
+
+ def endTagForm(self, token):
+ node = self.tree.formPointer
+ self.tree.formPointer = None
+ if node is None or not self.tree.elementInScope(node):
+ self.parser.parseError("unexpected-end-tag",
+ {"name": "form"})
+ else:
+ self.tree.generateImpliedEndTags()
+ if self.tree.openElements[-1] != node:
+ self.parser.parseError("end-tag-too-early-ignored",
+ {"name": "form"})
+ self.tree.openElements.remove(node)
+
+ def endTagListItem(self, token):
+ if token["name"] == "li":
+ variant = "list"
+ else:
+ variant = None
+ if not self.tree.elementInScope(token["name"], variant=variant):
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+ else:
+ self.tree.generateImpliedEndTags(exclude=token["name"])
+ if self.tree.openElements[-1].name != token["name"]:
+ self.parser.parseError(
+ "end-tag-too-early",
+ {"name": token["name"]})
+ node = self.tree.openElements.pop()
+ while node.name != token["name"]:
+ node = self.tree.openElements.pop()
+
+ def endTagHeading(self, token):
+ for item in headingElements:
+ if self.tree.elementInScope(item):
+ self.tree.generateImpliedEndTags()
+ break
+ if self.tree.openElements[-1].name != token["name"]:
+ self.parser.parseError("end-tag-too-early", {"name": token["name"]})
+
+ for item in headingElements:
+ if self.tree.elementInScope(item):
+ item = self.tree.openElements.pop()
+ while item.name not in headingElements:
+ item = self.tree.openElements.pop()
+ break
+
+ def endTagFormatting(self, token):
+ """The much-feared adoption agency algorithm"""
+ # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
+ # XXX Better parseError messages appreciated.
+
+ # Step 1
+ outerLoopCounter = 0
+
+ # Step 2
+ while outerLoopCounter < 8:
+
+ # Step 3
+ outerLoopCounter += 1
+
+ # Step 4:
+
+ # Let the formatting element be the last element in
+ # the list of active formatting elements that:
+ # - is between the end of the list and the last scope
+ # marker in the list, if any, or the start of the list
+ # otherwise, and
+ # - has the same tag name as the token.
+ formattingElement = self.tree.elementInActiveFormattingElements(
+ token["name"])
+ if (not formattingElement or
+ (formattingElement in self.tree.openElements and
+ not self.tree.elementInScope(formattingElement.name))):
+ # If there is no such node, then abort these steps
+ # and instead act as described in the "any other
+ # end tag" entry below.
+ self.endTagOther(token)
+ return
+
+ # Otherwise, if there is such a node, but that node is
+ # not in the stack of open elements, then this is a
+ # parse error; remove the element from the list, and
+ # abort these steps.
+ elif formattingElement not in self.tree.openElements:
+ self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
+ self.tree.activeFormattingElements.remove(formattingElement)
+ return
+
+ # Otherwise, if there is such a node, and that node is
+ # also in the stack of open elements, but the element
+ # is not in scope, then this is a parse error; ignore
+ # the token, and abort these steps.
+ elif not self.tree.elementInScope(formattingElement.name):
+ self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
+ return
+
+ # Otherwise, there is a formatting element and that
+ # element is in the stack and is in scope. If the
+ # element is not the current node, this is a parse
+ # error. In any case, proceed with the algorithm as
+ # written in the following steps.
+ else:
+ if formattingElement != self.tree.openElements[-1]:
+ self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
+
+ # Step 5:
+
+ # Let the furthest block be the topmost node in the
+ # stack of open elements that is lower in the stack
+ # than the formatting element, and is an element in
+ # the special category. There might not be one.
+ afeIndex = self.tree.openElements.index(formattingElement)
+ furthestBlock = None
+ for element in self.tree.openElements[afeIndex:]:
+ if element.nameTuple in specialElements:
+ furthestBlock = element
+ break
+
+ # Step 6:
+
+ # If there is no furthest block, then the UA must
+ # first pop all the nodes from the bottom of the stack
+ # of open elements, from the current node up to and
+ # including the formatting element, then remove the
+ # formatting element from the list of active
+ # formatting elements, and finally abort these steps.
+ if furthestBlock is None:
+ element = self.tree.openElements.pop()
+ while element != formattingElement:
+ element = self.tree.openElements.pop()
+ self.tree.activeFormattingElements.remove(element)
+ return
+
+ # Step 7
+ commonAncestor = self.tree.openElements[afeIndex - 1]
+
+ # Step 8:
+ # The bookmark is supposed to help us identify where to reinsert
+ # nodes in step 15. We have to ensure that we reinsert nodes after
+ # the node before the active formatting element. Note the bookmark
+ # can move in step 9.7
+ bookmark = self.tree.activeFormattingElements.index(formattingElement)
+
+ # Step 9
+ lastNode = node = furthestBlock
+ innerLoopCounter = 0
+
+ index = self.tree.openElements.index(node)
+ while innerLoopCounter < 3:
+ innerLoopCounter += 1
+ # Node is element before node in open elements
+ index -= 1
+ node = self.tree.openElements[index]
+ if node not in self.tree.activeFormattingElements:
+ self.tree.openElements.remove(node)
+ continue
+ # Step 9.6
+ if node == formattingElement:
+ break
+ # Step 9.7
+ if lastNode == furthestBlock:
+ bookmark = self.tree.activeFormattingElements.index(node) + 1
+ # Step 9.8
+ clone = node.cloneNode()
+ # Replace node with clone
+ self.tree.activeFormattingElements[
+ self.tree.activeFormattingElements.index(node)] = clone
+ self.tree.openElements[
+ self.tree.openElements.index(node)] = clone
+ node = clone
+ # Step 9.9
+ # Remove lastNode from its parents, if any
+ if lastNode.parent:
+ lastNode.parent.removeChild(lastNode)
+ node.appendChild(lastNode)
+ # Step 9.10
+ lastNode = node
+
+ # Step 10
+ # Foster parent lastNode if commonAncestor is a
+ # table, tbody, tfoot, thead, or tr we need to foster
+ # parent the lastNode
+ if lastNode.parent:
+ lastNode.parent.removeChild(lastNode)
+
+ if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
+ parent, insertBefore = self.tree.getTableMisnestedNodePosition()
+ parent.insertBefore(lastNode, insertBefore)
+ else:
+ commonAncestor.appendChild(lastNode)
+
+ # Step 11
+ clone = formattingElement.cloneNode()
+
+ # Step 12
+ furthestBlock.reparentChildren(clone)
+
+ # Step 13
+ furthestBlock.appendChild(clone)
+
+ # Step 14
+ self.tree.activeFormattingElements.remove(formattingElement)
+ self.tree.activeFormattingElements.insert(bookmark, clone)
+
+ # Step 15
+ self.tree.openElements.remove(formattingElement)
+ self.tree.openElements.insert(
+ self.tree.openElements.index(furthestBlock) + 1, clone)
+
+ def endTagAppletMarqueeObject(self, token):
+ if self.tree.elementInScope(token["name"]):
+ self.tree.generateImpliedEndTags()
+ if self.tree.openElements[-1].name != token["name"]:
+ self.parser.parseError("end-tag-too-early", {"name": token["name"]})
+
+ if self.tree.elementInScope(token["name"]):
+ element = self.tree.openElements.pop()
+ while element.name != token["name"]:
+ element = self.tree.openElements.pop()
+ self.tree.clearActiveFormattingElements()
+
+ def endTagBr(self, token):
+ self.parser.parseError("unexpected-end-tag-treated-as",
+ {"originalName": "br", "newName": "br element"})
+ self.tree.reconstructActiveFormattingElements()
+ self.tree.insertElement(impliedTagToken("br", "StartTag"))
+ self.tree.openElements.pop()
+
+ def endTagOther(self, token):
+ for node in self.tree.openElements[::-1]:
+ if node.name == token["name"]:
+ self.tree.generateImpliedEndTags(exclude=token["name"])
+ if self.tree.openElements[-1].name != token["name"]:
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+ while self.tree.openElements.pop() != node:
+ pass
+ break
+ else:
+ if node.nameTuple in specialElements:
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+ break
+
+ class TextPhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+ self.startTagHandler = utils.MethodDispatcher([])
+ self.startTagHandler.default = self.startTagOther
+ self.endTagHandler = utils.MethodDispatcher([
+ ("script", self.endTagScript)])
+ self.endTagHandler.default = self.endTagOther
+
+ def processCharacters(self, token):
+ self.tree.insertText(token["data"])
+
+ def processEOF(self):
+ self.parser.parseError("expected-named-closing-tag-but-got-eof",
+ {"name": self.tree.openElements[-1].name})
+ self.tree.openElements.pop()
+ self.parser.phase = self.parser.originalPhase
+ return True
+
+ def startTagOther(self, token):
+ assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
+
+ def endTagScript(self, token):
+ node = self.tree.openElements.pop()
+ assert node.name == "script"
+ self.parser.phase = self.parser.originalPhase
+ # The rest of this method is all stuff that only happens if
+ # document.write works
+
+ def endTagOther(self, token):
+ self.tree.openElements.pop()
+ self.parser.phase = self.parser.originalPhase
+
+ class InTablePhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-table
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("caption", self.startTagCaption),
+ ("colgroup", self.startTagColgroup),
+ ("col", self.startTagCol),
+ (("tbody", "tfoot", "thead"), self.startTagRowGroup),
+ (("td", "th", "tr"), self.startTagImplyTbody),
+ ("table", self.startTagTable),
+ (("style", "script"), self.startTagStyleScript),
+ ("input", self.startTagInput),
+ ("form", self.startTagForm)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ ("table", self.endTagTable),
+ (("body", "caption", "col", "colgroup", "html", "tbody", "td",
+ "tfoot", "th", "thead", "tr"), self.endTagIgnore)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ # helper methods
+ def clearStackToTableContext(self):
+ # "clear the stack back to a table context"
+ while self.tree.openElements[-1].name not in ("table", "html"):
+ # self.parser.parseError("unexpected-implied-end-tag-in-table",
+ # {"name": self.tree.openElements[-1].name})
+ self.tree.openElements.pop()
+ # When the current node is <html> it's an innerHTML case
+
+ # processing methods
+ def processEOF(self):
+ if self.tree.openElements[-1].name != "html":
+ self.parser.parseError("eof-in-table")
+ else:
+ assert self.parser.innerHTML
+ # Stop parsing
+
+ def processSpaceCharacters(self, token):
+ originalPhase = self.parser.phase
+ self.parser.phase = self.parser.phases["inTableText"]
+ self.parser.phase.originalPhase = originalPhase
+ self.parser.phase.processSpaceCharacters(token)
+
+ def processCharacters(self, token):
+ originalPhase = self.parser.phase
+ self.parser.phase = self.parser.phases["inTableText"]
+ self.parser.phase.originalPhase = originalPhase
+ self.parser.phase.processCharacters(token)
+
+ def insertText(self, token):
+ # If we get here there must be at least one non-whitespace character
+ # Do the table magic!
+ self.tree.insertFromTable = True
+ self.parser.phases["inBody"].processCharacters(token)
+ self.tree.insertFromTable = False
+
+ def startTagCaption(self, token):
+ self.clearStackToTableContext()
+ self.tree.activeFormattingElements.append(Marker)
+ self.tree.insertElement(token)
+ self.parser.phase = self.parser.phases["inCaption"]
+
+ def startTagColgroup(self, token):
+ self.clearStackToTableContext()
+ self.tree.insertElement(token)
+ self.parser.phase = self.parser.phases["inColumnGroup"]
+
+ def startTagCol(self, token):
+ self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
+ return token
+
+ def startTagRowGroup(self, token):
+ self.clearStackToTableContext()
+ self.tree.insertElement(token)
+ self.parser.phase = self.parser.phases["inTableBody"]
+
+ def startTagImplyTbody(self, token):
+ self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
+ return token
+
+ def startTagTable(self, token):
+ self.parser.parseError("unexpected-start-tag-implies-end-tag",
+ {"startName": "table", "endName": "table"})
+ self.parser.phase.processEndTag(impliedTagToken("table"))
+ if not self.parser.innerHTML:
+ return token
+
+ def startTagStyleScript(self, token):
+ return self.parser.phases["inHead"].processStartTag(token)
+
+ def startTagInput(self, token):
+ if ("type" in token["data"] and
+ token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
+ self.parser.parseError("unexpected-hidden-input-in-table")
+ self.tree.insertElement(token)
+ # XXX associate with form
+ self.tree.openElements.pop()
+ else:
+ self.startTagOther(token)
+
+ def startTagForm(self, token):
+ self.parser.parseError("unexpected-form-in-table")
+ if self.tree.formPointer is None:
+ self.tree.insertElement(token)
+ self.tree.formPointer = self.tree.openElements[-1]
+ self.tree.openElements.pop()
+
+ def startTagOther(self, token):
+ self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
+ # Do the table magic!
+ self.tree.insertFromTable = True
+ self.parser.phases["inBody"].processStartTag(token)
+ self.tree.insertFromTable = False
+
+ def endTagTable(self, token):
+ if self.tree.elementInScope("table", variant="table"):
+ self.tree.generateImpliedEndTags()
+ if self.tree.openElements[-1].name != "table":
+ self.parser.parseError("end-tag-too-early-named",
+ {"gotName": "table",
+ "expectedName": self.tree.openElements[-1].name})
+ while self.tree.openElements[-1].name != "table":
+ self.tree.openElements.pop()
+ self.tree.openElements.pop()
+ self.parser.resetInsertionMode()
+ else:
+ # innerHTML case
+ assert self.parser.innerHTML
+ self.parser.parseError()
+
+ def endTagIgnore(self, token):
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+ def endTagOther(self, token):
+ self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
+ # Do the table magic!
+ self.tree.insertFromTable = True
+ self.parser.phases["inBody"].processEndTag(token)
+ self.tree.insertFromTable = False
+
+ class InTableTextPhase(Phase):
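+        # Buffers character tokens seen while in a table; flushCharacters below
+        # inserts runs of pure whitespace in place and routes anything containing
+        # other characters through inTable.insertText (i.e. foster parenting).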
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+ self.originalPhase = None
+ self.characterTokens = []
+
+ def flushCharacters(self):
+ data = "".join([item["data"] for item in self.characterTokens])
+ if any([item not in spaceCharacters for item in data]):
+ token = {"type": tokenTypes["Characters"], "data": data}
+ self.parser.phases["inTable"].insertText(token)
+ elif data:
+ self.tree.insertText(data)
+ self.characterTokens = []
+
+ def processComment(self, token):
+ self.flushCharacters()
+ self.parser.phase = self.originalPhase
+ return token
+
+ def processEOF(self):
+ self.flushCharacters()
+ self.parser.phase = self.originalPhase
+ return True
+
+ def processCharacters(self, token):
+ if token["data"] == "\u0000":
+ return
+ self.characterTokens.append(token)
+
+ def processSpaceCharacters(self, token):
+ # pretty sure we should never reach here
+ self.characterTokens.append(token)
+ # assert False
+
+ def processStartTag(self, token):
+ self.flushCharacters()
+ self.parser.phase = self.originalPhase
+ return token
+
+ def processEndTag(self, token):
+ self.flushCharacters()
+ self.parser.phase = self.originalPhase
+ return token
+
+ class InCaptionPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
+ "thead", "tr"), self.startTagTableElement)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ ("caption", self.endTagCaption),
+ ("table", self.endTagTable),
+ (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
+ "thead", "tr"), self.endTagIgnore)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ def ignoreEndTagCaption(self):
+ return not self.tree.elementInScope("caption", variant="table")
+
+ def processEOF(self):
+ self.parser.phases["inBody"].processEOF()
+
+ def processCharacters(self, token):
+ return self.parser.phases["inBody"].processCharacters(token)
+
+ def startTagTableElement(self, token):
+ self.parser.parseError()
+ # XXX Have to duplicate logic here to find out if the tag is ignored
+ ignoreEndTag = self.ignoreEndTagCaption()
+ self.parser.phase.processEndTag(impliedTagToken("caption"))
+ if not ignoreEndTag:
+ return token
+
+ def startTagOther(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def endTagCaption(self, token):
+ if not self.ignoreEndTagCaption():
+ # AT this code is quite similar to endTagTable in "InTable"
+ self.tree.generateImpliedEndTags()
+ if self.tree.openElements[-1].name != "caption":
+ self.parser.parseError("expected-one-end-tag-but-got-another",
+ {"gotName": "caption",
+ "expectedName": self.tree.openElements[-1].name})
+ while self.tree.openElements[-1].name != "caption":
+ self.tree.openElements.pop()
+ self.tree.openElements.pop()
+ self.tree.clearActiveFormattingElements()
+ self.parser.phase = self.parser.phases["inTable"]
+ else:
+ # innerHTML case
+ assert self.parser.innerHTML
+ self.parser.parseError()
+
+ def endTagTable(self, token):
+ self.parser.parseError()
+ ignoreEndTag = self.ignoreEndTagCaption()
+ self.parser.phase.processEndTag(impliedTagToken("caption"))
+ if not ignoreEndTag:
+ return token
+
+ def endTagIgnore(self, token):
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+ def endTagOther(self, token):
+ return self.parser.phases["inBody"].processEndTag(token)
+
+ class InColumnGroupPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-column
+
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("col", self.startTagCol)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ ("colgroup", self.endTagColgroup),
+ ("col", self.endTagCol)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ def ignoreEndTagColgroup(self):
+ return self.tree.openElements[-1].name == "html"
+
+ def processEOF(self):
+ if self.tree.openElements[-1].name == "html":
+ assert self.parser.innerHTML
+ return
+ else:
+ ignoreEndTag = self.ignoreEndTagColgroup()
+ self.endTagColgroup(impliedTagToken("colgroup"))
+ if not ignoreEndTag:
+ return True
+
+ def processCharacters(self, token):
+ ignoreEndTag = self.ignoreEndTagColgroup()
+ self.endTagColgroup(impliedTagToken("colgroup"))
+ if not ignoreEndTag:
+ return token
+
+ def startTagCol(self, token):
+ self.tree.insertElement(token)
+ self.tree.openElements.pop()
+
+ def startTagOther(self, token):
+ ignoreEndTag = self.ignoreEndTagColgroup()
+ self.endTagColgroup(impliedTagToken("colgroup"))
+ if not ignoreEndTag:
+ return token
+
+ def endTagColgroup(self, token):
+ if self.ignoreEndTagColgroup():
+ # innerHTML case
+ assert self.parser.innerHTML
+ self.parser.parseError()
+ else:
+ self.tree.openElements.pop()
+ self.parser.phase = self.parser.phases["inTable"]
+
+ def endTagCol(self, token):
+ self.parser.parseError("no-end-tag", {"name": "col"})
+
+ def endTagOther(self, token):
+ ignoreEndTag = self.ignoreEndTagColgroup()
+ self.endTagColgroup(impliedTagToken("colgroup"))
+ if not ignoreEndTag:
+ return token
+
+ class InTableBodyPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("tr", self.startTagTr),
+ (("td", "th"), self.startTagTableCell),
+ (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
+ self.startTagTableOther)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
+ ("table", self.endTagTable),
+ (("body", "caption", "col", "colgroup", "html", "td", "th",
+ "tr"), self.endTagIgnore)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ # helper methods
+ def clearStackToTableBodyContext(self):
+ while self.tree.openElements[-1].name not in ("tbody", "tfoot",
+ "thead", "html"):
+ # self.parser.parseError("unexpected-implied-end-tag-in-table",
+ # {"name": self.tree.openElements[-1].name})
+ self.tree.openElements.pop()
+ if self.tree.openElements[-1].name == "html":
+ assert self.parser.innerHTML
+
+ # the rest
+ def processEOF(self):
+ self.parser.phases["inTable"].processEOF()
+
+ def processSpaceCharacters(self, token):
+ return self.parser.phases["inTable"].processSpaceCharacters(token)
+
+ def processCharacters(self, token):
+ return self.parser.phases["inTable"].processCharacters(token)
+
+ def startTagTr(self, token):
+ self.clearStackToTableBodyContext()
+ self.tree.insertElement(token)
+ self.parser.phase = self.parser.phases["inRow"]
+
+ def startTagTableCell(self, token):
+ self.parser.parseError("unexpected-cell-in-table-body",
+ {"name": token["name"]})
+ self.startTagTr(impliedTagToken("tr", "StartTag"))
+ return token
+
+ def startTagTableOther(self, token):
+ # XXX AT Any ideas on how to share this with endTagTable?
+ if (self.tree.elementInScope("tbody", variant="table") or
+ self.tree.elementInScope("thead", variant="table") or
+ self.tree.elementInScope("tfoot", variant="table")):
+ self.clearStackToTableBodyContext()
+ self.endTagTableRowGroup(
+ impliedTagToken(self.tree.openElements[-1].name))
+ return token
+ else:
+ # innerHTML case
+ assert self.parser.innerHTML
+ self.parser.parseError()
+
+ def startTagOther(self, token):
+ return self.parser.phases["inTable"].processStartTag(token)
+
+ def endTagTableRowGroup(self, token):
+ if self.tree.elementInScope(token["name"], variant="table"):
+ self.clearStackToTableBodyContext()
+ self.tree.openElements.pop()
+ self.parser.phase = self.parser.phases["inTable"]
+ else:
+ self.parser.parseError("unexpected-end-tag-in-table-body",
+ {"name": token["name"]})
+
+ def endTagTable(self, token):
+ if (self.tree.elementInScope("tbody", variant="table") or
+ self.tree.elementInScope("thead", variant="table") or
+ self.tree.elementInScope("tfoot", variant="table")):
+ self.clearStackToTableBodyContext()
+ self.endTagTableRowGroup(
+ impliedTagToken(self.tree.openElements[-1].name))
+ return token
+ else:
+ # innerHTML case
+ assert self.parser.innerHTML
+ self.parser.parseError()
+
+ def endTagIgnore(self, token):
+ self.parser.parseError("unexpected-end-tag-in-table-body",
+ {"name": token["name"]})
+
+ def endTagOther(self, token):
+ return self.parser.phases["inTable"].processEndTag(token)
+
+ class InRowPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-row
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ (("td", "th"), self.startTagTableCell),
+ (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
+ "tr"), self.startTagTableOther)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ ("tr", self.endTagTr),
+ ("table", self.endTagTable),
+ (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
+ (("body", "caption", "col", "colgroup", "html", "td", "th"),
+ self.endTagIgnore)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ # helper methods (XXX unify this with other table helper methods)
+ def clearStackToTableRowContext(self):
+ while self.tree.openElements[-1].name not in ("tr", "html"):
+ self.parser.parseError("unexpected-implied-end-tag-in-table-row",
+ {"name": self.tree.openElements[-1].name})
+ self.tree.openElements.pop()
+
+ def ignoreEndTagTr(self):
+ return not self.tree.elementInScope("tr", variant="table")
+
+ # the rest
+ def processEOF(self):
+ self.parser.phases["inTable"].processEOF()
+
+ def processSpaceCharacters(self, token):
+ return self.parser.phases["inTable"].processSpaceCharacters(token)
+
+ def processCharacters(self, token):
+ return self.parser.phases["inTable"].processCharacters(token)
+
+ def startTagTableCell(self, token):
+ self.clearStackToTableRowContext()
+ self.tree.insertElement(token)
+ self.parser.phase = self.parser.phases["inCell"]
+ self.tree.activeFormattingElements.append(Marker)
+
+ def startTagTableOther(self, token):
+ ignoreEndTag = self.ignoreEndTagTr()
+ self.endTagTr(impliedTagToken("tr"))
+ # XXX how are we sure it's always ignored in the innerHTML case?
+ if not ignoreEndTag:
+ return token
+
+ def startTagOther(self, token):
+ return self.parser.phases["inTable"].processStartTag(token)
+
+ def endTagTr(self, token):
+ if not self.ignoreEndTagTr():
+ self.clearStackToTableRowContext()
+ self.tree.openElements.pop()
+ self.parser.phase = self.parser.phases["inTableBody"]
+ else:
+ # innerHTML case
+ assert self.parser.innerHTML
+ self.parser.parseError()
+
+ def endTagTable(self, token):
+ ignoreEndTag = self.ignoreEndTagTr()
+ self.endTagTr(impliedTagToken("tr"))
+ # Reprocess the current tag if the tr end tag was not ignored
+ # XXX how are we sure it's always ignored in the innerHTML case?
+ if not ignoreEndTag:
+ return token
+
+ def endTagTableRowGroup(self, token):
+ if self.tree.elementInScope(token["name"], variant="table"):
+ self.endTagTr(impliedTagToken("tr"))
+ return token
+ else:
+ self.parser.parseError()
+
+ def endTagIgnore(self, token):
+ self.parser.parseError("unexpected-end-tag-in-table-row",
+ {"name": token["name"]})
+
+ def endTagOther(self, token):
+ return self.parser.phases["inTable"].processEndTag(token)
+
+ class InCellPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
+ "thead", "tr"), self.startTagTableOther)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ (("td", "th"), self.endTagTableCell),
+ (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
+ (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ # helper
+ def closeCell(self):
+ if self.tree.elementInScope("td", variant="table"):
+ self.endTagTableCell(impliedTagToken("td"))
+ elif self.tree.elementInScope("th", variant="table"):
+ self.endTagTableCell(impliedTagToken("th"))
+
+ # the rest
+ def processEOF(self):
+ self.parser.phases["inBody"].processEOF()
+
+ def processCharacters(self, token):
+ return self.parser.phases["inBody"].processCharacters(token)
+
+ def startTagTableOther(self, token):
+ if (self.tree.elementInScope("td", variant="table") or
+ self.tree.elementInScope("th", variant="table")):
+ self.closeCell()
+ return token
+ else:
+ # innerHTML case
+ assert self.parser.innerHTML
+ self.parser.parseError()
+
+ def startTagOther(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def endTagTableCell(self, token):
+ if self.tree.elementInScope(token["name"], variant="table"):
+ self.tree.generateImpliedEndTags(token["name"])
+ if self.tree.openElements[-1].name != token["name"]:
+ self.parser.parseError("unexpected-cell-end-tag",
+ {"name": token["name"]})
+ while True:
+ node = self.tree.openElements.pop()
+ if node.name == token["name"]:
+ break
+ else:
+ self.tree.openElements.pop()
+ self.tree.clearActiveFormattingElements()
+ self.parser.phase = self.parser.phases["inRow"]
+ else:
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+ def endTagIgnore(self, token):
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+ def endTagImply(self, token):
+ if self.tree.elementInScope(token["name"], variant="table"):
+ self.closeCell()
+ return token
+ else:
+ # sometimes innerHTML case
+ self.parser.parseError()
+
+ def endTagOther(self, token):
+ return self.parser.phases["inBody"].processEndTag(token)
+
+ class InSelectPhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("option", self.startTagOption),
+ ("optgroup", self.startTagOptgroup),
+ ("select", self.startTagSelect),
+ (("input", "keygen", "textarea"), self.startTagInput),
+ ("script", self.startTagScript)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ ("option", self.endTagOption),
+ ("optgroup", self.endTagOptgroup),
+ ("select", self.endTagSelect)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-select
+ def processEOF(self):
+ if self.tree.openElements[-1].name != "html":
+ self.parser.parseError("eof-in-select")
+ else:
+ assert self.parser.innerHTML
+
+ def processCharacters(self, token):
+ if token["data"] == "\u0000":
+ return
+ self.tree.insertText(token["data"])
+
+ def startTagOption(self, token):
+ # We need to imply </option> if <option> is the current node.
+ if self.tree.openElements[-1].name == "option":
+ self.tree.openElements.pop()
+ self.tree.insertElement(token)
+
+ def startTagOptgroup(self, token):
+ if self.tree.openElements[-1].name == "option":
+ self.tree.openElements.pop()
+ if self.tree.openElements[-1].name == "optgroup":
+ self.tree.openElements.pop()
+ self.tree.insertElement(token)
+
+ def startTagSelect(self, token):
+ self.parser.parseError("unexpected-select-in-select")
+ self.endTagSelect(impliedTagToken("select"))
+
+ def startTagInput(self, token):
+ self.parser.parseError("unexpected-input-in-select")
+ if self.tree.elementInScope("select", variant="select"):
+ self.endTagSelect(impliedTagToken("select"))
+ return token
+ else:
+ assert self.parser.innerHTML
+
+ def startTagScript(self, token):
+ return self.parser.phases["inHead"].processStartTag(token)
+
+ def startTagOther(self, token):
+ self.parser.parseError("unexpected-start-tag-in-select",
+ {"name": token["name"]})
+
+ def endTagOption(self, token):
+ if self.tree.openElements[-1].name == "option":
+ self.tree.openElements.pop()
+ else:
+ self.parser.parseError("unexpected-end-tag-in-select",
+ {"name": "option"})
+
+ def endTagOptgroup(self, token):
+ # </optgroup> implicitly closes <option>
+ if (self.tree.openElements[-1].name == "option" and
+ self.tree.openElements[-2].name == "optgroup"):
+ self.tree.openElements.pop()
+ # It also closes </optgroup>
+ if self.tree.openElements[-1].name == "optgroup":
+ self.tree.openElements.pop()
+ # But nothing else
+ else:
+ self.parser.parseError("unexpected-end-tag-in-select",
+ {"name": "optgroup"})
+
+ def endTagSelect(self, token):
+ if self.tree.elementInScope("select", variant="select"):
+ node = self.tree.openElements.pop()
+ while node.name != "select":
+ node = self.tree.openElements.pop()
+ self.parser.resetInsertionMode()
+ else:
+ # innerHTML case
+ assert self.parser.innerHTML
+ self.parser.parseError()
+
+ def endTagOther(self, token):
+ self.parser.parseError("unexpected-end-tag-in-select",
+ {"name": token["name"]})
+
+ class InSelectInTablePhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
+ self.startTagTable)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
+ self.endTagTable)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ def processEOF(self):
+ self.parser.phases["inSelect"].processEOF()
+
+ def processCharacters(self, token):
+ return self.parser.phases["inSelect"].processCharacters(token)
+
+ def startTagTable(self, token):
+ self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
+ self.endTagOther(impliedTagToken("select"))
+ return token
+
+ def startTagOther(self, token):
+ return self.parser.phases["inSelect"].processStartTag(token)
+
+ def endTagTable(self, token):
+ self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
+ if self.tree.elementInScope(token["name"], variant="table"):
+ self.endTagOther(impliedTagToken("select"))
+ return token
+
+ def endTagOther(self, token):
+ return self.parser.phases["inSelect"].processEndTag(token)
+
+ class InForeignContentPhase(Phase):
+ breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
+ "center", "code", "dd", "div", "dl", "dt",
+ "em", "embed", "h1", "h2", "h3",
+ "h4", "h5", "h6", "head", "hr", "i", "img",
+ "li", "listing", "menu", "meta", "nobr",
+ "ol", "p", "pre", "ruby", "s", "small",
+ "span", "strong", "strike", "sub", "sup",
+ "table", "tt", "u", "ul", "var"])
+
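+        # The breakoutElements above are HTML tags that, when seen as start tags
+        # inside SVG/MathML content, pop the parser back out of foreign content
+        # and reprocess the token as ordinary HTML (see processStartTag below).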
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ def adjustSVGTagNames(self, token):
+ replacements = {"altglyph": "altGlyph",
+ "altglyphdef": "altGlyphDef",
+ "altglyphitem": "altGlyphItem",
+ "animatecolor": "animateColor",
+ "animatemotion": "animateMotion",
+ "animatetransform": "animateTransform",
+ "clippath": "clipPath",
+ "feblend": "feBlend",
+ "fecolormatrix": "feColorMatrix",
+ "fecomponenttransfer": "feComponentTransfer",
+ "fecomposite": "feComposite",
+ "feconvolvematrix": "feConvolveMatrix",
+ "fediffuselighting": "feDiffuseLighting",
+ "fedisplacementmap": "feDisplacementMap",
+ "fedistantlight": "feDistantLight",
+ "feflood": "feFlood",
+ "fefunca": "feFuncA",
+ "fefuncb": "feFuncB",
+ "fefuncg": "feFuncG",
+ "fefuncr": "feFuncR",
+ "fegaussianblur": "feGaussianBlur",
+ "feimage": "feImage",
+ "femerge": "feMerge",
+ "femergenode": "feMergeNode",
+ "femorphology": "feMorphology",
+ "feoffset": "feOffset",
+ "fepointlight": "fePointLight",
+ "fespecularlighting": "feSpecularLighting",
+ "fespotlight": "feSpotLight",
+ "fetile": "feTile",
+ "feturbulence": "feTurbulence",
+ "foreignobject": "foreignObject",
+ "glyphref": "glyphRef",
+ "lineargradient": "linearGradient",
+ "radialgradient": "radialGradient",
+ "textpath": "textPath"}
+
+ if token["name"] in replacements:
+ token["name"] = replacements[token["name"]]
+
+ def processCharacters(self, token):
+ if token["data"] == "\u0000":
+ token["data"] = "\uFFFD"
+ elif (self.parser.framesetOK and
+ any(char not in spaceCharacters for char in token["data"])):
+ self.parser.framesetOK = False
+ Phase.processCharacters(self, token)
+
+ def processStartTag(self, token):
+ currentNode = self.tree.openElements[-1]
+ if (token["name"] in self.breakoutElements or
+ (token["name"] == "font" and
+ set(token["data"].keys()) & set(["color", "face", "size"]))):
+ self.parser.parseError("unexpected-html-element-in-foreign-content",
+ {"name": token["name"]})
+ while (self.tree.openElements[-1].namespace !=
+ self.tree.defaultNamespace and
+ not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
+ not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
+ self.tree.openElements.pop()
+ return token
+
+ else:
+ if currentNode.namespace == namespaces["mathml"]:
+ self.parser.adjustMathMLAttributes(token)
+ elif currentNode.namespace == namespaces["svg"]:
+ self.adjustSVGTagNames(token)
+ self.parser.adjustSVGAttributes(token)
+ self.parser.adjustForeignAttributes(token)
+ token["namespace"] = currentNode.namespace
+ self.tree.insertElement(token)
+ if token["selfClosing"]:
+ self.tree.openElements.pop()
+ token["selfClosingAcknowledged"] = True
+
+ def processEndTag(self, token):
+ nodeIndex = len(self.tree.openElements) - 1
+ node = self.tree.openElements[-1]
+ if node.name != token["name"]:
+ self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+ while True:
+ if node.name.translate(asciiUpper2Lower) == token["name"]:
+ # XXX this isn't in the spec but it seems necessary
+ if self.parser.phase == self.parser.phases["inTableText"]:
+ self.parser.phase.flushCharacters()
+ self.parser.phase = self.parser.phase.originalPhase
+ while self.tree.openElements.pop() != node:
+ assert self.tree.openElements
+ new_token = None
+ break
+ nodeIndex -= 1
+
+ node = self.tree.openElements[nodeIndex]
+ if node.namespace != self.tree.defaultNamespace:
+ continue
+ else:
+ new_token = self.parser.phase.processEndTag(token)
+ break
+ return new_token
+
+ class AfterBodyPhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
+ self.endTagHandler.default = self.endTagOther
+
+ def processEOF(self):
+ # Stop parsing
+ pass
+
+ def processComment(self, token):
+ # This is needed because data is to be appended to the <html> element
+ # here and not to whatever is currently open.
+ self.tree.insertComment(token, self.tree.openElements[0])
+
+ def processCharacters(self, token):
+ self.parser.parseError("unexpected-char-after-body")
+ self.parser.phase = self.parser.phases["inBody"]
+ return token
+
+ def startTagHtml(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def startTagOther(self, token):
+ self.parser.parseError("unexpected-start-tag-after-body",
+ {"name": token["name"]})
+ self.parser.phase = self.parser.phases["inBody"]
+ return token
+
+        def endTagHtml(self, token):
+ if self.parser.innerHTML:
+ self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
+ else:
+ self.parser.phase = self.parser.phases["afterAfterBody"]
+
+ def endTagOther(self, token):
+ self.parser.parseError("unexpected-end-tag-after-body",
+ {"name": token["name"]})
+ self.parser.phase = self.parser.phases["inBody"]
+ return token
+
+ class InFramesetPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("frameset", self.startTagFrameset),
+ ("frame", self.startTagFrame),
+ ("noframes", self.startTagNoframes)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ ("frameset", self.endTagFrameset)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ def processEOF(self):
+ if self.tree.openElements[-1].name != "html":
+ self.parser.parseError("eof-in-frameset")
+ else:
+ assert self.parser.innerHTML
+
+ def processCharacters(self, token):
+ self.parser.parseError("unexpected-char-in-frameset")
+
+ def startTagFrameset(self, token):
+ self.tree.insertElement(token)
+
+ def startTagFrame(self, token):
+ self.tree.insertElement(token)
+ self.tree.openElements.pop()
+
+ def startTagNoframes(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def startTagOther(self, token):
+ self.parser.parseError("unexpected-start-tag-in-frameset",
+ {"name": token["name"]})
+
+ def endTagFrameset(self, token):
+ if self.tree.openElements[-1].name == "html":
+ # innerHTML case
+ self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
+ else:
+ self.tree.openElements.pop()
+ if (not self.parser.innerHTML and
+ self.tree.openElements[-1].name != "frameset"):
+                # If we're not in innerHTML mode and the current node is not a
+ # "frameset" element (anymore) then switch.
+ self.parser.phase = self.parser.phases["afterFrameset"]
+
+ def endTagOther(self, token):
+ self.parser.parseError("unexpected-end-tag-in-frameset",
+ {"name": token["name"]})
+
+ class AfterFramesetPhase(Phase):
+ # http://www.whatwg.org/specs/web-apps/current-work/#after3
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("noframes", self.startTagNoframes)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ self.endTagHandler = utils.MethodDispatcher([
+ ("html", self.endTagHtml)
+ ])
+ self.endTagHandler.default = self.endTagOther
+
+ def processEOF(self):
+ # Stop parsing
+ pass
+
+ def processCharacters(self, token):
+ self.parser.parseError("unexpected-char-after-frameset")
+
+ def startTagNoframes(self, token):
+ return self.parser.phases["inHead"].processStartTag(token)
+
+ def startTagOther(self, token):
+ self.parser.parseError("unexpected-start-tag-after-frameset",
+ {"name": token["name"]})
+
+ def endTagHtml(self, token):
+ self.parser.phase = self.parser.phases["afterAfterFrameset"]
+
+ def endTagOther(self, token):
+ self.parser.parseError("unexpected-end-tag-after-frameset",
+ {"name": token["name"]})
+
+ class AfterAfterBodyPhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ def processEOF(self):
+ pass
+
+ def processComment(self, token):
+ self.tree.insertComment(token, self.tree.document)
+
+ def processSpaceCharacters(self, token):
+ return self.parser.phases["inBody"].processSpaceCharacters(token)
+
+ def processCharacters(self, token):
+ self.parser.parseError("expected-eof-but-got-char")
+ self.parser.phase = self.parser.phases["inBody"]
+ return token
+
+ def startTagHtml(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def startTagOther(self, token):
+ self.parser.parseError("expected-eof-but-got-start-tag",
+ {"name": token["name"]})
+ self.parser.phase = self.parser.phases["inBody"]
+ return token
+
+ def processEndTag(self, token):
+ self.parser.parseError("expected-eof-but-got-end-tag",
+ {"name": token["name"]})
+ self.parser.phase = self.parser.phases["inBody"]
+ return token
+
+ class AfterAfterFramesetPhase(Phase):
+ def __init__(self, parser, tree):
+ Phase.__init__(self, parser, tree)
+
+ self.startTagHandler = utils.MethodDispatcher([
+ ("html", self.startTagHtml),
+ ("noframes", self.startTagNoFrames)
+ ])
+ self.startTagHandler.default = self.startTagOther
+
+ def processEOF(self):
+ pass
+
+ def processComment(self, token):
+ self.tree.insertComment(token, self.tree.document)
+
+ def processSpaceCharacters(self, token):
+ return self.parser.phases["inBody"].processSpaceCharacters(token)
+
+ def processCharacters(self, token):
+ self.parser.parseError("expected-eof-but-got-char")
+
+ def startTagHtml(self, token):
+ return self.parser.phases["inBody"].processStartTag(token)
+
+ def startTagNoFrames(self, token):
+ return self.parser.phases["inHead"].processStartTag(token)
+
+ def startTagOther(self, token):
+ self.parser.parseError("expected-eof-but-got-start-tag",
+ {"name": token["name"]})
+
+ def processEndTag(self, token):
+ self.parser.parseError("expected-eof-but-got-end-tag",
+ {"name": token["name"]})
+
+ return {
+ "initial": InitialPhase,
+ "beforeHtml": BeforeHtmlPhase,
+ "beforeHead": BeforeHeadPhase,
+ "inHead": InHeadPhase,
+ # XXX "inHeadNoscript": InHeadNoScriptPhase,
+ "afterHead": AfterHeadPhase,
+ "inBody": InBodyPhase,
+ "text": TextPhase,
+ "inTable": InTablePhase,
+ "inTableText": InTableTextPhase,
+ "inCaption": InCaptionPhase,
+ "inColumnGroup": InColumnGroupPhase,
+ "inTableBody": InTableBodyPhase,
+ "inRow": InRowPhase,
+ "inCell": InCellPhase,
+ "inSelect": InSelectPhase,
+ "inSelectInTable": InSelectInTablePhase,
+ "inForeignContent": InForeignContentPhase,
+ "afterBody": AfterBodyPhase,
+ "inFrameset": InFramesetPhase,
+ "afterFrameset": AfterFramesetPhase,
+ "afterAfterBody": AfterAfterBodyPhase,
+ "afterAfterFrameset": AfterAfterFramesetPhase,
+ # XXX after after frameset
+ }
+
+
+def impliedTagToken(name, type="EndTag", attributes=None,
+ selfClosing=False):
+ if attributes is None:
+ attributes = {}
+ return {"type": tokenTypes[type], "name": name, "data": attributes,
+ "selfClosing": selfClosing}
+
+
+class ParseError(Exception):
+ """Error in parsed document"""
+ pass
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/ihatexml.py b/testing/web-platform/tests/tools/html5lib/html5lib/ihatexml.py
new file mode 100644
index 000000000..0fc79308e
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/ihatexml.py
@@ -0,0 +1,285 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import re
+import warnings
+
+from .constants import DataLossWarning
+
+baseChar = """
+[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
+[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
+[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
+[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
+[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
+[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
+[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
+[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
+[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
+[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
+[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
+[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
+[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
+[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
+[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
+[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
+[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
+[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
+[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
+[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
+[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
+[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
+[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
+[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
+[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
+[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
+[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
+[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
+[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
+[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
+#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
+#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
+#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
+[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
+[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
+#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
+[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
+[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
+[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
+[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
+[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
+#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
+[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
+[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
+[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
+[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
+
+ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
+
+combiningCharacter = """
+[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
+[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
+[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
+[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
+#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
+[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
+[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
+#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
+[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
+[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
+#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
+[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
+[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
+[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
+[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
+[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
+#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
+[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
+#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
+[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
+[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
+#x3099 | #x309A"""
+
+digit = """
+[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
+[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
+[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
+[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
+
+extender = """
+#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
+[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
+
+letter = " | ".join([baseChar, ideographic])
+
+# Without the ":" (these are NCName-style names, so the colon is excluded)
+name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
+ extender])
+nameFirst = " | ".join([letter, "_"])
+
+reChar = re.compile(r"#x([\d|A-F]{4,4})")
+reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
+
+
+def charStringToList(chars):
+ charRanges = [item.strip() for item in chars.split(" | ")]
+ rv = []
+ for item in charRanges:
+ foundMatch = False
+ for regexp in (reChar, reCharRange):
+ match = regexp.match(item)
+ if match is not None:
+ rv.append([hexToInt(item) for item in match.groups()])
+ if len(rv[-1]) == 1:
+ rv[-1] = rv[-1] * 2
+ foundMatch = True
+ break
+ if not foundMatch:
+ assert len(item) == 1
+
+ rv.append([ord(item)] * 2)
+ rv = normaliseCharList(rv)
+ return rv
+
+
+def normaliseCharList(charList):
+ charList = sorted(charList)
+ for item in charList:
+ assert item[1] >= item[0]
+ rv = []
+ i = 0
+ while i < len(charList):
+ j = 1
+ rv.append(charList[i])
+ while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
+ rv[-1][1] = charList[i + j][1]
+ j += 1
+ i += j
+ return rv
+
+# We don't really support characters above the BMP :(
+max_unicode = int("FFFF", 16)
+
+
+def missingRanges(charList):
+ rv = []
+    if charList[0][0] != 0:
+ rv.append([0, charList[0][0] - 1])
+ for i, item in enumerate(charList[:-1]):
+ rv.append([item[1] + 1, charList[i + 1][0] - 1])
+ if charList[-1][1] != max_unicode:
+ rv.append([charList[-1][1] + 1, max_unicode])
+ return rv
+
+
+def listToRegexpStr(charList):
+ rv = []
+ for item in charList:
+ if item[0] == item[1]:
+ rv.append(escapeRegexp(chr(item[0])))
+ else:
+ rv.append(escapeRegexp(chr(item[0])) + "-" +
+ escapeRegexp(chr(item[1])))
+ return "[%s]" % "".join(rv)
+
+
+def hexToInt(hex_str):
+ return int(hex_str, 16)
+
+
+def escapeRegexp(string):
+ specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
+ "[", "]", "|", "(", ")", "-")
+ for char in specialCharacters:
+ string = string.replace(char, "\\" + char)
+
+ return string
+
+# output from the above
+nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
+
+nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
+
+# Simpler things
+nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
+
+
+class InfosetFilter(object):
+ replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
+
+ def __init__(self, replaceChars=None,
+ dropXmlnsLocalName=False,
+ dropXmlnsAttrNs=False,
+ preventDoubleDashComments=False,
+ preventDashAtCommentEnd=False,
+ replaceFormFeedCharacters=True,
+ preventSingleQuotePubid=False):
+
+ self.dropXmlnsLocalName = dropXmlnsLocalName
+ self.dropXmlnsAttrNs = dropXmlnsAttrNs
+
+ self.preventDoubleDashComments = preventDoubleDashComments
+ self.preventDashAtCommentEnd = preventDashAtCommentEnd
+
+ self.replaceFormFeedCharacters = replaceFormFeedCharacters
+
+ self.preventSingleQuotePubid = preventSingleQuotePubid
+
+ self.replaceCache = {}
+
+ def coerceAttribute(self, name, namespace=None):
+ if self.dropXmlnsLocalName and name.startswith("xmlns:"):
+ warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
+ return None
+ elif (self.dropXmlnsAttrNs and
+ namespace == "http://www.w3.org/2000/xmlns/"):
+ warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
+ return None
+ else:
+ return self.toXmlName(name)
+
+ def coerceElement(self, name, namespace=None):
+ return self.toXmlName(name)
+
+ def coerceComment(self, data):
+ if self.preventDoubleDashComments:
+ while "--" in data:
+ warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
+ data = data.replace("--", "- -")
+ return data
+
+ def coerceCharacters(self, data):
+ if self.replaceFormFeedCharacters:
+ for i in range(data.count("\x0C")):
+ warnings.warn("Text cannot contain U+000C", DataLossWarning)
+ data = data.replace("\x0C", " ")
+ # Other non-xml characters
+ return data
+
+ def coercePubid(self, data):
+ dataOutput = data
+ for char in nonPubidCharRegexp.findall(data):
+ warnings.warn("Coercing non-XML pubid", DataLossWarning)
+ replacement = self.getReplacementCharacter(char)
+ dataOutput = dataOutput.replace(char, replacement)
+ if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
+ warnings.warn("Pubid cannot contain single quote", DataLossWarning)
+ dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
+ return dataOutput
+
+ def toXmlName(self, name):
+ nameFirst = name[0]
+ nameRest = name[1:]
+ m = nonXmlNameFirstBMPRegexp.match(nameFirst)
+ if m:
+ warnings.warn("Coercing non-XML name", DataLossWarning)
+ nameFirstOutput = self.getReplacementCharacter(nameFirst)
+ else:
+ nameFirstOutput = nameFirst
+
+ nameRestOutput = nameRest
+ replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
+ for char in replaceChars:
+ warnings.warn("Coercing non-XML name", DataLossWarning)
+ replacement = self.getReplacementCharacter(char)
+ nameRestOutput = nameRestOutput.replace(char, replacement)
+ return nameFirstOutput + nameRestOutput
+
+ def getReplacementCharacter(self, char):
+ if char in self.replaceCache:
+ replacement = self.replaceCache[char]
+ else:
+ replacement = self.escapeChar(char)
+ return replacement
+
+ def fromXmlName(self, name):
+ for item in set(self.replacementRegexp.findall(name)):
+ name = name.replace(item, self.unescapeChar(item))
+ return name
+
+ def escapeChar(self, char):
+ replacement = "U%05X" % ord(char)
+ self.replaceCache[char] = replacement
+ return replacement
+
+ def unescapeChar(self, charcode):
+ return chr(int(charcode[1:], 16))
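+
+# Round-trip sketch (illustrative): characters that are not legal in XML names
+# are escaped to a "U%05X" form and can be restored later, e.g.
+#   f = InfosetFilter()
+#   f.toXmlName("1bad")         # -> "U00031bad" (leading digit; DataLossWarning)
+#   f.fromXmlName("U00031bad")  # -> "1bad"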
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/inputstream.py b/testing/web-platform/tests/tools/html5lib/html5lib/inputstream.py
new file mode 100644
index 000000000..9e03b9313
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/inputstream.py
@@ -0,0 +1,886 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import text_type
+from six.moves import http_client
+
+import codecs
+import re
+
+from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
+from .constants import encodings, ReparseException
+from . import utils
+
+from io import StringIO
+
+try:
+ from io import BytesIO
+except ImportError:
+ BytesIO = StringIO
+
+try:
+ from io import BufferedIOBase
+except ImportError:
+ class BufferedIOBase(object):
+ pass
+
+# Non-unicode versions of constants for use in the pre-parser
+spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
+asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
+asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
+spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
+
+invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
+
+non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
+ 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
+ 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
+ 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
+ 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
+ 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
+ 0x10FFFE, 0x10FFFF])
+
+ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
+
+# Cache for charsUntil()
+charsUntilRegEx = {}
+
+
+class BufferedStream(object):
+ """Buffering for streams that do not have buffering of their own
+
+    The buffer is implemented as a list of chunks on the assumption that
+    repeatedly concatenating onto a single string would be slow, since that
+    is O(n**2).
+ """
+
+ def __init__(self, stream):
+ self.stream = stream
+ self.buffer = []
+ self.position = [-1, 0] # chunk number, offset
+
+ def tell(self):
+ pos = 0
+ for chunk in self.buffer[:self.position[0]]:
+ pos += len(chunk)
+ pos += self.position[1]
+ return pos
+
+ def seek(self, pos):
+ assert pos <= self._bufferedBytes()
+ offset = pos
+ i = 0
+ while len(self.buffer[i]) < offset:
+ offset -= len(self.buffer[i])
+ i += 1
+ self.position = [i, offset]
+
+ def read(self, bytes):
+ if not self.buffer:
+ return self._readStream(bytes)
+ elif (self.position[0] == len(self.buffer) and
+ self.position[1] == len(self.buffer[-1])):
+ return self._readStream(bytes)
+ else:
+ return self._readFromBuffer(bytes)
+
+ def _bufferedBytes(self):
+ return sum([len(item) for item in self.buffer])
+
+ def _readStream(self, bytes):
+ data = self.stream.read(bytes)
+ self.buffer.append(data)
+ self.position[0] += 1
+ self.position[1] = len(data)
+ return data
+
+ def _readFromBuffer(self, bytes):
+ remainingBytes = bytes
+ rv = []
+ bufferIndex = self.position[0]
+ bufferOffset = self.position[1]
+ while bufferIndex < len(self.buffer) and remainingBytes != 0:
+ assert remainingBytes > 0
+ bufferedData = self.buffer[bufferIndex]
+
+ if remainingBytes <= len(bufferedData) - bufferOffset:
+ bytesToRead = remainingBytes
+ self.position = [bufferIndex, bufferOffset + bytesToRead]
+ else:
+ bytesToRead = len(bufferedData) - bufferOffset
+ self.position = [bufferIndex, len(bufferedData)]
+ bufferIndex += 1
+ rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
+ remainingBytes -= bytesToRead
+
+ bufferOffset = 0
+
+ if remainingBytes:
+ rv.append(self._readStream(remainingBytes))
+
+ return b"".join(rv)
+
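+# Usage sketch (illustrative): BufferedStream wraps a source that only supports
+# sequential read(), retaining what has been read so that tell() and seek()
+# work over already-consumed data, e.g.
+#   buffered = BufferedStream(raw)   # raw: any object with a .read(n) method
+#   head = buffered.read(1024)       # the bytes read are kept in buffered.buffer
+#   buffered.seek(0)                 # rewind within the retained chunks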
+
+def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
+ if isinstance(source, http_client.HTTPResponse):
+ # Work around Python bug #20007: read(0) closes the connection.
+ # http://bugs.python.org/issue20007
+ isUnicode = False
+ elif hasattr(source, "read"):
+ isUnicode = isinstance(source.read(0), text_type)
+ else:
+ isUnicode = isinstance(source, text_type)
+
+ if isUnicode:
+ if encoding is not None:
+ raise TypeError("Cannot explicitly set an encoding with a unicode string")
+
+ return HTMLUnicodeInputStream(source)
+ else:
+ return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
+
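+# In short (illustrative, Python 3 semantics): already-decoded text is handed to
+# HTMLUnicodeInputStream, while byte streams and HTTP responses go to
+# HTMLBinaryInputStream, which layers encoding detection on top, e.g.
+#   HTMLInputStream("<p>hi</p>")    # -> HTMLUnicodeInputStream
+#   HTMLInputStream(b"<p>hi</p>")   # -> HTMLBinaryInputStream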
+
+class HTMLUnicodeInputStream(object):
+ """Provides a unicode stream of characters to the HTMLTokenizer.
+
+ This class takes care of character encoding and removing or replacing
+ incorrect byte-sequences and also provides column and line tracking.
+
+ """
+
+ _defaultChunkSize = 10240
+
+ def __init__(self, source):
+ """Initialises the HTMLInputStream.
+
+ HTMLInputStream(source, [encoding]) -> Normalized stream from source
+ for use by html5lib.
+
+ source can be either a file-object, local filename or a string.
+
+ The optional encoding parameter must be a string that indicates
+ the encoding. If specified, that encoding will be used,
+ regardless of any BOM or later declaration (such as in a meta
+ element)
+
+ parseMeta - Look for a <meta> element containing encoding information
+
+ """
+
+ # Craziness
+ if len("\U0010FFFF") == 1:
+ self.reportCharacterErrors = self.characterErrorsUCS4
+ self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
+ else:
+ self.reportCharacterErrors = self.characterErrorsUCS2
+ self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
+
+ # List of where new lines occur
+ self.newLines = [0]
+
+ self.charEncoding = ("utf-8", "certain")
+ self.dataStream = self.openStream(source)
+
+ self.reset()
+
+ def reset(self):
+ self.chunk = ""
+ self.chunkSize = 0
+ self.chunkOffset = 0
+ self.errors = []
+
+ # number of (complete) lines in previous chunks
+ self.prevNumLines = 0
+ # number of columns in the last line of the previous chunk
+ self.prevNumCols = 0
+
+ # Deal with CR LF and surrogates split over chunk boundaries
+ self._bufferedCharacter = None
+
+ def openStream(self, source):
+ """Produces a file object from source.
+
+        source can be either a file-like object or a string holding the markup itself.
+
+ """
+ # Already a file object
+ if hasattr(source, 'read'):
+ stream = source
+ else:
+ stream = StringIO(source)
+
+ return stream
+
+ def _position(self, offset):
+ chunk = self.chunk
+ nLines = chunk.count('\n', 0, offset)
+ positionLine = self.prevNumLines + nLines
+ lastLinePos = chunk.rfind('\n', 0, offset)
+ if lastLinePos == -1:
+ positionColumn = self.prevNumCols + offset
+ else:
+ positionColumn = offset - (lastLinePos + 1)
+ return (positionLine, positionColumn)
+
+ def position(self):
+ """Returns (line, col) of the current position in the stream."""
+ line, col = self._position(self.chunkOffset)
+ return (line + 1, col)
+
+ def char(self):
+ """ Read one character from the stream or queue if available. Return
+ EOF when EOF is reached.
+ """
+ # Read a new chunk from the input stream if necessary
+ if self.chunkOffset >= self.chunkSize:
+ if not self.readChunk():
+ return EOF
+
+ chunkOffset = self.chunkOffset
+ char = self.chunk[chunkOffset]
+ self.chunkOffset = chunkOffset + 1
+
+ return char
+
+ def readChunk(self, chunkSize=None):
+ if chunkSize is None:
+ chunkSize = self._defaultChunkSize
+
+ self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
+
+ self.chunk = ""
+ self.chunkSize = 0
+ self.chunkOffset = 0
+
+ data = self.dataStream.read(chunkSize)
+
+ # Deal with CR LF and surrogates broken across chunks
+ if self._bufferedCharacter:
+ data = self._bufferedCharacter + data
+ self._bufferedCharacter = None
+ elif not data:
+ # We have no more data, bye-bye stream
+ return False
+
+ if len(data) > 1:
+ lastv = ord(data[-1])
+ if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
+ self._bufferedCharacter = data[-1]
+ data = data[:-1]
+
+ self.reportCharacterErrors(data)
+
+ # Replace invalid characters
+ # Note U+0000 is dealt with in the tokenizer
+ data = self.replaceCharactersRegexp.sub("\ufffd", data)
+
+ data = data.replace("\r\n", "\n")
+ data = data.replace("\r", "\n")
+
+ self.chunk = data
+ self.chunkSize = len(data)
+
+ return True
+
+ def characterErrorsUCS4(self, data):
+ for i in range(len(invalid_unicode_re.findall(data))):
+ self.errors.append("invalid-codepoint")
+
+ def characterErrorsUCS2(self, data):
+ # Someone picked the wrong compile option
+ # You lose
+ skip = False
+ for match in invalid_unicode_re.finditer(data):
+ if skip:
+ continue
+ codepoint = ord(match.group())
+ pos = match.start()
+ # Pretty sure there should be endianness issues here
+ if utils.isSurrogatePair(data[pos:pos + 2]):
+ # We have a surrogate pair!
+ char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
+ if char_val in non_bmp_invalid_codepoints:
+ self.errors.append("invalid-codepoint")
+ skip = True
+ elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
+ pos == len(data) - 1):
+ self.errors.append("invalid-codepoint")
+ else:
+ skip = False
+ self.errors.append("invalid-codepoint")
+
+ def charsUntil(self, characters, opposite=False):
+ """ Returns a string of characters from the stream up to but not
+ including any character in 'characters' or EOF. 'characters' must be
+ a container that supports the 'in' method and iteration over its
+ characters.
+ """
+
+ # Use a cache of regexps to find the required characters
+ try:
+ chars = charsUntilRegEx[(characters, opposite)]
+ except KeyError:
+ if __debug__:
+ for c in characters:
+ assert(ord(c) < 128)
+ regex = "".join(["\\x%02x" % ord(c) for c in characters])
+ if not opposite:
+ regex = "^%s" % regex
+ chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
+
+ rv = []
+
+ while True:
+ # Find the longest matching prefix
+ m = chars.match(self.chunk, self.chunkOffset)
+ if m is None:
+ # If nothing matched, and it wasn't because we ran out of chunk,
+ # then stop
+ if self.chunkOffset != self.chunkSize:
+ break
+ else:
+ end = m.end()
+ # If not the whole chunk matched, return everything
+ # up to the part that didn't match
+ if end != self.chunkSize:
+ rv.append(self.chunk[self.chunkOffset:end])
+ self.chunkOffset = end
+ break
+ # If the whole remainder of the chunk matched,
+ # use it all and read the next chunk
+ rv.append(self.chunk[self.chunkOffset:])
+ if not self.readChunk():
+ # Reached EOF
+ break
+
+ r = "".join(rv)
+ return r
+
+ def unget(self, char):
+ # Only one character is allowed to be ungotten at once - it must
+ # be consumed again before any further call to unget
+ if char is not None:
+ if self.chunkOffset == 0:
+ # unget is called quite rarely, so it's a good idea to do
+ # more work here if it saves a bit of work in the frequently
+ # called char and charsUntil.
+ # So, just prepend the ungotten character onto the current
+ # chunk:
+ self.chunk = char + self.chunk
+ self.chunkSize += 1
+ else:
+ self.chunkOffset -= 1
+ assert self.chunk[self.chunkOffset] == char
+
+
+class HTMLBinaryInputStream(HTMLUnicodeInputStream):
+ """Provides a unicode stream of characters to the HTMLTokenizer.
+
+ This class takes care of character encoding and removing or replacing
+ incorrect byte-sequences and also provides column and line tracking.
+
+ """
+
+ def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
+ """Initialises the HTMLInputStream.
+
+ HTMLInputStream(source, [encoding]) -> Normalized stream from source
+ for use by html5lib.
+
+ source can be either a file-object or a byte string.
+
+ The optional encoding parameter must be a string that indicates
+ the encoding. If specified, that encoding will be used,
+ regardless of any BOM or later declaration (such as in a meta
+ element)
+
+ parseMeta - Look for a <meta> element containing encoding information
+
+ """
+ # Raw Stream - for unicode objects this will encode to utf-8 and set
+ # self.charEncoding as appropriate
+ self.rawStream = self.openStream(source)
+
+ HTMLUnicodeInputStream.__init__(self, self.rawStream)
+
+ self.charEncoding = (codecName(encoding), "certain")
+
+ # Encoding Information
+ # Number of bytes to use when looking for a meta element with
+ # encoding information
+ self.numBytesMeta = 512
+ # Number of bytes to use when detecting encoding using chardet
+ self.numBytesChardet = 100
+ # Encoding to use if no other information can be found
+ self.defaultEncoding = "windows-1252"
+
+ # Detect encoding iff no explicit "transport level" encoding is supplied
+ if (self.charEncoding[0] is None):
+ self.charEncoding = self.detectEncoding(parseMeta, chardet)
+
+ # Call superclass
+ self.reset()
+
+ def reset(self):
+ self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
+ 'replace')
+ HTMLUnicodeInputStream.reset(self)
+
+ def openStream(self, source):
+ """Produces a file object from source.
+
+ source can be either a file object or a bytes object; plain bytes are wrapped in a BytesIO.
+
+ """
+ # Already a file object
+ if hasattr(source, 'read'):
+ stream = source
+ else:
+ stream = BytesIO(source)
+
+ try:
+ stream.seek(stream.tell())
+ except:
+ stream = BufferedStream(stream)
+
+ return stream
+
+ def detectEncoding(self, parseMeta=True, chardet=True):
+ # First look for a BOM
+ # This will also read past the BOM if present
+ encoding = self.detectBOM()
+ confidence = "certain"
+ # If there is no BOM need to look for meta elements with encoding
+ # information
+ if encoding is None and parseMeta:
+ encoding = self.detectEncodingMeta()
+ confidence = "tentative"
+ # Guess with chardet, if available
+ if encoding is None and chardet:
+ confidence = "tentative"
+ try:
+ try:
+ from charade.universaldetector import UniversalDetector
+ except ImportError:
+ from chardet.universaldetector import UniversalDetector
+ buffers = []
+ detector = UniversalDetector()
+ while not detector.done:
+ buffer = self.rawStream.read(self.numBytesChardet)
+ assert isinstance(buffer, bytes)
+ if not buffer:
+ break
+ buffers.append(buffer)
+ detector.feed(buffer)
+ detector.close()
+ encoding = detector.result['encoding']
+ self.rawStream.seek(0)
+ except ImportError:
+ pass
+ # If all else fails use the default encoding
+ if encoding is None:
+ confidence = "tentative"
+ encoding = self.defaultEncoding
+
+ # Substitute for equivalent encodings:
+ encodingSub = {"iso-8859-1": "windows-1252"}
+
+ if encoding.lower() in encodingSub:
+ encoding = encodingSub[encoding.lower()]
+
+ return encoding, confidence
+
+ def changeEncoding(self, newEncoding):
+ assert self.charEncoding[1] != "certain"
+ newEncoding = codecName(newEncoding)
+ if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
+ newEncoding = "utf-8"
+ if newEncoding is None:
+ return
+ elif newEncoding == self.charEncoding[0]:
+ self.charEncoding = (self.charEncoding[0], "certain")
+ else:
+ oldEncoding = self.charEncoding[0]
+ self.rawStream.seek(0)
+ self.charEncoding = (newEncoding, "certain")
+ self.reset()
+ raise ReparseException("Encoding changed from %s to %s" % (oldEncoding, newEncoding))
+
+ def detectBOM(self):
+ """Attempts to detect at BOM at the start of the stream. If
+ an encoding can be determined from the BOM return the name of the
+ encoding otherwise return None"""
+ bomDict = {
+ codecs.BOM_UTF8: 'utf-8',
+ codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
+ codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
+ }
+
+ # Go to beginning of file and read in 4 bytes
+ string = self.rawStream.read(4)
+ assert isinstance(string, bytes)
+
+ # Try detecting the BOM using bytes from the string
+ encoding = bomDict.get(string[:3]) # UTF-8
+ seek = 3
+ if not encoding:
+ # Need to detect UTF-32 before UTF-16
+ encoding = bomDict.get(string) # UTF-32
+ seek = 4
+ if not encoding:
+ encoding = bomDict.get(string[:2]) # UTF-16
+ seek = 2
+
+ # Set the read position past the BOM if one was found, otherwise
+ # set it to the start of the stream
+ self.rawStream.seek(encoding and seek or 0)
+
+ return encoding
+
+ def detectEncodingMeta(self):
+ """Report the encoding declared by the meta element
+ """
+ buffer = self.rawStream.read(self.numBytesMeta)
+ assert isinstance(buffer, bytes)
+ parser = EncodingParser(buffer)
+ self.rawStream.seek(0)
+ encoding = parser.getEncoding()
+
+ if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
+ encoding = "utf-8"
+
+ return encoding
+
+
+class EncodingBytes(bytes):
+ """String-like object with an associated position and various extra methods
+ If the position is ever greater than the string length then an exception is
+ raised"""
+ def __new__(self, value):
+ assert isinstance(value, bytes)
+ return bytes.__new__(self, value.lower())
+
+ def __init__(self, value):
+ self._position = -1
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ p = self._position = self._position + 1
+ if p >= len(self):
+ raise StopIteration
+ elif p < 0:
+ raise TypeError
+ return self[p:p + 1]
+
+ def next(self):
+ # Py2 compat
+ return self.__next__()
+
+ def previous(self):
+ p = self._position
+ if p >= len(self):
+ raise StopIteration
+ elif p < 0:
+ raise TypeError
+ self._position = p = p - 1
+ return self[p:p + 1]
+
+ def setPosition(self, position):
+ if self._position >= len(self):
+ raise StopIteration
+ self._position = position
+
+ def getPosition(self):
+ if self._position >= len(self):
+ raise StopIteration
+ if self._position >= 0:
+ return self._position
+ else:
+ return None
+
+ position = property(getPosition, setPosition)
+
+ def getCurrentByte(self):
+ return self[self.position:self.position + 1]
+
+ currentByte = property(getCurrentByte)
+
+ def skip(self, chars=spaceCharactersBytes):
+ """Skip past a list of characters"""
+ p = self.position # use property for the error-checking
+ while p < len(self):
+ c = self[p:p + 1]
+ if c not in chars:
+ self._position = p
+ return c
+ p += 1
+ self._position = p
+ return None
+
+ def skipUntil(self, chars):
+ p = self.position
+ while p < len(self):
+ c = self[p:p + 1]
+ if c in chars:
+ self._position = p
+ return c
+ p += 1
+ self._position = p
+ return None
+
+ def matchBytes(self, bytes):
+ """Look for a sequence of bytes at the start of a string. If the bytes
+ are found return True and advance the position to the byte after the
+ match. Otherwise return False and leave the position alone"""
+ p = self.position
+ data = self[p:p + len(bytes)]
+ rv = data.startswith(bytes)
+ if rv:
+ self.position += len(bytes)
+ return rv
+
+ def jumpTo(self, bytes):
+ """Look for the next sequence of bytes matching a given sequence. If
+ a match is found advance the position to the last byte of the match"""
+ newPosition = self[self.position:].find(bytes)
+ if newPosition > -1:
+ # XXX: This is ugly, but I can't see a nicer way to fix this.
+ if self._position == -1:
+ self._position = 0
+ self._position += (newPosition + len(bytes) - 1)
+ return True
+ else:
+ raise StopIteration
+
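+# Illustrative usage sketch (added for clarity, not part of the original
+# module): EncodingBytes is the cursor-like byte string that EncodingParser
+# and ContentAttrParser below walk over. For example:
+#
+#     eb = EncodingBytes(b"<meta charset=UTF-8>")
+#     eb.jumpTo(b"charset")      # True; position lands on the final b"t"
+#     eb.position += 1           # step onto b"="
+#     eb.currentByte             # b"="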
+
+class EncodingParser(object):
+ """Mini parser for detecting character encoding from meta elements"""
+
+ def __init__(self, data):
+ """string - the data to work on for encoding detection"""
+ self.data = EncodingBytes(data)
+ self.encoding = None
+
+ def getEncoding(self):
+ methodDispatch = (
+ (b"<!--", self.handleComment),
+ (b"<meta", self.handleMeta),
+ (b"</", self.handlePossibleEndTag),
+ (b"<!", self.handleOther),
+ (b"<?", self.handleOther),
+ (b"<", self.handlePossibleStartTag))
+ for byte in self.data:
+ keepParsing = True
+ for key, method in methodDispatch:
+ if self.data.matchBytes(key):
+ try:
+ keepParsing = method()
+ break
+ except StopIteration:
+ keepParsing = False
+ break
+ if not keepParsing:
+ break
+
+ return self.encoding
+
+ def handleComment(self):
+ """Skip over comments"""
+ return self.data.jumpTo(b"-->")
+
+ def handleMeta(self):
+ if self.data.currentByte not in spaceCharactersBytes:
+ # if we have <meta not followed by a space, just keep going
+ return True
+ # We have a valid meta element we want to search for attributes
+ hasPragma = False
+ pendingEncoding = None
+ while True:
+ # Try to find the next attribute after the current position
+ attr = self.getAttribute()
+ if attr is None:
+ return True
+ else:
+ if attr[0] == b"http-equiv":
+ hasPragma = attr[1] == b"content-type"
+ if hasPragma and pendingEncoding is not None:
+ self.encoding = pendingEncoding
+ return False
+ elif attr[0] == b"charset":
+ tentativeEncoding = attr[1]
+ codec = codecName(tentativeEncoding)
+ if codec is not None:
+ self.encoding = codec
+ return False
+ elif attr[0] == b"content":
+ contentParser = ContentAttrParser(EncodingBytes(attr[1]))
+ tentativeEncoding = contentParser.parse()
+ if tentativeEncoding is not None:
+ codec = codecName(tentativeEncoding)
+ if codec is not None:
+ if hasPragma:
+ self.encoding = codec
+ return False
+ else:
+ pendingEncoding = codec
+
+ def handlePossibleStartTag(self):
+ return self.handlePossibleTag(False)
+
+ def handlePossibleEndTag(self):
+ next(self.data)
+ return self.handlePossibleTag(True)
+
+ def handlePossibleTag(self, endTag):
+ data = self.data
+ if data.currentByte not in asciiLettersBytes:
+ # If the next byte is not an ascii letter either ignore this
+ # fragment (possible start tag case) or treat it according to
+ # handleOther
+ if endTag:
+ data.previous()
+ self.handleOther()
+ return True
+
+ c = data.skipUntil(spacesAngleBrackets)
+ if c == b"<":
+ # return to the first step in the overall "two step" algorithm
+ # reprocessing the < byte
+ data.previous()
+ else:
+ # Read all attributes
+ attr = self.getAttribute()
+ while attr is not None:
+ attr = self.getAttribute()
+ return True
+
+ def handleOther(self):
+ return self.data.jumpTo(b">")
+
+ def getAttribute(self):
+ """Return a name,value pair for the next attribute in the stream,
+ if one is found, or None"""
+ data = self.data
+ # Step 1 (skip chars)
+ c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
+ assert c is None or len(c) == 1
+ # Step 2
+ if c in (b">", None):
+ return None
+ # Step 3
+ attrName = []
+ attrValue = []
+ # Step 4 attribute name
+ while True:
+ if c == b"=" and attrName:
+ break
+ elif c in spaceCharactersBytes:
+ # Step 6!
+ c = data.skip()
+ break
+ elif c in (b"/", b">"):
+ return b"".join(attrName), b""
+ elif c in asciiUppercaseBytes:
+ attrName.append(c.lower())
+ elif c is None:
+ return None
+ else:
+ attrName.append(c)
+ # Step 5
+ c = next(data)
+ # Step 7
+ if c != b"=":
+ data.previous()
+ return b"".join(attrName), b""
+ # Step 8
+ next(data)
+ # Step 9
+ c = data.skip()
+ # Step 10
+ if c in (b"'", b'"'):
+ # 10.1
+ quoteChar = c
+ while True:
+ # 10.2
+ c = next(data)
+ # 10.3
+ if c == quoteChar:
+ next(data)
+ return b"".join(attrName), b"".join(attrValue)
+ # 10.4
+ elif c in asciiUppercaseBytes:
+ attrValue.append(c.lower())
+ # 10.5
+ else:
+ attrValue.append(c)
+ elif c == b">":
+ return b"".join(attrName), b""
+ elif c in asciiUppercaseBytes:
+ attrValue.append(c.lower())
+ elif c is None:
+ return None
+ else:
+ attrValue.append(c)
+ # Step 11
+ while True:
+ c = next(data)
+ if c in spacesAngleBrackets:
+ return b"".join(attrName), b"".join(attrValue)
+ elif c in asciiUppercaseBytes:
+ attrValue.append(c.lower())
+ elif c is None:
+ return None
+ else:
+ attrValue.append(c)
+
+
+class ContentAttrParser(object):
+ def __init__(self, data):
+ assert isinstance(data, bytes)
+ self.data = data
+
+ def parse(self):
+ try:
+ # Check if the attr name is charset
+ # otherwise return
+ self.data.jumpTo(b"charset")
+ self.data.position += 1
+ self.data.skip()
+ if not self.data.currentByte == b"=":
+ # If there is no = sign keep looking for attrs
+ return None
+ self.data.position += 1
+ self.data.skip()
+ # Look for an encoding between matching quote marks
+ if self.data.currentByte in (b'"', b"'"):
+ quoteMark = self.data.currentByte
+ self.data.position += 1
+ oldPosition = self.data.position
+ if self.data.jumpTo(quoteMark):
+ return self.data[oldPosition:self.data.position]
+ else:
+ return None
+ else:
+ # Unquoted value
+ oldPosition = self.data.position
+ try:
+ self.data.skipUntil(spaceCharactersBytes)
+ return self.data[oldPosition:self.data.position]
+ except StopIteration:
+ # Return the whole remaining value
+ return self.data[oldPosition:]
+ except StopIteration:
+ return None
+
+
+def codecName(encoding):
+ """Return the python codec name corresponding to an encoding or None if the
+ string doesn't correspond to a valid encoding."""
+ if isinstance(encoding, bytes):
+ try:
+ encoding = encoding.decode("ascii")
+ except UnicodeDecodeError:
+ return None
+ if encoding:
+ canonicalName = ascii_punctuation_re.sub("", encoding).lower()
+ return encodings.get(canonicalName, None)
+ else:
+ return None
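+# Illustrative usage sketch (added for clarity, not part of the original
+# module). The module is importable as html5lib.inputstream; the values in
+# the trailing comments are rough expected results:
+#
+#     from html5lib import inputstream
+#
+#     stream = inputstream.HTMLInputStream(b"<meta charset=utf-8><p>hi")
+#     stream.charEncoding                   # ('utf-8', 'tentative'), via <meta>
+#     stream.char()                         # '<'
+#     stream.charsUntil(">")                # 'meta charset=utf-8'
+#     inputstream.codecName("ISO_8859-1")   # 'windows-1252'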
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/sanitizer.py b/testing/web-platform/tests/tools/html5lib/html5lib/sanitizer.py
new file mode 100644
index 000000000..469d9b40c
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/sanitizer.py
@@ -0,0 +1,271 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import re
+from xml.sax.saxutils import escape, unescape
+
+from .tokenizer import HTMLTokenizer
+from .constants import tokenTypes
+
+
+class HTMLSanitizerMixin(object):
+ """ sanitization of XHTML+MathML+SVG and of inline style attributes."""
+
+ acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
+ 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
+ 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
+ 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
+ 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
+ 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
+ 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
+ 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
+ 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
+ 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
+ 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
+ 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
+ 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
+
+ mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
+ 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
+ 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
+ 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
+ 'munderover', 'none']
+
+ svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
+ 'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
+ 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
+ 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
+ 'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
+ 'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
+
+ acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
+ 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
+ 'background', 'balance', 'bgcolor', 'bgproperties', 'border',
+ 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
+ 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
+ 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
+ 'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
+ 'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
+ 'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
+ 'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
+ 'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
+ 'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
+ 'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
+ 'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
+ 'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
+ 'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
+ 'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
+ 'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
+ 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
+ 'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
+ 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
+ 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
+ 'width', 'wrap', 'xml:lang']
+
+ mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
+ 'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
+ 'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
+ 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
+ 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
+ 'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
+ 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
+ 'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
+ 'xlink:type', 'xmlns', 'xmlns:xlink']
+
+ svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
+ 'arabic-form', 'ascent', 'attributeName', 'attributeType',
+ 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
+ 'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
+ 'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
+ 'fill-opacity', 'fill-rule', 'font-family', 'font-size',
+ 'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
+ 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
+ 'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
+ 'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
+ 'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
+ 'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
+ 'opacity', 'orient', 'origin', 'overline-position',
+ 'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
+ 'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
+ 'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
+ 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
+ 'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
+ 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
+ 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
+ 'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
+ 'transform', 'type', 'u1', 'u2', 'underline-position',
+ 'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
+ 'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
+ 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
+ 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
+ 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
+ 'y1', 'y2', 'zoomAndPan']
+
+ attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
+ 'xlink:href', 'xml:base']
+
+ svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
+ 'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
+ 'mask', 'stroke']
+
+ svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
+ 'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
+ 'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
+ 'set', 'use']
+
+ acceptable_css_properties = ['azimuth', 'background-color',
+ 'border-bottom-color', 'border-collapse', 'border-color',
+ 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
+ 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
+ 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
+ 'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
+ 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
+ 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
+ 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
+ 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
+ 'white-space', 'width']
+
+ acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
+ 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
+ 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
+ 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
+ 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
+ 'transparent', 'underline', 'white', 'yellow']
+
+ acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
+ 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
+ 'stroke-opacity']
+
+ acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
+ 'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
+ 'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
+ 'ssh', 'sftp', 'rtsp', 'afs']
+
+ # subclasses may define their own versions of these constants
+ allowed_elements = acceptable_elements + mathml_elements + svg_elements
+ allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
+ allowed_css_properties = acceptable_css_properties
+ allowed_css_keywords = acceptable_css_keywords
+ allowed_svg_properties = acceptable_svg_properties
+ allowed_protocols = acceptable_protocols
+
+ # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS and
+ # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes
+ # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and
+ # ALLOWED_CSS_KEYWORDS, is allowed through. Attributes in ATTR_VAL_IS_URI
+ # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are
+ # allowed.
+ #
+ # sanitize_html('<script> do_nasty_stuff() </script>')
+ # => &lt;script> do_nasty_stuff() &lt;/script>
+ # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
+ # => <a>Click here for $100</a>
+ def sanitize_token(self, token):
+
+ # accommodate filters which use token_type differently
+ token_type = token["type"]
+ if token_type in list(tokenTypes.keys()):
+ token_type = tokenTypes[token_type]
+
+ if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
+ tokenTypes["EmptyTag"]):
+ if token["name"] in self.allowed_elements:
+ return self.allowed_token(token, token_type)
+ else:
+ return self.disallowed_token(token, token_type)
+ elif token_type == tokenTypes["Comment"]:
+ pass
+ else:
+ return token
+
+ def allowed_token(self, token, token_type):
+ if "data" in token:
+ attrs = dict([(name, val) for name, val in
+ token["data"][::-1]
+ if name in self.allowed_attributes])
+ for attr in self.attr_val_is_uri:
+ if attr not in attrs:
+ continue
+ val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
+ unescape(attrs[attr])).lower()
+ # remove replacement characters from unescaped characters
+ val_unescaped = val_unescaped.replace("\ufffd", "")
+ if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
+ (val_unescaped.split(':')[0] not in
+ self.allowed_protocols)):
+ del attrs[attr]
+ for attr in self.svg_attr_val_allows_ref:
+ if attr in attrs:
+ attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
+ ' ',
+ unescape(attrs[attr]))
+ if (token["name"] in self.svg_allow_local_href and
+ 'xlink:href' in attrs and re.search('^\s*[^#\s].*',
+ attrs['xlink:href'])):
+ del attrs['xlink:href']
+ if 'style' in attrs:
+ attrs['style'] = self.sanitize_css(attrs['style'])
+ token["data"] = [[name, val] for name, val in list(attrs.items())]
+ return token
+
+ def disallowed_token(self, token, token_type):
+ if token_type == tokenTypes["EndTag"]:
+ token["data"] = "</%s>" % token["name"]
+ elif token["data"]:
+ attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
+ token["data"] = "<%s%s>" % (token["name"], attrs)
+ else:
+ token["data"] = "<%s>" % token["name"]
+ if token.get("selfClosing"):
+ token["data"] = token["data"][:-1] + "/>"
+
+ if token["type"] in list(tokenTypes.keys()):
+ token["type"] = "Characters"
+ else:
+ token["type"] = tokenTypes["Characters"]
+
+ del token["name"]
+ return token
+
+ def sanitize_css(self, style):
+ # disallow urls
+ style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
+
+ # gauntlet
+ if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
+ return ''
+ if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
+ return ''
+
+ clean = []
+ for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
+ if not value:
+ continue
+ if prop.lower() in self.allowed_css_properties:
+ clean.append(prop + ': ' + value + ';')
+ elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
+ 'padding']:
+ for keyword in value.split():
+ if keyword not in self.acceptable_css_keywords and \
+ not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
+ break
+ else:
+ clean.append(prop + ': ' + value + ';')
+ elif prop.lower() in self.allowed_svg_properties:
+ clean.append(prop + ': ' + value + ';')
+
+ return ' '.join(clean)
+
+
+class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
+ def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
+ lowercaseElementName=False, lowercaseAttrName=False, parser=None):
+ # Change case matching defaults as we only output lowercase html anyway
+ # This solution doesn't seem ideal...
+ HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
+ lowercaseElementName, lowercaseAttrName, parser=parser)
+
+ def __iter__(self):
+ for token in HTMLTokenizer.__iter__(self):
+ token = self.sanitize_token(token)
+ if token:
+ yield token
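+# Illustrative usage sketch (added for clarity, not part of the original
+# module). The sanitizer is normally plugged into the parser as its tokenizer,
+# exactly as the accompanying test suite does:
+#
+#     from html5lib import html5parser, sanitizer
+#
+#     parser = html5parser.HTMLParser(tokenizer=sanitizer.HTMLSanitizer)
+#     fragment = parser.parseFragment('<a href="javascript:alert(1)">hi</a>')
+#     # the javascript: URI is dropped because "javascript" is not in
+#     # allowed_protocols, leaving a plain <a>hi</a> element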
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/serializer/__init__.py b/testing/web-platform/tests/tools/html5lib/html5lib/serializer/__init__.py
new file mode 100644
index 000000000..8380839a6
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/serializer/__init__.py
@@ -0,0 +1,16 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from .. import treewalkers
+
+from .htmlserializer import HTMLSerializer
+
+
+def serialize(input, tree="etree", format="html", encoding=None,
+ **serializer_opts):
+ # XXX: Should we cache this?
+ walker = treewalkers.getTreeWalker(tree)
+ if format == "html":
+ s = HTMLSerializer(**serializer_opts)
+ else:
+ raise ValueError("type must be html")
+ return s.render(walker(input), encoding)
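+# Illustrative usage sketch (added for clarity, not part of the original
+# module); the serialized output shown is approximate:
+#
+#     from html5lib import html5parser
+#     from html5lib.serializer import serialize
+#
+#     doc = html5parser.HTMLParser().parse("<p class=a>Hello")
+#     serialize(doc, tree="etree")   # roughly '<p class=a>Hello', since
+#                                    # optional tags are omitted by default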
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/serializer/htmlserializer.py b/testing/web-platform/tests/tools/html5lib/html5lib/serializer/htmlserializer.py
new file mode 100644
index 000000000..4a891ff56
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/serializer/htmlserializer.py
@@ -0,0 +1,320 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import text_type
+
+import gettext
+_ = gettext.gettext
+
+try:
+ from functools import reduce
+except ImportError:
+ pass
+
+from ..constants import voidElements, booleanAttributes, spaceCharacters
+from ..constants import rcdataElements, entities, xmlEntities
+from .. import utils
+from xml.sax.saxutils import escape
+
+spaceCharacters = "".join(spaceCharacters)
+
+try:
+ from codecs import register_error, xmlcharrefreplace_errors
+except ImportError:
+ unicode_encode_errors = "strict"
+else:
+ unicode_encode_errors = "htmlentityreplace"
+
+ encode_entity_map = {}
+ is_ucs4 = len("\U0010FFFF") == 1
+ for k, v in list(entities.items()):
+ # skip multi-character entities
+ if ((is_ucs4 and len(v) > 1) or
+ (not is_ucs4 and len(v) > 2)):
+ continue
+ if v != "&":
+ if len(v) == 2:
+ v = utils.surrogatePairToCodepoint(v)
+ else:
+ v = ord(v)
+ if v not in encode_entity_map or k.islower():
+ # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
+ encode_entity_map[v] = k
+
+ def htmlentityreplace_errors(exc):
+ if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
+ res = []
+ codepoints = []
+ skip = False
+ for i, c in enumerate(exc.object[exc.start:exc.end]):
+ if skip:
+ skip = False
+ continue
+ index = i + exc.start
+ if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
+ codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
+ skip = True
+ else:
+ codepoint = ord(c)
+ codepoints.append(codepoint)
+ for cp in codepoints:
+ e = encode_entity_map.get(cp)
+ if e:
+ res.append("&")
+ res.append(e)
+ if not e.endswith(";"):
+ res.append(";")
+ else:
+ res.append("&#x%s;" % (hex(cp)[2:]))
+ return ("".join(res), exc.end)
+ else:
+ return xmlcharrefreplace_errors(exc)
+
+ register_error(unicode_encode_errors, htmlentityreplace_errors)
+
+ del register_error
+
+
+class HTMLSerializer(object):
+
+ # attribute quoting options
+ quote_attr_values = False
+ quote_char = '"'
+ use_best_quote_char = True
+
+ # tag syntax options
+ omit_optional_tags = True
+ minimize_boolean_attributes = True
+ use_trailing_solidus = False
+ space_before_trailing_solidus = True
+
+ # escaping options
+ escape_lt_in_attrs = False
+ escape_rcdata = False
+ resolve_entities = True
+
+ # miscellaneous options
+ alphabetical_attributes = False
+ inject_meta_charset = True
+ strip_whitespace = False
+ sanitize = False
+
+ options = ("quote_attr_values", "quote_char", "use_best_quote_char",
+ "omit_optional_tags", "minimize_boolean_attributes",
+ "use_trailing_solidus", "space_before_trailing_solidus",
+ "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
+ "alphabetical_attributes", "inject_meta_charset",
+ "strip_whitespace", "sanitize")
+
+ def __init__(self, **kwargs):
+ """Initialize HTMLSerializer.
+
+ Keyword options (default given first unless specified) include:
+
+ inject_meta_charset=True|False
+ Whether to insert a meta element to define the character set of the
+ document.
+ quote_attr_values=True|False
+ Whether to quote attribute values that don't require quoting
+ per HTML5 parsing rules.
+ quote_char=u'"'|u"'"
+ Use given quote character for attribute quoting. Default is to
+ use double quote unless attribute value contains a double quote,
+ in which case single quotes are used instead.
+ escape_lt_in_attrs=False|True
+ Whether to escape < in attribute values.
+ escape_rcdata=False|True
+ Whether to escape characters that need to be escaped within normal
+ elements within rcdata elements such as style.
+ resolve_entities=True|False
+ Whether to resolve named character entities that appear in the
+ source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
+ are unaffected by this setting.
+ strip_whitespace=False|True
+ Whether to remove semantically meaningless whitespace. (This
+ compresses all whitespace to a single space except within pre.)
+ minimize_boolean_attributes=True|False
+ Shortens boolean attributes to give just the attribute value,
+ for example <input disabled="disabled"> becomes <input disabled>.
+ use_trailing_solidus=False|True
+ Includes a close-tag slash at the end of the start tag of void
+ elements (empty elements whose end tag is forbidden). E.g. <hr/>.
+ space_before_trailing_solidus=True|False
+ Places a space immediately before the closing slash in a tag
+ using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
+ sanitize=False|True
+ Strip all unsafe or unknown constructs from output.
+ See `html5lib user documentation`_
+ omit_optional_tags=True|False
+ Omit start/end tags that are optional.
+ alphabetical_attributes=False|True
+ Reorder attributes to be in alphabetical order.
+
+ .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
+ """
+ if 'quote_char' in kwargs:
+ self.use_best_quote_char = False
+ for attr in self.options:
+ setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
+ self.errors = []
+ self.strict = False
+
+ def encode(self, string):
+ assert(isinstance(string, text_type))
+ if self.encoding:
+ return string.encode(self.encoding, unicode_encode_errors)
+ else:
+ return string
+
+ def encodeStrict(self, string):
+ assert(isinstance(string, text_type))
+ if self.encoding:
+ return string.encode(self.encoding, "strict")
+ else:
+ return string
+
+ def serialize(self, treewalker, encoding=None):
+ self.encoding = encoding
+ in_cdata = False
+ self.errors = []
+
+ if encoding and self.inject_meta_charset:
+ from ..filters.inject_meta_charset import Filter
+ treewalker = Filter(treewalker, encoding)
+ # WhitespaceFilter should be used before OptionalTagFilter
+ # for maximum efficiency of the latter filter
+ if self.strip_whitespace:
+ from ..filters.whitespace import Filter
+ treewalker = Filter(treewalker)
+ if self.sanitize:
+ from ..filters.sanitizer import Filter
+ treewalker = Filter(treewalker)
+ if self.omit_optional_tags:
+ from ..filters.optionaltags import Filter
+ treewalker = Filter(treewalker)
+ # Alphabetical attributes must be last, as other filters
+ # could add attributes and alter the order
+ if self.alphabetical_attributes:
+ from ..filters.alphabeticalattributes import Filter
+ treewalker = Filter(treewalker)
+
+ for token in treewalker:
+ type = token["type"]
+ if type == "Doctype":
+ doctype = "<!DOCTYPE %s" % token["name"]
+
+ if token["publicId"]:
+ doctype += ' PUBLIC "%s"' % token["publicId"]
+ elif token["systemId"]:
+ doctype += " SYSTEM"
+ if token["systemId"]:
+ if token["systemId"].find('"') >= 0:
+ if token["systemId"].find("'") >= 0:
+ self.serializeError(_("System identifer contains both single and double quote characters"))
+ quote_char = "'"
+ else:
+ quote_char = '"'
+ doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
+
+ doctype += ">"
+ yield self.encodeStrict(doctype)
+
+ elif type in ("Characters", "SpaceCharacters"):
+ if type == "SpaceCharacters" or in_cdata:
+ if in_cdata and token["data"].find("</") >= 0:
+ self.serializeError(_("Unexpected </ in CDATA"))
+ yield self.encode(token["data"])
+ else:
+ yield self.encode(escape(token["data"]))
+
+ elif type in ("StartTag", "EmptyTag"):
+ name = token["name"]
+ yield self.encodeStrict("<%s" % name)
+ if name in rcdataElements and not self.escape_rcdata:
+ in_cdata = True
+ elif in_cdata:
+ self.serializeError(_("Unexpected child element of a CDATA element"))
+ for (attr_namespace, attr_name), attr_value in token["data"].items():
+ # TODO: Add namespace support here
+ k = attr_name
+ v = attr_value
+ yield self.encodeStrict(' ')
+
+ yield self.encodeStrict(k)
+ if not self.minimize_boolean_attributes or \
+ (k not in booleanAttributes.get(name, tuple())
+ and k not in booleanAttributes.get("", tuple())):
+ yield self.encodeStrict("=")
+ if self.quote_attr_values or not v:
+ quote_attr = True
+ else:
+ quote_attr = reduce(lambda x, y: x or (y in v),
+ spaceCharacters + ">\"'=", False)
+ v = v.replace("&", "&amp;")
+ if self.escape_lt_in_attrs:
+ v = v.replace("<", "&lt;")
+ if quote_attr:
+ quote_char = self.quote_char
+ if self.use_best_quote_char:
+ if "'" in v and '"' not in v:
+ quote_char = '"'
+ elif '"' in v and "'" not in v:
+ quote_char = "'"
+ if quote_char == "'":
+ v = v.replace("'", "&#39;")
+ else:
+ v = v.replace('"', "&quot;")
+ yield self.encodeStrict(quote_char)
+ yield self.encode(v)
+ yield self.encodeStrict(quote_char)
+ else:
+ yield self.encode(v)
+ if name in voidElements and self.use_trailing_solidus:
+ if self.space_before_trailing_solidus:
+ yield self.encodeStrict(" /")
+ else:
+ yield self.encodeStrict("/")
+ yield self.encode(">")
+
+ elif type == "EndTag":
+ name = token["name"]
+ if name in rcdataElements:
+ in_cdata = False
+ elif in_cdata:
+ self.serializeError(_("Unexpected child element of a CDATA element"))
+ yield self.encodeStrict("</%s>" % name)
+
+ elif type == "Comment":
+ data = token["data"]
+ if data.find("--") >= 0:
+ self.serializeError(_("Comment contains --"))
+ yield self.encodeStrict("<!--%s-->" % token["data"])
+
+ elif type == "Entity":
+ name = token["name"]
+ key = name + ";"
+ if key not in entities:
+ self.serializeError(_("Entity %s not recognized" % name))
+ if self.resolve_entities and key not in xmlEntities:
+ data = entities[key]
+ else:
+ data = "&%s;" % name
+ yield self.encodeStrict(data)
+
+ else:
+ self.serializeError(token["data"])
+
+ def render(self, treewalker, encoding=None):
+ if encoding:
+ return b"".join(list(self.serialize(treewalker, encoding)))
+ else:
+ return "".join(list(self.serialize(treewalker)))
+
+ def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
+ # XXX The idea is to make data mandatory.
+ self.errors.append(data)
+ if self.strict:
+ raise SerializeError
+
+
+class SerializeError(Exception):
+ """Error in serialized tree"""
+ pass
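+# Illustrative usage sketch (added for clarity, not part of the original
+# module), driving HTMLSerializer with a tree walker; the output shown is
+# approximate:
+#
+#     from html5lib import html5parser, treebuilders, treewalkers
+#     from html5lib.serializer.htmlserializer import HTMLSerializer
+#
+#     parser = html5parser.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
+#     dom = parser.parse("<p title='a b'>text")
+#     walker = treewalkers.getTreeWalker("dom")
+#     serializer = HTMLSerializer(omit_optional_tags=False, quote_attr_values=True)
+#     "".join(serializer.serialize(walker(dom)))
+#     # roughly '<html><head></head><body><p title="a b">text</p></body></html>'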
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/README b/testing/web-platform/tests/tools/html5lib/html5lib/tests/README
new file mode 100644
index 000000000..c564b6836
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/README
@@ -0,0 +1 @@
+Each testcase file can be run through nose (using ``nosetests``). \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/__init__.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/__init__.py
new file mode 100644
index 000000000..b8ce2de32
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import, division, unicode_literals
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/mockParser.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/mockParser.py
new file mode 100644
index 000000000..ef31527e8
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/mockParser.py
@@ -0,0 +1,41 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import sys
+import os
+
+if __name__ == '__main__':
+ # Allow us to import from the src directory
+ os.chdir(os.path.split(os.path.abspath(__file__))[0])
+ sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, "src")))
+
+from html5lib.tokenizer import HTMLTokenizer
+
+
+class HTMLParser(object):
+ """ Fake parser to test tokenizer output """
+ def parse(self, stream, output=True):
+ tokenizer = HTMLTokenizer(stream)
+ for token in tokenizer:
+ if output:
+ print(token)
+
+if __name__ == "__main__":
+ x = HTMLParser()
+ if len(sys.argv) > 1:
+ if len(sys.argv) > 2:
+ import hotshot
+ import hotshot.stats
+ prof = hotshot.Profile('stats.prof')
+ prof.runcall(x.parse, sys.argv[1], False)
+ prof.close()
+ stats = hotshot.stats.load('stats.prof')
+ stats.strip_dirs()
+ stats.sort_stats('time')
+ stats.print_stats()
+ else:
+ x.parse(sys.argv[1])
+ else:
+ print("""Usage: python mockParser.py filename [stats]
+ If stats is specified, the hotshot profiler will run and output the
+ stats instead.
+ """)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/performance/concatenation.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/performance/concatenation.py
new file mode 100644
index 000000000..a1465036e
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/performance/concatenation.py
@@ -0,0 +1,36 @@
+from __future__ import absolute_import, division, unicode_literals
+
+
+def f1():
+ x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ x += y + z
+
+
+def f2():
+ x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ x = x + y + z
+
+
+def f3():
+ x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ x = "".join((x, y, z))
+
+
+def f4():
+ x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ x = "%s%s%s" % (x, y, z)
+
+import timeit
+for x in range(4):
+ statement = "f%s" % (x + 1)
+ t = timeit.Timer(statement, "from __main__ import " + statement)
+ r = t.repeat(3, 1000000)
+ print(r, min(r))
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/support.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/support.py
new file mode 100644
index 000000000..41f2d2a07
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/support.py
@@ -0,0 +1,177 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import os
+import sys
+import codecs
+import glob
+import xml.sax.handler
+
+base_path = os.path.split(__file__)[0]
+
+test_dir = os.path.join(base_path, 'testdata')
+sys.path.insert(0, os.path.abspath(os.path.join(base_path,
+ os.path.pardir,
+ os.path.pardir)))
+
+from html5lib import treebuilders
+del base_path
+
+# Build a dict of available trees
+treeTypes = {"DOM": treebuilders.getTreeBuilder("dom")}
+
+# Try whatever etree implementations are available from a list that is
+# "supposed" to work
+try:
+ import xml.etree.ElementTree as ElementTree
+ treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
+except ImportError:
+ try:
+ import elementtree.ElementTree as ElementTree
+ treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
+ except ImportError:
+ pass
+
+try:
+ import xml.etree.cElementTree as cElementTree
+ treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
+except ImportError:
+ try:
+ import cElementTree
+ treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
+ except ImportError:
+ pass
+
+try:
+ import lxml.etree as lxml # flake8: noqa
+except ImportError:
+ pass
+else:
+ treeTypes['lxml'] = treebuilders.getTreeBuilder("lxml")
+
+
+def get_data_files(subdirectory, files='*.dat'):
+ return glob.glob(os.path.join(test_dir, subdirectory, files))
+
+
+class DefaultDict(dict):
+ def __init__(self, default, *args, **kwargs):
+ self.default = default
+ dict.__init__(self, *args, **kwargs)
+
+ def __getitem__(self, key):
+ return dict.get(self, key, self.default)
+
+
+class TestData(object):
+ def __init__(self, filename, newTestHeading="data", encoding="utf8"):
+ if encoding is None:
+ self.f = open(filename, mode="rb")
+ else:
+ self.f = codecs.open(filename, encoding=encoding)
+ self.encoding = encoding
+ self.newTestHeading = newTestHeading
+
+ def __del__(self):
+ self.f.close()
+
+ def __iter__(self):
+ data = DefaultDict(None)
+ key = None
+ for line in self.f:
+ heading = self.isSectionHeading(line)
+ if heading:
+ if data and heading == self.newTestHeading:
+ # Remove trailing newline
+ data[key] = data[key][:-1]
+ yield self.normaliseOutput(data)
+ data = DefaultDict(None)
+ key = heading
+ data[key] = "" if self.encoding else b""
+ elif key is not None:
+ data[key] += line
+ if data:
+ yield self.normaliseOutput(data)
+
+ def isSectionHeading(self, line):
+ """If the current heading is a test section heading return the heading,
+ otherwise return False"""
+ # print(line)
+ if line.startswith("#" if self.encoding else b"#"):
+ return line[1:].strip()
+ else:
+ return False
+
+ def normaliseOutput(self, data):
+ # Remove trailing newlines
+ for key, value in data.items():
+ if value.endswith("\n" if self.encoding else b"\n"):
+ data[key] = value[:-1]
+ return data
+
+
+def convert(stripChars):
+ def convertData(data):
+ """convert the output of str(document) to the format used in the testcases"""
+ data = data.split("\n")
+ rv = []
+ for line in data:
+ if line.startswith("|"):
+ rv.append(line[stripChars:])
+ else:
+ rv.append(line)
+ return "\n".join(rv)
+ return convertData
+
+convertExpected = convert(2)
+
+
+def errorMessage(input, expected, actual):
+ msg = ("Input:\n%s\nExpected:\n%s\nRecieved\n%s\n" %
+ (repr(input), repr(expected), repr(actual)))
+ if sys.version_info.major == 2:
+ msg = msg.encode("ascii", "backslashreplace")
+ return msg
+
+
+class TracingSaxHandler(xml.sax.handler.ContentHandler):
+ def __init__(self):
+ xml.sax.handler.ContentHandler.__init__(self)
+ self.visited = []
+
+ def startDocument(self):
+ self.visited.append('startDocument')
+
+ def endDocument(self):
+ self.visited.append('endDocument')
+
+ def startPrefixMapping(self, prefix, uri):
+ # These are ignored as their order is not guaranteed
+ pass
+
+ def endPrefixMapping(self, prefix):
+ # These are ignored as their order is not guaranteed
+ pass
+
+ def startElement(self, name, attrs):
+ self.visited.append(('startElement', name, attrs))
+
+ def endElement(self, name):
+ self.visited.append(('endElement', name))
+
+ def startElementNS(self, name, qname, attrs):
+ self.visited.append(('startElementNS', name, qname, dict(attrs)))
+
+ def endElementNS(self, name, qname):
+ self.visited.append(('endElementNS', name, qname))
+
+ def characters(self, content):
+ self.visited.append(('characters', content))
+
+ def ignorableWhitespace(self, whitespace):
+ self.visited.append(('ignorableWhitespace', whitespace))
+
+ def processingInstruction(self, target, data):
+ self.visited.append(('processingInstruction', target, data))
+
+ def skippedEntity(self, name):
+ self.visited.append(('skippedEntity', name))
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_encoding.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_encoding.py
new file mode 100644
index 000000000..d774ce0f6
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_encoding.py
@@ -0,0 +1,67 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import os
+import unittest
+
+try:
+ unittest.TestCase.assertEqual
+except AttributeError:
+ unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
+
+from .support import get_data_files, TestData, test_dir, errorMessage
+from html5lib import HTMLParser, inputstream
+
+
+class Html5EncodingTestCase(unittest.TestCase):
+ def test_codec_name_a(self):
+ self.assertEqual(inputstream.codecName("utf-8"), "utf-8")
+
+ def test_codec_name_b(self):
+ self.assertEqual(inputstream.codecName("utf8"), "utf-8")
+
+ def test_codec_name_c(self):
+ self.assertEqual(inputstream.codecName(" utf8 "), "utf-8")
+
+ def test_codec_name_d(self):
+ self.assertEqual(inputstream.codecName("ISO_8859--1"), "windows-1252")
+
+
+def runParserEncodingTest(data, encoding):
+ p = HTMLParser()
+ assert p.documentEncoding is None
+ p.parse(data, useChardet=False)
+ encoding = encoding.lower().decode("ascii")
+
+ assert encoding == p.documentEncoding, errorMessage(data, encoding, p.documentEncoding)
+
+
+def runPreScanEncodingTest(data, encoding):
+ stream = inputstream.HTMLBinaryInputStream(data, chardet=False)
+ encoding = encoding.lower().decode("ascii")
+
+ # Very crude way to ignore irrelevant tests
+ if len(data) > stream.numBytesMeta:
+ return
+
+ assert encoding == stream.charEncoding[0], errorMessage(data, encoding, stream.charEncoding[0])
+
+
+def test_encoding():
+ for filename in get_data_files("encoding"):
+ tests = TestData(filename, b"data", encoding=None)
+ for idx, test in enumerate(tests):
+ yield (runParserEncodingTest, test[b'data'], test[b'encoding'])
+ yield (runPreScanEncodingTest, test[b'data'], test[b'encoding'])
+
+try:
+ try:
+ import charade # flake8: noqa
+ except ImportError:
+ import chardet # flake8: noqa
+except ImportError:
+ print("charade/chardet not found, skipping chardet tests")
+else:
+ def test_chardet():
+ with open(os.path.join(test_dir, "encoding", "chardet", "test_big5.txt"), "rb") as fp:
+ encoding = inputstream.HTMLInputStream(fp.read()).charEncoding
+ assert encoding[0].lower() == "big5"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_parser.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_parser.py
new file mode 100644
index 000000000..230cdb42d
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_parser.py
@@ -0,0 +1,96 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import os
+import sys
+import traceback
+import warnings
+import re
+
+warnings.simplefilter("error")
+
+from .support import get_data_files
+from .support import TestData, convert, convertExpected, treeTypes
+from html5lib import html5parser, constants
+
+# Run the parse error checks
+checkParseErrors = False
+
+# XXX - There should just be one function here but for some reason the testcase
+# format differs from the treedump format by a single space character
+
+
+def convertTreeDump(data):
+ return "\n".join(convert(3)(data).split("\n")[1:])
+
+namespaceExpected = re.compile(r"^(\s*)<(\S+)>", re.M).sub
+
+
+def runParserTest(innerHTML, input, expected, errors, treeClass,
+ namespaceHTMLElements):
+ with warnings.catch_warnings(record=True) as caughtWarnings:
+ warnings.simplefilter("always")
+ p = html5parser.HTMLParser(tree=treeClass,
+ namespaceHTMLElements=namespaceHTMLElements)
+
+ try:
+ if innerHTML:
+ document = p.parseFragment(input, innerHTML)
+ else:
+ document = p.parse(input)
+ except:
+ errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected,
+ "\nTraceback:", traceback.format_exc()])
+ assert False, errorMsg
+
+ otherWarnings = [x for x in caughtWarnings
+ if not issubclass(x.category, constants.DataLossWarning)]
+ assert len(otherWarnings) == 0, [(x.category, x.message) for x in otherWarnings]
+ if len(caughtWarnings):
+ return
+
+ output = convertTreeDump(p.tree.testSerializer(document))
+
+ expected = convertExpected(expected)
+ if namespaceHTMLElements:
+ expected = namespaceExpected(r"\1<html \2>", expected)
+
+ errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected,
+ "\nReceived:", output])
+ assert expected == output, errorMsg
+
+ errStr = []
+ for (line, col), errorcode, datavars in p.errors:
+ assert isinstance(datavars, dict), "%s, %s" % (errorcode, repr(datavars))
+ errStr.append("Line: %i Col: %i %s" % (line, col,
+ constants.E[errorcode] % datavars))
+
+ errorMsg2 = "\n".join(["\n\nInput:", input,
+ "\nExpected errors (" + str(len(errors)) + "):\n" + "\n".join(errors),
+ "\nActual errors (" + str(len(p.errors)) + "):\n" + "\n".join(errStr)])
+ if checkParseErrors:
+ assert len(p.errors) == len(errors), errorMsg2
+
+
+def test_parser():
+ sys.stderr.write('Testing tree builders ' + " ".join(list(treeTypes.keys())) + "\n")
+ files = get_data_files('tree-construction')
+
+ for filename in files:
+ testName = os.path.basename(filename).replace(".dat", "")
+ if testName in ("template",):
+ continue
+
+ tests = TestData(filename, "data")
+
+ for index, test in enumerate(tests):
+ input, errors, innerHTML, expected = [test[key] for key in
+ ('data', 'errors',
+ 'document-fragment',
+ 'document')]
+ if errors:
+ errors = errors.split("\n")
+
+ for treeName, treeCls in treeTypes.items():
+ for namespaceHTMLElements in (True, False):
+ yield (runParserTest, innerHTML, input, expected, errors, treeCls,
+ namespaceHTMLElements)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_parser2.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_parser2.py
new file mode 100644
index 000000000..20bbdf317
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_parser2.py
@@ -0,0 +1,64 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import io
+
+from . import support # flake8: noqa
+from html5lib import html5parser
+from html5lib.constants import namespaces
+from html5lib import treebuilders
+
+import unittest
+
+# tests that aren't autogenerated from text files
+
+
+class MoreParserTests(unittest.TestCase):
+
+ def setUp(self):
+ self.dom_tree = treebuilders.getTreeBuilder("dom")
+
+ def test_assertDoctypeCloneable(self):
+ parser = html5parser.HTMLParser(tree=self.dom_tree)
+ doc = parser.parse('<!DOCTYPE HTML>')
+ self.assertTrue(doc.cloneNode(True))
+
+ def test_line_counter(self):
+ # http://groups.google.com/group/html5lib-discuss/browse_frm/thread/f4f00e4a2f26d5c0
+ parser = html5parser.HTMLParser(tree=self.dom_tree)
+ parser.parse("<pre>\nx\n&gt;\n</pre>")
+
+ def test_namespace_html_elements_0_dom(self):
+ parser = html5parser.HTMLParser(tree=self.dom_tree, namespaceHTMLElements=True)
+ doc = parser.parse("<html></html>")
+ self.assertTrue(doc.childNodes[0].namespaceURI == namespaces["html"])
+
+ def test_namespace_html_elements_1_dom(self):
+ parser = html5parser.HTMLParser(tree=self.dom_tree, namespaceHTMLElements=False)
+ doc = parser.parse("<html></html>")
+ self.assertTrue(doc.childNodes[0].namespaceURI is None)
+
+ def test_namespace_html_elements_0_etree(self):
+ parser = html5parser.HTMLParser(namespaceHTMLElements=True)
+ doc = parser.parse("<html></html>")
+ self.assertTrue(list(doc)[0].tag == "{%s}html" % (namespaces["html"],))
+
+ def test_namespace_html_elements_1_etree(self):
+ parser = html5parser.HTMLParser(namespaceHTMLElements=False)
+ doc = parser.parse("<html></html>")
+ self.assertTrue(list(doc)[0].tag == "html")
+
+ def test_unicode_file(self):
+ parser = html5parser.HTMLParser()
+ parser.parse(io.StringIO("a"))
+
+
+def buildTestSuite():
+ return unittest.defaultTestLoader.loadTestsFromName(__name__)
+
+
+def main():
+ buildTestSuite()
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_sanitizer.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_sanitizer.py
new file mode 100644
index 000000000..1cc687dfc
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_sanitizer.py
@@ -0,0 +1,105 @@
+from __future__ import absolute_import, division, unicode_literals
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+from html5lib import html5parser, sanitizer, constants, treebuilders
+
+
+def toxmlFactory():
+ tree = treebuilders.getTreeBuilder("etree")
+
+ def toxml(element):
+ # encode/decode roundtrip required for Python 2.6 compatibility
+ result_bytes = tree.implementation.tostring(element, encoding="utf-8")
+ return result_bytes.decode("utf-8")
+
+ return toxml
+
+
+def runSanitizerTest(name, expected, input, toxml=None):
+ if toxml is None:
+ toxml = toxmlFactory()
+ expected = ''.join([toxml(token) for token in html5parser.HTMLParser().
+ parseFragment(expected)])
+ expected = json.loads(json.dumps(expected))
+ assert expected == sanitize_html(input)
+
+
+def sanitize_html(stream, toxml=None):
+ if toxml is None:
+ toxml = toxmlFactory()
+ return ''.join([toxml(token) for token in
+ html5parser.HTMLParser(tokenizer=sanitizer.HTMLSanitizer).
+ parseFragment(stream)])
+
+
+def test_should_handle_astral_plane_characters():
+ assert '<html:p xmlns:html="http://www.w3.org/1999/xhtml">\U0001d4b5 \U0001d538</html:p>' == sanitize_html("<p>&#x1d4b5; &#x1d538;</p>")
+
+
+def test_sanitizer():
+ toxml = toxmlFactory()
+ for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
+ if tag_name in ['caption', 'col', 'colgroup', 'optgroup', 'option', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr']:
+ continue # TODO
+ if tag_name != tag_name.lower():
+ continue # TODO
+ if tag_name == 'image':
+ yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
+ "<img title=\"1\"/>foo &lt;bad&gt;bar&lt;/bad&gt; baz",
+ "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
+ toxml)
+ elif tag_name == 'br':
+ yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
+ "<br title=\"1\"/>foo &lt;bad&gt;bar&lt;/bad&gt; baz<br/>",
+ "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
+ toxml)
+ elif tag_name in constants.voidElements:
+ yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
+ "<%s title=\"1\"/>foo &lt;bad&gt;bar&lt;/bad&gt; baz" % tag_name,
+ "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
+ toxml)
+ else:
+ yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
+ "<%s title=\"1\">foo &lt;bad&gt;bar&lt;/bad&gt; baz</%s>" % (tag_name, tag_name),
+ "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
+ toxml)
+
+ for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
+ tag_name = tag_name.upper()
+ yield (runSanitizerTest, "test_should_forbid_%s_tag" % tag_name,
+ "&lt;%s title=\"1\"&gt;foo &lt;bad&gt;bar&lt;/bad&gt; baz&lt;/%s&gt;" % (tag_name, tag_name),
+ "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
+ toxml)
+
+ for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
+ if attribute_name != attribute_name.lower():
+ continue # TODO
+ if attribute_name == 'style':
+ continue
+ yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name,
+ "<p %s=\"foo\">foo &lt;bad&gt;bar&lt;/bad&gt; baz</p>" % attribute_name,
+ "<p %s='foo'>foo <bad>bar</bad> baz</p>" % attribute_name,
+ toxml)
+
+ for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
+ attribute_name = attribute_name.upper()
+ yield (runSanitizerTest, "test_should_forbid_%s_attribute" % attribute_name,
+ "<p>foo &lt;bad&gt;bar&lt;/bad&gt; baz</p>",
+ "<p %s='display: none;'>foo <bad>bar</bad> baz</p>" % attribute_name,
+ toxml)
+
+ for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
+ yield (runSanitizerTest, "test_should_allow_%s_uris" % protocol,
+ "<a href=\"%s\">foo</a>" % protocol,
+ """<a href="%s">foo</a>""" % protocol,
+ toxml)
+
+ for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
+ yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
+ "<a href=\"%s\">foo</a>" % protocol,
+ """<a href="%s">foo</a>""" % protocol,
+ toxml)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_serializer.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_serializer.py
new file mode 100644
index 000000000..3c37feff7
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_serializer.py
@@ -0,0 +1,178 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import json
+import unittest
+
+from .support import get_data_files
+
+try:
+ unittest.TestCase.assertEqual
+except AttributeError:
+ unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
+
+import html5lib
+from html5lib import constants
+from html5lib.serializer import HTMLSerializer, serialize
+from html5lib.treewalkers._base import TreeWalker
+
+optionals_loaded = []
+
+try:
+ from lxml import etree
+ optionals_loaded.append("lxml")
+except ImportError:
+ pass
+
+default_namespace = constants.namespaces["html"]
+
+
+class JsonWalker(TreeWalker):
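+    # Re-emits the JSON-encoded token lists used by the serializer tests as
+    # html5lib tree-walker tokens (StartTag, EndTag, EmptyTag, Comment,
+    # Characters/SpaceCharacters, Doctype).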
+ def __iter__(self):
+ for token in self.tree:
+ type = token[0]
+ if type == "StartTag":
+ if len(token) == 4:
+ namespace, name, attrib = token[1:4]
+ else:
+ namespace = default_namespace
+ name, attrib = token[1:3]
+ yield self.startTag(namespace, name, self._convertAttrib(attrib))
+ elif type == "EndTag":
+ if len(token) == 3:
+ namespace, name = token[1:3]
+ else:
+ namespace = default_namespace
+ name = token[1]
+ yield self.endTag(namespace, name)
+ elif type == "EmptyTag":
+ if len(token) == 4:
+ namespace, name, attrib = token[1:]
+ else:
+ namespace = default_namespace
+ name, attrib = token[1:]
+ for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)):
+ yield token
+ elif type == "Comment":
+ yield self.comment(token[1])
+ elif type in ("Characters", "SpaceCharacters"):
+ for token in self.text(token[1]):
+ yield token
+ elif type == "Doctype":
+ if len(token) == 4:
+ yield self.doctype(token[1], token[2], token[3])
+ elif len(token) == 3:
+ yield self.doctype(token[1], token[2])
+ else:
+ yield self.doctype(token[1])
+ else:
+ raise ValueError("Unknown token type: " + type)
+
+ def _convertAttrib(self, attribs):
+ """html5lib tree-walkers use a dict of (namespace, name): value for
+ attributes, but JSON cannot represent this. Convert from the format
+ in the serializer tests (a list of dicts with "namespace", "name",
+ and "value" as keys) to html5lib's tree-walker format."""
+ attrs = {}
+ for attrib in attribs:
+ name = (attrib["namespace"], attrib["name"])
+ assert(name not in attrs)
+ attrs[name] = attrib["value"]
+ return attrs
+
+
+def serialize_html(input, options):
+ options = dict([(str(k), v) for k, v in options.items()])
+ stream = JsonWalker(input)
+ serializer = HTMLSerializer(alphabetical_attributes=True, **options)
+ return serializer.render(stream, options.get("encoding", None))
+
+
+def runSerializerTest(input, expected, options):
+ encoding = options.get("encoding", None)
+
+ if encoding:
+ encode = lambda x: x.encode(encoding)
+ expected = list(map(encode, expected))
+
+ result = serialize_html(input, options)
+ if len(expected) == 1:
+ assert expected[0] == result, "Expected:\n%s\nActual:\n%s\nOptions:\n%s" % (expected[0], result, str(options))
+ elif result not in expected:
+ assert False, "Expected: %s, Received: %s" % (expected, result)
+
+
+class EncodingTestCase(unittest.TestCase):
+ def throwsWithLatin1(self, input):
+ self.assertRaises(UnicodeEncodeError, serialize_html, input, {"encoding": "iso-8859-1"})
+
+ def testDoctypeName(self):
+ self.throwsWithLatin1([["Doctype", "\u0101"]])
+
+ def testDoctypePublicId(self):
+ self.throwsWithLatin1([["Doctype", "potato", "\u0101"]])
+
+ def testDoctypeSystemId(self):
+ self.throwsWithLatin1([["Doctype", "potato", "potato", "\u0101"]])
+
+ def testCdataCharacters(self):
+ runSerializerTest([["StartTag", "http://www.w3.org/1999/xhtml", "style", {}], ["Characters", "\u0101"]],
+ ["<style>&amacr;"], {"encoding": "iso-8859-1"})
+
+ def testCharacters(self):
+ runSerializerTest([["Characters", "\u0101"]],
+ ["&amacr;"], {"encoding": "iso-8859-1"})
+
+ def testStartTagName(self):
+ self.throwsWithLatin1([["StartTag", "http://www.w3.org/1999/xhtml", "\u0101", []]])
+
+ def testEmptyTagName(self):
+ self.throwsWithLatin1([["EmptyTag", "http://www.w3.org/1999/xhtml", "\u0101", []]])
+
+ def testAttributeName(self):
+ self.throwsWithLatin1([["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": None, "name": "\u0101", "value": "potato"}]]])
+
+ def testAttributeValue(self):
+ runSerializerTest([["StartTag", "http://www.w3.org/1999/xhtml", "span",
+ [{"namespace": None, "name": "potato", "value": "\u0101"}]]],
+ ["<span potato=&amacr;>"], {"encoding": "iso-8859-1"})
+
+ def testEndTagName(self):
+ self.throwsWithLatin1([["EndTag", "http://www.w3.org/1999/xhtml", "\u0101"]])
+
+ def testComment(self):
+ self.throwsWithLatin1([["Comment", "\u0101"]])
+
+
+if "lxml" in optionals_loaded:
+ class LxmlTestCase(unittest.TestCase):
+ def setUp(self):
+ self.parser = etree.XMLParser(resolve_entities=False)
+ self.treewalker = html5lib.getTreeWalker("lxml")
+ self.serializer = HTMLSerializer()
+
+ def testEntityReplacement(self):
+ doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>"""
+ tree = etree.fromstring(doc, parser=self.parser).getroottree()
+ result = serialize(tree, tree="lxml", omit_optional_tags=False)
+ self.assertEqual("""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>\u03B2</html>""", result)
+
+ def testEntityXML(self):
+ doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&gt;</html>"""
+ tree = etree.fromstring(doc, parser=self.parser).getroottree()
+ result = serialize(tree, tree="lxml", omit_optional_tags=False)
+ self.assertEqual("""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&gt;</html>""", result)
+
+ def testEntityNoResolve(self):
+ doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>"""
+ tree = etree.fromstring(doc, parser=self.parser).getroottree()
+ result = serialize(tree, tree="lxml", omit_optional_tags=False,
+ resolve_entities=False)
+ self.assertEqual("""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>&beta;</html>""", result)
+
+
+def test_serializer():
+ for filename in get_data_files('serializer', '*.test'):
+ with open(filename) as fp:
+ tests = json.load(fp)
+ for index, test in enumerate(tests['tests']):
+ yield runSerializerTest, test["input"], test["expected"], test.get("options", {})
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_stream.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_stream.py
new file mode 100644
index 000000000..2a876c1db
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_stream.py
@@ -0,0 +1,183 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import support # flake8: noqa
+import unittest
+import codecs
+from io import BytesIO
+
+from six.moves import http_client
+
+from html5lib.inputstream import (BufferedStream, HTMLInputStream,
+ HTMLUnicodeInputStream, HTMLBinaryInputStream)
+
+class BufferedStreamTest(unittest.TestCase):
+ def test_basic(self):
+ s = b"abc"
+ fp = BufferedStream(BytesIO(s))
+ read = fp.read(10)
+ assert read == s
+
+ def test_read_length(self):
+ fp = BufferedStream(BytesIO(b"abcdef"))
+ read1 = fp.read(1)
+ assert read1 == b"a"
+ read2 = fp.read(2)
+ assert read2 == b"bc"
+ read3 = fp.read(3)
+ assert read3 == b"def"
+ read4 = fp.read(4)
+ assert read4 == b""
+
+ def test_tell(self):
+ fp = BufferedStream(BytesIO(b"abcdef"))
+ read1 = fp.read(1)
+ assert fp.tell() == 1
+ read2 = fp.read(2)
+ assert fp.tell() == 3
+ read3 = fp.read(3)
+ assert fp.tell() == 6
+ read4 = fp.read(4)
+ assert fp.tell() == 6
+
+ def test_seek(self):
+ fp = BufferedStream(BytesIO(b"abcdef"))
+ read1 = fp.read(1)
+ assert read1 == b"a"
+ fp.seek(0)
+ read2 = fp.read(1)
+ assert read2 == b"a"
+ read3 = fp.read(2)
+ assert read3 == b"bc"
+ fp.seek(2)
+ read4 = fp.read(2)
+ assert read4 == b"cd"
+ fp.seek(4)
+ read5 = fp.read(2)
+ assert read5 == b"ef"
+
+ def test_seek_tell(self):
+ fp = BufferedStream(BytesIO(b"abcdef"))
+ read1 = fp.read(1)
+ assert fp.tell() == 1
+ fp.seek(0)
+ read2 = fp.read(1)
+ assert fp.tell() == 1
+ read3 = fp.read(2)
+ assert fp.tell() == 3
+ fp.seek(2)
+ read4 = fp.read(2)
+ assert fp.tell() == 4
+ fp.seek(4)
+ read5 = fp.read(2)
+ assert fp.tell() == 6
+
+
+class HTMLUnicodeInputStreamShortChunk(HTMLUnicodeInputStream):
+ _defaultChunkSize = 2
+
+
+class HTMLBinaryInputStreamShortChunk(HTMLBinaryInputStream):
+ _defaultChunkSize = 2
+
+
+class HTMLInputStreamTest(unittest.TestCase):
+
+ def test_char_ascii(self):
+ stream = HTMLInputStream(b"'", encoding='ascii')
+ self.assertEqual(stream.charEncoding[0], 'ascii')
+ self.assertEqual(stream.char(), "'")
+
+ def test_char_utf8(self):
+ stream = HTMLInputStream('\u2018'.encode('utf-8'), encoding='utf-8')
+ self.assertEqual(stream.charEncoding[0], 'utf-8')
+ self.assertEqual(stream.char(), '\u2018')
+
+ def test_char_win1252(self):
+ stream = HTMLInputStream("\xa9\xf1\u2019".encode('windows-1252'))
+ self.assertEqual(stream.charEncoding[0], 'windows-1252')
+ self.assertEqual(stream.char(), "\xa9")
+ self.assertEqual(stream.char(), "\xf1")
+ self.assertEqual(stream.char(), "\u2019")
+
+ def test_bom(self):
+ stream = HTMLInputStream(codecs.BOM_UTF8 + b"'")
+ self.assertEqual(stream.charEncoding[0], 'utf-8')
+ self.assertEqual(stream.char(), "'")
+
+ def test_utf_16(self):
+ stream = HTMLInputStream((' ' * 1025).encode('utf-16'))
+ self.assertTrue(stream.charEncoding[0] in ['utf-16-le', 'utf-16-be'], stream.charEncoding)
+ self.assertEqual(len(stream.charsUntil(' ', True)), 1025)
+
+ def test_newlines(self):
+ stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\r\nccc\rddddxe")
+ self.assertEqual(stream.position(), (1, 0))
+ self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
+ self.assertEqual(stream.position(), (3, 0))
+ self.assertEqual(stream.charsUntil('x'), "ccc\ndddd")
+ self.assertEqual(stream.position(), (4, 4))
+ self.assertEqual(stream.charsUntil('e'), "x")
+ self.assertEqual(stream.position(), (4, 5))
+
+ def test_newlines2(self):
+ size = HTMLUnicodeInputStream._defaultChunkSize
+ stream = HTMLInputStream("\r" * size + "\n")
+ self.assertEqual(stream.charsUntil('x'), "\n" * size)
+
+ def test_position(self):
+ stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\nccc\nddde\nf\ngh")
+ self.assertEqual(stream.position(), (1, 0))
+ self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
+ self.assertEqual(stream.position(), (3, 0))
+ stream.unget("\n")
+ self.assertEqual(stream.position(), (2, 2))
+ self.assertEqual(stream.charsUntil('c'), "\n")
+ self.assertEqual(stream.position(), (3, 0))
+ stream.unget("\n")
+ self.assertEqual(stream.position(), (2, 2))
+ self.assertEqual(stream.char(), "\n")
+ self.assertEqual(stream.position(), (3, 0))
+ self.assertEqual(stream.charsUntil('e'), "ccc\nddd")
+ self.assertEqual(stream.position(), (4, 3))
+ self.assertEqual(stream.charsUntil('h'), "e\nf\ng")
+ self.assertEqual(stream.position(), (6, 1))
+
+ def test_position2(self):
+ stream = HTMLUnicodeInputStreamShortChunk("abc\nd")
+ self.assertEqual(stream.position(), (1, 0))
+ self.assertEqual(stream.char(), "a")
+ self.assertEqual(stream.position(), (1, 1))
+ self.assertEqual(stream.char(), "b")
+ self.assertEqual(stream.position(), (1, 2))
+ self.assertEqual(stream.char(), "c")
+ self.assertEqual(stream.position(), (1, 3))
+ self.assertEqual(stream.char(), "\n")
+ self.assertEqual(stream.position(), (2, 0))
+ self.assertEqual(stream.char(), "d")
+ self.assertEqual(stream.position(), (2, 1))
+
+ def test_python_issue_20007(self):
+ """
+ Make sure we have a work-around for Python bug #20007
+ http://bugs.python.org/issue20007
+ """
+ class FakeSocket(object):
+ def makefile(self, _mode, _bufsize=None):
+ return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")
+
+ source = http_client.HTTPResponse(FakeSocket())
+ source.begin()
+ stream = HTMLInputStream(source)
+ self.assertEqual(stream.charsUntil(" "), "Text")
+
+
+def buildTestSuite():
+ return unittest.defaultTestLoader.loadTestsFromName(__name__)
+
+
+def main():
+ buildTestSuite()
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_tokenizer.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_tokenizer.py
new file mode 100644
index 000000000..90315ab35
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_tokenizer.py
@@ -0,0 +1,188 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import json
+import warnings
+import re
+
+from .support import get_data_files
+
+from html5lib.tokenizer import HTMLTokenizer
+from html5lib import constants
+
+
+class TokenizerTestParser(object):
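+    # Runs HTMLTokenizer from a given initial state and records the emitted
+    # tokens in the list form used by the tokenizer .test files.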
+ def __init__(self, initialState, lastStartTag=None):
+ self.tokenizer = HTMLTokenizer
+ self._state = initialState
+ self._lastStartTag = lastStartTag
+
+ def parse(self, stream, encoding=None, innerHTML=False):
+ tokenizer = self.tokenizer(stream, encoding)
+ self.outputTokens = []
+
+ tokenizer.state = getattr(tokenizer, self._state)
+ if self._lastStartTag is not None:
+ tokenizer.currentToken = {"type": "startTag",
+ "name": self._lastStartTag}
+
+ types = dict((v, k) for k, v in constants.tokenTypes.items())
+ for token in tokenizer:
+ getattr(self, 'process%s' % types[token["type"]])(token)
+
+ return self.outputTokens
+
+ def processDoctype(self, token):
+ self.outputTokens.append(["DOCTYPE", token["name"], token["publicId"],
+ token["systemId"], token["correct"]])
+
+ def processStartTag(self, token):
+ self.outputTokens.append(["StartTag", token["name"],
+ dict(token["data"][::-1]), token["selfClosing"]])
+
+ def processEmptyTag(self, token):
+ if token["name"] not in constants.voidElements:
+ self.outputTokens.append("ParseError")
+ self.outputTokens.append(["StartTag", token["name"], dict(token["data"][::-1])])
+
+ def processEndTag(self, token):
+ self.outputTokens.append(["EndTag", token["name"],
+ token["selfClosing"]])
+
+ def processComment(self, token):
+ self.outputTokens.append(["Comment", token["data"]])
+
+ def processSpaceCharacters(self, token):
+ self.outputTokens.append(["Character", token["data"]])
+ self.processSpaceCharacters = self.processCharacters
+
+ def processCharacters(self, token):
+ self.outputTokens.append(["Character", token["data"]])
+
+ def processEOF(self, token):
+ pass
+
+ def processParseError(self, token):
+ self.outputTokens.append(["ParseError", token["data"]])
+
+
+def concatenateCharacterTokens(tokens):
+ outputTokens = []
+ for token in tokens:
+ if "ParseError" not in token and token[0] == "Character":
+ if (outputTokens and "ParseError" not in outputTokens[-1] and
+ outputTokens[-1][0] == "Character"):
+ outputTokens[-1][1] += token[1]
+ else:
+ outputTokens.append(token)
+ else:
+ outputTokens.append(token)
+ return outputTokens
+
+
+def normalizeTokens(tokens):
+ # TODO: convert tests to reflect arrays
+ for i, token in enumerate(tokens):
+ if token[0] == 'ParseError':
+ tokens[i] = token[0]
+ return tokens
+
+
+def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
+ ignoreErrors=False):
+ """Test whether the test has passed or failed
+
+ If the ignoreErrorOrder flag is set to true we don't test the relative
+ positions of parse errors and non parse errors
+ """
+ checkSelfClosing = False
+ for token in expectedTokens:
+ if (token[0] == "StartTag" and len(token) == 4
+ or token[0] == "EndTag" and len(token) == 3):
+ checkSelfClosing = True
+ break
+
+ if not checkSelfClosing:
+ for token in receivedTokens:
+ if token[0] == "StartTag" or token[0] == "EndTag":
+ token.pop()
+
+ if not ignoreErrorOrder and not ignoreErrors:
+ return expectedTokens == receivedTokens
+ else:
+ # Sort the tokens into two groups; non-parse errors and parse errors
+ tokens = {"expected": [[], []], "received": [[], []]}
+ for tokenType, tokenList in zip(list(tokens.keys()),
+ (expectedTokens, receivedTokens)):
+ for token in tokenList:
+ if token != "ParseError":
+ tokens[tokenType][0].append(token)
+ else:
+ if not ignoreErrors:
+ tokens[tokenType][1].append(token)
+ return tokens["expected"] == tokens["received"]
+
+
+def unescape(test):
+ def decode(inp):
+ return inp.encode("utf-8").decode("unicode-escape")
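+        # e.g. decode("\\u03b2") yields the single character U+03B2; the
+        # doubleEscaped tests store such escapes literally in the JSON.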
+
+ test["input"] = decode(test["input"])
+ for token in test["output"]:
+ if token == "ParseError":
+ continue
+ else:
+ token[1] = decode(token[1])
+ if len(token) > 2:
+                # Iterate over a snapshot of the items: the dict is mutated
+                # inside the loop.
+                for key, value in list(token[2].items()):
+                    del token[2][key]
+                    token[2][decode(key)] = decode(value)
+ return test
+
+
+def runTokenizerTest(test):
+ warnings.resetwarnings()
+ warnings.simplefilter("error")
+
+ expected = concatenateCharacterTokens(test['output'])
+ if 'lastStartTag' not in test:
+ test['lastStartTag'] = None
+ parser = TokenizerTestParser(test['initialState'],
+ test['lastStartTag'])
+ tokens = parser.parse(test['input'])
+ tokens = concatenateCharacterTokens(tokens)
+ received = normalizeTokens(tokens)
+    errorMsg = "\n".join(["\n\nInitial state:",
+                          test['initialState'],
+                          "\nInput:", test['input'],
+                          "\nExpected:", repr(expected),
+                          "\nReceived:", repr(tokens)])
+ ignoreErrorOrder = test.get('ignoreErrorOrder', False)
+ assert tokensMatch(expected, received, ignoreErrorOrder, True), errorMsg
+
+
+def _doCapitalize(match):
+ return match.group(1).upper()
+
+_capitalizeRe = re.compile(r"\W+(\w)").sub
+
+
+def capitalize(s):
+ s = s.lower()
+ s = _capitalizeRe(_doCapitalize, s)
+ return s
+
+
+def testTokenizer():
+ for filename in get_data_files('tokenizer', '*.test'):
+ with open(filename) as fp:
+ tests = json.load(fp)
+ if 'tests' in tests:
+ for index, test in enumerate(tests['tests']):
+ if 'initialStates' not in test:
+ test["initialStates"] = ["Data state"]
+ if 'doubleEscaped' in test:
+ test = unescape(test)
+ for initialState in test["initialStates"]:
+ test["initialState"] = capitalize(initialState)
+ yield runTokenizerTest, test
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_treeadapters.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_treeadapters.py
new file mode 100644
index 000000000..5f38b6c33
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_treeadapters.py
@@ -0,0 +1,40 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import support # flake8: noqa
+
+import html5lib
+from html5lib.treeadapters import sax
+from html5lib.treewalkers import getTreeWalker
+
+
+def test_to_sax():
+ handler = support.TracingSaxHandler()
+ tree = html5lib.parse("""<html xml:lang="en">
+ <title>Directory Listing</title>
+ <a href="/"><b/></p>
+ """, treebuilder="etree")
+ walker = getTreeWalker("etree")
+ sax.to_sax(walker(tree), handler)
+ expected = [
+ 'startDocument',
+ ('startElementNS', ('http://www.w3.org/1999/xhtml', 'html'),
+ 'html', {(None, 'xml:lang'): 'en'}),
+ ('startElementNS', ('http://www.w3.org/1999/xhtml', 'head'), 'head', {}),
+ ('startElementNS', ('http://www.w3.org/1999/xhtml', 'title'), 'title', {}),
+ ('characters', 'Directory Listing'),
+ ('endElementNS', ('http://www.w3.org/1999/xhtml', 'title'), 'title'),
+ ('characters', '\n '),
+ ('endElementNS', ('http://www.w3.org/1999/xhtml', 'head'), 'head'),
+ ('startElementNS', ('http://www.w3.org/1999/xhtml', 'body'), 'body', {}),
+ ('startElementNS', ('http://www.w3.org/1999/xhtml', 'a'), 'a', {(None, 'href'): '/'}),
+ ('startElementNS', ('http://www.w3.org/1999/xhtml', 'b'), 'b', {}),
+ ('startElementNS', ('http://www.w3.org/1999/xhtml', 'p'), 'p', {}),
+ ('endElementNS', ('http://www.w3.org/1999/xhtml', 'p'), 'p'),
+ ('characters', '\n '),
+ ('endElementNS', ('http://www.w3.org/1999/xhtml', 'b'), 'b'),
+ ('endElementNS', ('http://www.w3.org/1999/xhtml', 'a'), 'a'),
+ ('endElementNS', ('http://www.w3.org/1999/xhtml', 'body'), 'body'),
+ ('endElementNS', ('http://www.w3.org/1999/xhtml', 'html'), 'html'),
+ 'endDocument',
+ ]
+ assert expected == handler.visited
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_treewalkers.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_treewalkers.py
new file mode 100644
index 000000000..b7756035c
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_treewalkers.py
@@ -0,0 +1,353 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import os
+import sys
+import unittest
+import warnings
+from difflib import unified_diff
+
+try:
+ unittest.TestCase.assertEqual
+except AttributeError:
+ unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
+
+from .support import get_data_files, TestData, convertExpected
+
+from html5lib import html5parser, treewalkers, treebuilders, constants
+
+
+def PullDOMAdapter(node):
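+    # Yields (event, node) pairs in the style of xml.dom.pulldom so that a
+    # parsed DOM tree can be fed to the "pulldom" tree walker.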
+ from xml.dom import Node
+ from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, COMMENT, CHARACTERS
+
+ if node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
+ for childNode in node.childNodes:
+ for event in PullDOMAdapter(childNode):
+ yield event
+
+ elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
+ raise NotImplementedError("DOCTYPE nodes are not supported by PullDOM")
+
+ elif node.nodeType == Node.COMMENT_NODE:
+ yield COMMENT, node
+
+ elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
+ yield CHARACTERS, node
+
+ elif node.nodeType == Node.ELEMENT_NODE:
+ yield START_ELEMENT, node
+ for childNode in node.childNodes:
+ for event in PullDOMAdapter(childNode):
+ yield event
+ yield END_ELEMENT, node
+
+ else:
+ raise NotImplementedError("Node type not supported: " + str(node.nodeType))
+
+treeTypes = {
+ "DOM": {"builder": treebuilders.getTreeBuilder("dom"),
+ "walker": treewalkers.getTreeWalker("dom")},
+ "PullDOM": {"builder": treebuilders.getTreeBuilder("dom"),
+ "adapter": PullDOMAdapter,
+ "walker": treewalkers.getTreeWalker("pulldom")},
+}
+
+# Try whatever etree implementations are available from a list that is
+# "supposed" to work
+try:
+ import xml.etree.ElementTree as ElementTree
+except ImportError:
+ pass
+else:
+ treeTypes['ElementTree'] = \
+ {"builder": treebuilders.getTreeBuilder("etree", ElementTree),
+ "walker": treewalkers.getTreeWalker("etree", ElementTree)}
+
+try:
+ import xml.etree.cElementTree as ElementTree
+except ImportError:
+ pass
+else:
+ treeTypes['cElementTree'] = \
+ {"builder": treebuilders.getTreeBuilder("etree", ElementTree),
+ "walker": treewalkers.getTreeWalker("etree", ElementTree)}
+
+
+try:
+ import lxml.etree as ElementTree # flake8: noqa
+except ImportError:
+ pass
+else:
+ treeTypes['lxml_native'] = \
+ {"builder": treebuilders.getTreeBuilder("lxml"),
+ "walker": treewalkers.getTreeWalker("lxml")}
+
+
+try:
+ from genshi.core import QName, Attrs
+ from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
+except ImportError:
+ pass
+else:
+ def GenshiAdapter(tree):
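+        # Adapts html5lib "dom" walker tokens into Genshi stream events
+        # (START, END, TEXT, COMMENT, DOCTYPE) for the genshi tree walker.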
+ text = None
+ for token in treewalkers.getTreeWalker("dom")(tree):
+ type = token["type"]
+ if type in ("Characters", "SpaceCharacters"):
+ if text is None:
+ text = token["data"]
+ else:
+ text += token["data"]
+ elif text is not None:
+ yield TEXT, text, (None, -1, -1)
+ text = None
+
+ if type in ("StartTag", "EmptyTag"):
+ if token["namespace"]:
+ name = "{%s}%s" % (token["namespace"], token["name"])
+ else:
+ name = token["name"]
+ attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
+ for attr, value in token["data"].items()])
+ yield (START, (QName(name), attrs), (None, -1, -1))
+ if type == "EmptyTag":
+ type = "EndTag"
+
+ if type == "EndTag":
+ if token["namespace"]:
+ name = "{%s}%s" % (token["namespace"], token["name"])
+ else:
+ name = token["name"]
+
+ yield END, QName(name), (None, -1, -1)
+
+ elif type == "Comment":
+ yield COMMENT, token["data"], (None, -1, -1)
+
+ elif type == "Doctype":
+ yield DOCTYPE, (token["name"], token["publicId"],
+ token["systemId"]), (None, -1, -1)
+
+ else:
+ pass # FIXME: What to do?
+
+ if text is not None:
+ yield TEXT, text, (None, -1, -1)
+
+ treeTypes["genshi"] = \
+ {"builder": treebuilders.getTreeBuilder("dom"),
+ "adapter": GenshiAdapter,
+ "walker": treewalkers.getTreeWalker("genshi")}
+
+
+def concatenateCharacterTokens(tokens):
+ charactersToken = None
+ for token in tokens:
+ type = token["type"]
+ if type in ("Characters", "SpaceCharacters"):
+ if charactersToken is None:
+ charactersToken = {"type": "Characters", "data": token["data"]}
+ else:
+ charactersToken["data"] += token["data"]
+ else:
+ if charactersToken is not None:
+ yield charactersToken
+ charactersToken = None
+ yield token
+ if charactersToken is not None:
+ yield charactersToken
+
+
+def convertTokens(tokens):
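+    # Renders walker tokens as the indented tree-dump text used by the
+    # tree-construction expected output.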
+ output = []
+ indent = 0
+ for token in concatenateCharacterTokens(tokens):
+ type = token["type"]
+ if type in ("StartTag", "EmptyTag"):
+ if (token["namespace"] and
+ token["namespace"] != constants.namespaces["html"]):
+ if token["namespace"] in constants.prefixes:
+ name = constants.prefixes[token["namespace"]]
+ else:
+ name = token["namespace"]
+ name += " " + token["name"]
+ else:
+ name = token["name"]
+ output.append("%s<%s>" % (" " * indent, name))
+ indent += 2
+ attrs = token["data"]
+ if attrs:
+ # TODO: Remove this if statement, attrs should always exist
+ for (namespace, name), value in sorted(attrs.items()):
+ if namespace:
+ if namespace in constants.prefixes:
+ outputname = constants.prefixes[namespace]
+ else:
+ outputname = namespace
+ outputname += " " + name
+ else:
+ outputname = name
+ output.append("%s%s=\"%s\"" % (" " * indent, outputname, value))
+ if type == "EmptyTag":
+ indent -= 2
+ elif type == "EndTag":
+ indent -= 2
+ elif type == "Comment":
+ output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
+ elif type == "Doctype":
+ if token["name"]:
+ if token["publicId"]:
+ output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
+ (" " * indent, token["name"],
+ token["publicId"],
+ token["systemId"] and token["systemId"] or ""))
+ elif token["systemId"]:
+ output.append("""%s<!DOCTYPE %s "" "%s">""" %
+ (" " * indent, token["name"],
+ token["systemId"]))
+ else:
+ output.append("%s<!DOCTYPE %s>" % (" " * indent,
+ token["name"]))
+ else:
+ output.append("%s<!DOCTYPE >" % (" " * indent,))
+ elif type in ("Characters", "SpaceCharacters"):
+ output.append("%s\"%s\"" % (" " * indent, token["data"]))
+ else:
+ pass # TODO: what to do with errors?
+ return "\n".join(output)
+
+import re
+attrlist = re.compile(r"^(\s+)\w+=.*(\n\1\w+=.*)+", re.M)
+
+
+def sortattrs(x):
+ lines = x.group(0).split("\n")
+ lines.sort()
+ return "\n".join(lines)
+
+
+class TokenTestCase(unittest.TestCase):
+ def test_all_tokens(self):
+ expected = [
+ {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'},
+ {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
+ {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
+ {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
+ {'data': 'a', 'type': 'Characters'},
+ {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
+ {'data': 'b', 'type': 'Characters'},
+ {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
+ {'data': 'c', 'type': 'Characters'},
+ {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
+ {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'}
+ ]
+ for treeName, treeCls in treeTypes.items():
+ p = html5parser.HTMLParser(tree=treeCls["builder"])
+ document = p.parse("<html><head></head><body>a<div>b</div>c</body></html>")
+ document = treeCls.get("adapter", lambda x: x)(document)
+ output = treeCls["walker"](document)
+ for expectedToken, outputToken in zip(expected, output):
+ self.assertEqual(expectedToken, outputToken)
+
+
+def runTreewalkerTest(innerHTML, input, expected, errors, treeClass):
+ warnings.resetwarnings()
+ warnings.simplefilter("error")
+ try:
+ p = html5parser.HTMLParser(tree=treeClass["builder"])
+ if innerHTML:
+ document = p.parseFragment(input, innerHTML)
+ else:
+ document = p.parse(input)
+ except constants.DataLossWarning:
+ # Ignore testcases we know we don't pass
+ return
+
+ document = treeClass.get("adapter", lambda x: x)(document)
+ try:
+ output = convertTokens(treeClass["walker"](document))
+ output = attrlist.sub(sortattrs, output)
+ expected = attrlist.sub(sortattrs, convertExpected(expected))
+ diff = "".join(unified_diff([line + "\n" for line in expected.splitlines()],
+ [line + "\n" for line in output.splitlines()],
+ "Expected", "Received"))
+ assert expected == output, "\n".join([
+ "", "Input:", input,
+ "", "Expected:", expected,
+ "", "Received:", output,
+ "", "Diff:", diff,
+ ])
+ except NotImplementedError:
+ pass # Amnesty for those that confess...
+
+
+def test_treewalker():
+ sys.stdout.write('Testing tree walkers ' + " ".join(list(treeTypes.keys())) + "\n")
+
+ for treeName, treeCls in treeTypes.items():
+ files = get_data_files('tree-construction')
+ for filename in files:
+ testName = os.path.basename(filename).replace(".dat", "")
+ if testName in ("template",):
+ continue
+
+ tests = TestData(filename, "data")
+
+ for index, test in enumerate(tests):
+ (input, errors,
+ innerHTML, expected) = [test[key] for key in ("data", "errors",
+ "document-fragment",
+ "document")]
+ errors = errors.split("\n")
+ yield runTreewalkerTest, innerHTML, input, expected, errors, treeCls
+
+
+def set_attribute_on_first_child(docfrag, name, value, treeName):
+ """naively sets an attribute on the first child of the document
+ fragment passed in"""
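+    # e.g. set_attribute_on_first_child(frag, "class", "test123", "DOM")
+    # adds class="test123" to the fragment's first element (illustrative).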
+ setter = {'ElementTree': lambda d: d[0].set,
+ 'DOM': lambda d: d.firstChild.setAttribute}
+ setter['cElementTree'] = setter['ElementTree']
+ try:
+ setter.get(treeName, setter['DOM'])(docfrag)(name, value)
+ except AttributeError:
+ setter['ElementTree'](docfrag)(name, value)
+
+
+def runTreewalkerEditTest(intext, expected, attrs_to_add, tree):
+ """tests what happens when we add attributes to the intext"""
+ treeName, treeClass = tree
+ parser = html5parser.HTMLParser(tree=treeClass["builder"])
+ document = parser.parseFragment(intext)
+ for nom, val in attrs_to_add:
+ set_attribute_on_first_child(document, nom, val, treeName)
+
+ document = treeClass.get("adapter", lambda x: x)(document)
+ output = convertTokens(treeClass["walker"](document))
+ output = attrlist.sub(sortattrs, output)
+    if output not in expected:
+ raise AssertionError("TreewalkerEditTest: %s\nExpected:\n%s\nReceived:\n%s" % (treeName, expected, output))
+
+
+def test_treewalker_six_mix():
+ """Str/Unicode mix. If str attrs added to tree"""
+
+    # On Python 2.x string literals are of type str, unless (as in this
+    # file) unicode_literals is imported from __future__, in which case
+    # string literals become unicode objects.
+
+    # This test simulates a Python 2 user modifying attributes on a document
+    # fragment without using the u'' syntax or importing unicode_literals.
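+    # For example, str('class') below is a byte string on Python 2 but a
+    # text string on Python 3; either way the walker output should match.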
+ sm_tests = [
+ ('<a href="http://example.com">Example</a>',
+ [(str('class'), str('test123'))],
+ '<a>\n class="test123"\n href="http://example.com"\n "Example"'),
+
+ ('<link href="http://example.com/cow">',
+ [(str('rel'), str('alternate'))],
+ '<link>\n href="http://example.com/cow"\n rel="alternate"\n "Example"')
+ ]
+
+ for tree in treeTypes.items():
+ for intext, attrs, expected in sm_tests:
+ yield runTreewalkerEditTest, intext, expected, attrs, tree
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_whitespace_filter.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_whitespace_filter.py
new file mode 100644
index 000000000..9ed27fd6a
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/test_whitespace_filter.py
@@ -0,0 +1,133 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import unittest
+
+from html5lib.filters.whitespace import Filter
+from html5lib.constants import spaceCharacters
+spaceCharacters = "".join(spaceCharacters)
+
+try:
+ unittest.TestCase.assertEqual
+except AttributeError:
+ unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
+
+
+class TestCase(unittest.TestCase):
+ def runTest(self, input, expected):
+ output = list(Filter(input))
+ errorMsg = "\n".join(["\n\nInput:", str(input),
+ "\nExpected:", str(expected),
+ "\nReceived:", str(output)])
+ self.assertEqual(output, expected, errorMsg)
+
+ def runTestUnmodifiedOutput(self, input):
+ self.runTest(input, input)
+
+ def testPhrasingElements(self):
+ self.runTestUnmodifiedOutput(
+ [{"type": "Characters", "data": "This is a "},
+ {"type": "StartTag", "name": "span", "data": []},
+ {"type": "Characters", "data": "phrase"},
+ {"type": "EndTag", "name": "span", "data": []},
+ {"type": "SpaceCharacters", "data": " "},
+ {"type": "Characters", "data": "with"},
+ {"type": "SpaceCharacters", "data": " "},
+ {"type": "StartTag", "name": "em", "data": []},
+ {"type": "Characters", "data": "emphasised text"},
+ {"type": "EndTag", "name": "em", "data": []},
+ {"type": "Characters", "data": " and an "},
+ {"type": "StartTag", "name": "img", "data": [["alt", "image"]]},
+ {"type": "Characters", "data": "."}])
+
+ def testLeadingWhitespace(self):
+ self.runTest(
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "SpaceCharacters", "data": spaceCharacters},
+ {"type": "Characters", "data": "foo"},
+ {"type": "EndTag", "name": "p", "data": []}],
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "SpaceCharacters", "data": " "},
+ {"type": "Characters", "data": "foo"},
+ {"type": "EndTag", "name": "p", "data": []}])
+
+ def testLeadingWhitespaceAsCharacters(self):
+ self.runTest(
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "Characters", "data": spaceCharacters + "foo"},
+ {"type": "EndTag", "name": "p", "data": []}],
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "Characters", "data": " foo"},
+ {"type": "EndTag", "name": "p", "data": []}])
+
+ def testTrailingWhitespace(self):
+ self.runTest(
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "Characters", "data": "foo"},
+ {"type": "SpaceCharacters", "data": spaceCharacters},
+ {"type": "EndTag", "name": "p", "data": []}],
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "Characters", "data": "foo"},
+ {"type": "SpaceCharacters", "data": " "},
+ {"type": "EndTag", "name": "p", "data": []}])
+
+ def testTrailingWhitespaceAsCharacters(self):
+ self.runTest(
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "Characters", "data": "foo" + spaceCharacters},
+ {"type": "EndTag", "name": "p", "data": []}],
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "Characters", "data": "foo "},
+ {"type": "EndTag", "name": "p", "data": []}])
+
+ def testWhitespace(self):
+ self.runTest(
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "Characters", "data": "foo" + spaceCharacters + "bar"},
+ {"type": "EndTag", "name": "p", "data": []}],
+ [{"type": "StartTag", "name": "p", "data": []},
+ {"type": "Characters", "data": "foo bar"},
+ {"type": "EndTag", "name": "p", "data": []}])
+
+ def testLeadingWhitespaceInPre(self):
+ self.runTestUnmodifiedOutput(
+ [{"type": "StartTag", "name": "pre", "data": []},
+ {"type": "SpaceCharacters", "data": spaceCharacters},
+ {"type": "Characters", "data": "foo"},
+ {"type": "EndTag", "name": "pre", "data": []}])
+
+ def testLeadingWhitespaceAsCharactersInPre(self):
+ self.runTestUnmodifiedOutput(
+ [{"type": "StartTag", "name": "pre", "data": []},
+ {"type": "Characters", "data": spaceCharacters + "foo"},
+ {"type": "EndTag", "name": "pre", "data": []}])
+
+ def testTrailingWhitespaceInPre(self):
+ self.runTestUnmodifiedOutput(
+ [{"type": "StartTag", "name": "pre", "data": []},
+ {"type": "Characters", "data": "foo"},
+ {"type": "SpaceCharacters", "data": spaceCharacters},
+ {"type": "EndTag", "name": "pre", "data": []}])
+
+ def testTrailingWhitespaceAsCharactersInPre(self):
+ self.runTestUnmodifiedOutput(
+ [{"type": "StartTag", "name": "pre", "data": []},
+ {"type": "Characters", "data": "foo" + spaceCharacters},
+ {"type": "EndTag", "name": "pre", "data": []}])
+
+ def testWhitespaceInPre(self):
+ self.runTestUnmodifiedOutput(
+ [{"type": "StartTag", "name": "pre", "data": []},
+ {"type": "Characters", "data": "foo" + spaceCharacters + "bar"},
+ {"type": "EndTag", "name": "pre", "data": []}])
+
+
+def buildTestSuite():
+ return unittest.defaultTestLoader.loadTestsFromName(__name__)
+
+
+def main():
+ buildTestSuite()
+ unittest.main()
+
+if __name__ == "__main__":
+ main()
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/AUTHORS.rst b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/AUTHORS.rst
new file mode 100644
index 000000000..4a7de17ad
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/AUTHORS.rst
@@ -0,0 +1,34 @@
+Credits
+=======
+
+The ``html5lib`` test data is maintained by:
+
+- James Graham
+- Geoffrey Sneddon
+
+
+Contributors
+------------
+
+- Adam Barth
+- Andi Sidwell
+- Anne van Kesteren
+- David Flanagan
+- Edward Z. Yang
+- Geoffrey Sneddon
+- Henri Sivonen
+- Ian Hickson
+- Jacques Distler
+- James Graham
+- Lachlan Hunt
+- lantis63
+- Mark Pilgrim
+- Mats Palmgren
+- Ms2ger
+- Nolan Waite
+- Philip Taylor
+- Rafael Weinstein
+- Ryan King
+- Sam Ruby
+- Simon Pieters
+- Thomas Broyer
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/LICENSE b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/LICENSE
new file mode 100644
index 000000000..8812371b4
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2006-2013 James Graham, Geoffrey Sneddon, and
+other contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/chardet/test_big5.txt b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/chardet/test_big5.txt
new file mode 100644
index 000000000..91074c98d
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/chardet/test_big5.txt
@@ -0,0 +1,51 @@
+¦Ñ¤l¡m¹D¼w¸g¡n ²Ä¤@~¥|¤Q³¹
+
+¦Ñ¤l¹D¸g
+
+²Ä¤@³¹
+
+¹D¥i¹D¡A«D±`¹D¡C¦W¥i¦W¡A«D±`¦W¡CµL¡A¦W¤Ñ¦a¤§©l¡Q¦³¡A¦W¸Uª«¤§¥À¡C
+¬G±`µL¡A±ý¥HÆ[¨ä§®¡F±`¦³¡A±ý¥HÆ[¨äéu¡C¦¹¨âªÌ¡A¦P¥X¦Ó²§¦W¡A¦P¿×¤§
+¥È¡C¥È¤§¤S¥È¡A²³§®¤§ªù¡C
+
+²Ä¤G³¹
+
+¤Ñ¤U¬Òª¾¬ü¤§¬°¬ü¡A´µ´c¨o¡Q¬Òª¾µ½¤§¬°µ½¡A´µ¤£µ½¨o¡C¬G¦³µL¬Û¥Í¡AÃø
+©ö¬Û¦¨¡Aªøµu¬Û§Î¡A°ª¤U¬Û¶É¡A­µÁn¬Û©M¡A«e«á¬ÛÀH¡C¬O¥H¸t¤H³B¡uµL¬°
+¡v¤§¨Æ¡A¦æ¡u¤£¨¥¡v¤§±Ð¡C¸Uª«§@²j¦Ó¤£Ãã¡A¥Í¦Ó¤£¦³¡A¬°¦Ó¤£«î¡A¥\¦¨
+¦Ó¥±©~¡C¤Ò°ß¥±©~¡A¬O¥H¤£¥h¡C
+
+²Ä¤T³¹
+
+¤£©|½å¡A¨Ï¥Á¤£ª§¡Q¤£¶QÃø±o¤§³f¡A¨Ï¥Á¤£¬°µs¡Q¤£¨£¥i±ý¡A¨Ï¥Á¤ß¤£¶Ã
+¡C¬O¥H¡u¸t¤H¡v¤§ªv¡Aµê¨ä¤ß¡A¹ê¨ä¸¡¡A®z¨ä§Ó¡A±j¨ä°©¡C±`¨Ï¥ÁµLª¾µL
+±ý¡C¨Ï¤Ò´¼ªÌ¤£´±¬°¤]¡C¬°¡uµL¬°¡v¡A«hµL¤£ªv¡C
+
+²Ä¥|³¹
+
+¡u¹D¡v¨R¡A¦Ó¥Î¤§©Î¤£¬Õ¡C²W¤¼¡A¦ü¸Uª«¤§©v¡Q®À¨ä¾U¡A¸Ñ¨ä¯É¡A©M¨ä¥ú
+¡A¦P¨ä¹Ð¡Q´ï¤¼¦ü©Î¦s¡C§^¤£ª¾½Ö¤§¤l¡H¶H«Ò¤§¥ý¡C
+
+²Ä¤­³¹
+
+¤Ñ¦a¤£¤¯¡A¥H¸Uª«¬°¯ìª¯¡Q¸t¤H¤£¤¯¡A¥H¦Ê©m¬°¯ìª¯¡C¤Ñ¦a¤§¶¡¡A¨äµSéÑ
+õþ¥G¡Hµê¦Ó¤£©}¡A°Ê¦Ó·U¥X¡C¦h¨¥¼Æ½a¡A¤£¦p¦u¤¤¡C
+
+²Ä¤»³¹
+
+¨¦¯«¤£¦º¡A¬O¿×¥È¦É¡C¥È¦É¤§ªù¡A¬O¿×¤Ñ¦a®Ú¡Cºøºø­Y¦s¡A¥Î¤§¤£¶Ô¡C
+
+²Ä¤C³¹
+
+¤Ñªø¦a¤[¡C¤Ñ¦a©Ò¥H¯àªø¥B¤[ªÌ¡A¥H¨ä¤£¦Û¥Í¡A¬G¯àªø¤[¡C¬O¥H¸t¤H«á¨ä
+¨­¦Ó¨­¥ý¡A¥~¨ä¨­¦Ó¨­¦s¡C«D¥H¨äµL¨p¨¸¡H¬G¯à¦¨¨ä¨p¡C
+
+²Ä¤K³¹
+
+¤Wµ½­Y¤ô¡C¤ôµ½§Q¸Uª«¦Ó¤£ª§¡C³B²³¤H¤§©Ò´c¡A¬G´X©ó¹D¡C©~µ½¦a¡A¤ßµ½
+²W¡A»Pµ½¤¯¡A¨¥µ½«H¡A¬Fµ½ªv¡A¨Æµ½¯à¡A°Êµ½®É¡C¤Ò°ß¤£ª§¡A¬GµL¤×¡C
+
+²Ä¤E³¹
+
+«ù¦Ó¬Õ¤§¡A¤£¦p¨ä¤w¡Q´¢¦Ó¾U¤§¡A¤£¥iªø«O¡Cª÷¥Éº¡°ó¡A²ö¤§¯à¦u¡Q´I¶Q
+¦Óź¡A¦Û¿ò¨ä©S¡C¥\¹E¨­°h¡A¤Ñ¤§¹D¡C
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/test-yahoo-jp.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/test-yahoo-jp.dat
new file mode 100644
index 000000000..3c25ecb2b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/test-yahoo-jp.dat
@@ -0,0 +1,10 @@
+#data
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=euc-jp">
+<!--京-->
+<title>Yahoo! JAPAN</title>
+<meta name="description" content="日本最大級ã®ãƒãƒ¼ã‚¿ãƒ«ã‚µã‚¤ãƒˆã€‚検索ã€ã‚ªãƒ¼ã‚¯ã‚·ãƒ§ãƒ³ã€ãƒ‹ãƒ¥ãƒ¼ã‚¹ã€ãƒ¡ãƒ¼ãƒ«ã€ã‚³ãƒŸãƒ¥ãƒ‹ãƒ†ã‚£ã€ã‚·ãƒ§ãƒƒãƒ”ングã€ãªã©80以上ã®ã‚µãƒ¼ãƒ“スを展開。ã‚ãªãŸã®ç”Ÿæ´»ã‚’より豊ã‹ã«ã™ã‚‹ã€Œãƒ©ã‚¤ãƒ•ãƒ»ã‚¨ãƒ³ã‚¸ãƒ³ã€ã‚’目指ã—ã¦ã„ãã¾ã™ã€‚">
+<style type="text/css" media="all">
+#encoding
+euc_jp
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/tests1.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/tests1.dat
new file mode 100644
index 000000000..5b585e735
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/tests1.dat
@@ -0,0 +1,394 @@
+#data
+<!DOCTYPE HTML>
+<!-- (control test - for the other tests to work, this should pass - you may have to set your defaults appropriately) -->
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta charset="ISO-8859-1">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta charset='iso8859-2'>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta charset=iso8859-2>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta
+charset=iso8859-2>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<metacharset=iso8859-2>
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta http-equiv="Content-Type" content="text/html; charset=iso8859-2">
+<!-- XXX this is a tough one, not sure how to do this one, unless we explictly do content= processing -->
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta content="text/html; charset=iso8859-2" http-equiv="Content-Type">
+<!-- XXX this is a tough one, not sure how to do this one, unless we explictly do content= processing -->
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta http-equiv="Content-Type" content=text/html; charset=iso8859-2>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta http-equiv="Content-Type content="text/html; charset=iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta http-equiv="Content-Type " content="text/html; charset=iso8859-2">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta content="text/html; charset=iso8859-2" http-equiv="Content-Type ">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta http-equiv="Content-Type>" content="text/html; charset=iso8859-2">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta content="text/html; charset=iso8859-2" http-equiv="Content-Type>">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta http-equiv="Content-Style-Type" content="text/html; charset=iso8859-2">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta content="text/html; charset=iso8859-2" http-equiv="Content-Style-Type">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta name="Content-Style-Type" content="text/html; charset=iso8859-2">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta content="text/html; charset=iso8859-2" name="Content-Style-Type">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta content="text/html; charset=iso8859-2">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta content=" text/html; charset = iso8859-2 ">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta content="
+text/html; charset=iso8859-2
+" http-equiv="Content-Type">
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta charset="
+iso8859-2
+">
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta charset=
+iso8859-2
+>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta charset="iso8859-2>
+<p>"</p>
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta charset=iso8859-2">
+<p>"</p>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta " charset=iso8859-2>
+<p>"</p>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta test" charset=iso8859-2>
+<p>"</p>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta test=" charset=iso8859-2>
+<p>"</p>
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta test="' charset=iso8859-2>
+<p>"'</p>
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta test='" charset=iso8859-2>
+<p>'"</p>
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta test="" charset=iso8859-2>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta test=x" charset=iso8859-2>
+<p>"</p>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<head></head><p title="x>
+<meta test=x" charset=iso8859-2>
+<p>"</p>
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<head></head><p title="x>
+<meta test=x charset=iso8859-2>
+<p>"</p>
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<head></head><p title="x>
+<meta charset=iso8859-2>
+<p>"</p>
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<head></head><p title="x>">
+<meta charset=iso8859-2>
+<p>"</p>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta charset="ISO-8859-1">
+<meta charset="iso8859-2">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<meta charset="iso8859-2">
+<meta charset="ISO-8859-1">
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<!--<meta charset="ISO-8859-1">-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<!--<meta charset="iso8859-2">-->
+<meta charset="ISO-8859-1">
+#encoding
+Windows-1252
+
+#data
+<!DOCTYPE HTML>
+<!-- Starts with UTF-8 BOM -->
+#encoding
+UTF-8
+
+#data
+<!DOCTYPE HTML>
+<meta charset="ISO-8859-1">
+<!-- Starts with UTF-8 BOM -->
+#encoding
+UTF-8
+
+#data
+<!-- 511 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 512 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 1024 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 1025 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 2048 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 2049 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 4096 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 4097 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 8192 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- 8193 characters xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz-->
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
+
+#data
+<!-- multi-script test -->
+<script>alert('step 1 of 3 ("þ")')</script>
+<!-- +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -->
+<script>alert('step 2 of 3 ("þ")')</script>
+<meta charset="iso8859-2">
+<script>alert('step 3 of 3 ("þ")')</script>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<script>document.write('<meta charset="ISO-8859-' + '2">')</script>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<script>document.write('<meta charset="iso8859-2">')</script>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<script type="text/plain"><meta charset="iso8859-2"></script>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<style type="text/plain"><meta charset="iso8859-2"></style>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<p><meta charset="iso8859-2"></p>
+#encoding
+iso8859-2
+
+#data
+<!DOCTYPE HTML>
+<meta charset="bogus">
+<meta charset="iso8859-2">
+#encoding
+iso8859-2
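
The cases above exercise the encoding prescan against progressively longer comments (1024 up to 8193 characters) and against meta declarations written out by scripts; every case expects iso8859-2 to be sniffed. A minimal sketch of replaying such #data/#encoding blocks through the bundled parser is shown below; the local file path and the documentEncoding attribute are assumptions about the html5lib API of this era, not something stated in the diff.

    import html5lib

    def load_cases(path):
        # Split a .dat file into (input bytes, expected encoding label) pairs.
        with open(path, "rb") as handle:
            text = handle.read().decode("utf-8")
        for block in text.split("#data\n")[1:]:
            data, _, expected = block.partition("\n#encoding\n")
            yield data.encode("utf-8"), expected.strip()

    for data, expected in load_cases("tests1.dat"):  # hypothetical local copy
        parser = html5lib.HTMLParser()
        parser.parse(data)
        # documentEncoding is assumed here; html5lib releases differ in how
        # they expose the sniffed encoding.
        print(expected, "->", parser.documentEncoding)
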
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/tests2.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/tests2.dat
new file mode 100644
index 000000000..eee44984d
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/encoding/tests2.dat
@@ -0,0 +1,115 @@
+#data
+<meta
+#encoding
+windows-1252
+
+#data
+<
+#encoding
+windows-1252
+
+#data
+<!
+#encoding
+windows-1252
+
+#data
+<meta charset = "
+#encoding
+windows-1252
+
+#data
+<meta charset=euc_jp
+#encoding
+windows-1252
+
+#data
+<meta <meta charset='euc_jp'>
+#encoding
+euc_jp
+
+#data
+<meta charset = 'euc_jp'>
+#encoding
+euc_jp
+
+#data
+<!-- -->
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+#encoding
+utf-8
+
+#data
+<!-- -->
+<meta http-equiv="Content-Type" content="text/html; charset=utf
+#encoding
+windows-1252
+
+#data
+<meta http-equiv="Content-Type<meta charset="utf-8">
+#encoding
+windows-1252
+
+#data
+<meta http-equiv="Content-Type" content="text/html; charset='utf-8'">
+#encoding
+utf-8
+
+#data
+<meta http-equiv="Content-Type" content="text/html; charset='utf-8">
+#encoding
+windows-1252
+
+#data
+<meta
+#encoding
+windows-1252
+
+#data
+<meta charset =
+#encoding
+windows-1252
+
+#data
+<meta charset= utf-8
+>
+#encoding
+utf-8
+
+#data
+<meta content = "text/html;
+#encoding
+windows-1252
+
+#data
+<meta charset="UTF-16">
+#encoding
+utf-8
+
+#data
+<meta charset="UTF-16LE">
+#encoding
+utf-8
+
+#data
+<meta charset="UTF-16BE">
+#encoding
+utf-8
+
+#data
+<html a=ñ>
+<meta charset="utf-8">
+#encoding
+utf-8
+
+#data
+<html ñ>
+<meta charset="utf-8">
+#encoding
+utf-8
+
+#data
+<html>ñ
+<meta charset="utf-8">
+#encoding
+utf-8
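
The shorter cases in tests2.dat probe the prescan's edge handling: tags cut off before they complete, unquoted or oddly spaced attribute values, a charset carried inside an http-equiv Content-Type declaration, and the rule that UTF-16 labels are rewritten to UTF-8 (which is why the three UTF-16 cases expect utf-8). The helper below is an illustrative simplification of the Content-Type extraction only, written to mirror the quoted and unterminated-quote cases above; it is not the full algorithm from the specification.

    def charset_from_content(value):
        # Pull a charset out of a Content-Type style attribute value.
        lowered = value.lower()
        index = lowered.find("charset")
        if index == -1:
            return None
        rest = lowered[index + len("charset"):].lstrip()
        if not rest.startswith("="):
            return None
        rest = rest[1:].lstrip()
        if rest[:1] in ("'", '"'):
            quote = rest[0]
            end = rest.find(quote, 1)
            # An unterminated quote yields nothing, matching the case above
            # that falls back to windows-1252.
            return rest[1:end] if end != -1 else None
        for stop, char in enumerate(rest):
            if char in " \t;":
                return rest[:stop]
        return rest or None

    assert charset_from_content("text/html; charset='utf-8'") == "utf-8"
    assert charset_from_content("text/html; charset='utf-8") is None
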
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/sanitizer/tests1.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/sanitizer/tests1.dat
new file mode 100644
index 000000000..c741cb8c6
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/sanitizer/tests1.dat
@@ -0,0 +1,501 @@
+[
+ {
+ "name": "IE_Comments",
+ "input": "<!--[if gte IE 4]><script>alert('XSS');</script><![endif]-->",
+ "output": ""
+ },
+
+ {
+ "name": "IE_Comments_2",
+ "input": "<![if !IE 5]><script>alert('XSS');</script><![endif]>",
+ "output": "&lt;script&gt;alert('XSS');&lt;/script&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "allow_colons_in_path_component",
+ "input": "<a href=\"./this:that\">foo</a>",
+ "output": "<a href='./this:that'>foo</a>"
+ },
+
+ {
+ "name": "background_attribute",
+ "input": "<div background=\"javascript:alert('XSS')\"></div>",
+ "output": "<div/>",
+ "xhtml": "<div></div>",
+ "rexml": "<div></div>"
+ },
+
+ {
+ "name": "bgsound",
+ "input": "<bgsound src=\"javascript:alert('XSS');\" />",
+ "output": "&lt;bgsound src=\"javascript:alert('XSS');\"/&gt;",
+ "rexml": "&lt;bgsound src=\"javascript:alert('XSS');\"&gt;&lt;/bgsound&gt;"
+ },
+
+ {
+ "name": "div_background_image_unicode_encoded",
+ "input": "<div style=\"background-image:\u00a5\u00a2\u006C\u0028'\u006a\u0061\u00a6\u0061\u00a3\u0063\u00a2\u0069\u00a0\u00a4\u003a\u0061\u006c\u0065\u00a2\u00a4\u0028.1027\u0058.1053\u0053\u0027\u0029'\u0029\">foo</div>",
+ "output": "<div style=''>foo</div>"
+ },
+
+ {
+ "name": "div_expression",
+ "input": "<div style=\"width: expression(alert('XSS'));\">foo</div>",
+ "output": "<div style=''>foo</div>"
+ },
+
+ {
+ "name": "double_open_angle_brackets",
+ "input": "<img src=http://ha.ckers.org/scriptlet.html <",
+ "output": "<img src='http://ha.ckers.org/scriptlet.html'>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "double_open_angle_brackets_2",
+ "input": "<script src=http://ha.ckers.org/scriptlet.html <",
+ "output": "&lt;script src=\"http://ha.ckers.org/scriptlet.html\" &lt;=\"\"&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "grave_accents",
+ "input": "<img src=`javascript:alert('XSS')` />",
+ "output": "<img/>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "img_dynsrc_lowsrc",
+ "input": "<img dynsrc=\"javascript:alert('XSS')\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "img_vbscript",
+ "input": "<img src='vbscript:msgbox(\"XSS\")' />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "input_image",
+ "input": "<input type=\"image\" src=\"javascript:alert('XSS');\" />",
+ "output": "<input type='image'/>",
+ "rexml": "<input type='image' />"
+ },
+
+ {
+ "name": "link_stylesheets",
+ "input": "<link rel=\"stylesheet\" href=\"javascript:alert('XSS');\" />",
+ "output": "&lt;link rel=\"stylesheet\" href=\"javascript:alert('XSS');\"/&gt;",
+ "rexml": "&lt;link href=\"javascript:alert('XSS');\" rel=\"stylesheet\"/&gt;"
+ },
+
+ {
+ "name": "link_stylesheets_2",
+ "input": "<link rel=\"stylesheet\" href=\"http://ha.ckers.org/xss.css\" />",
+ "output": "&lt;link rel=\"stylesheet\" href=\"http://ha.ckers.org/xss.css\"/&gt;",
+ "rexml": "&lt;link href=\"http://ha.ckers.org/xss.css\" rel=\"stylesheet\"/&gt;"
+ },
+
+ {
+ "name": "list_style_image",
+ "input": "<li style=\"list-style-image: url(javascript:alert('XSS'))\">foo</li>",
+ "output": "<li style=''>foo</li>"
+ },
+
+ {
+ "name": "no_closing_script_tags",
+ "input": "<script src=http://ha.ckers.org/xss.js?<b>",
+ "output": "&lt;script src=\"http://ha.ckers.org/xss.js?&amp;lt;b\"&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "non_alpha_non_digit",
+ "input": "<script/XSS src=\"http://ha.ckers.org/xss.js\"></script>",
+ "output": "&lt;script XSS=\"\" src=\"http://ha.ckers.org/xss.js\"&gt;&lt;/script&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "non_alpha_non_digit_2",
+ "input": "<a onclick!\\#$%&()*~+-_.,:;?@[/|\\]^`=alert(\"XSS\")>foo</a>",
+ "output": "<a>foo</a>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "non_alpha_non_digit_3",
+ "input": "<img/src=\"http://ha.ckers.org/xss.js\"/>",
+ "output": "<img src='http://ha.ckers.org/xss.js'/>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "non_alpha_non_digit_II",
+ "input": "<a href!\\#$%&()*~+-_.,:;?@[/|]^`=alert('XSS')>foo</a>",
+ "output": "<a>foo</a>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "non_alpha_non_digit_III",
+ "input": "<a/href=\"javascript:alert('XSS');\">foo</a>",
+ "output": "<a>foo</a>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "platypus",
+ "input": "<a href=\"http://www.ragingplatypus.com/\" style=\"display:block; position:absolute; left:0; top:0; width:100%; height:100%; z-index:1; background-color:black; background-image:url(http://www.ragingplatypus.com/i/cam-full.jpg); background-x:center; background-y:center; background-repeat:repeat;\">never trust your upstream platypus</a>",
+ "output": "<a href='http://www.ragingplatypus.com/' style='display: block; width: 100%; height: 100%; background-color: black; background-x: center; background-y: center;'>never trust your upstream platypus</a>"
+ },
+
+ {
+ "name": "protocol_resolution_in_script_tag",
+ "input": "<script src=//ha.ckers.org/.j></script>",
+ "output": "&lt;script src=\"//ha.ckers.org/.j\"&gt;&lt;/script&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_allow_anchors",
+ "input": "<a href='foo' onclick='bar'><script>baz</script></a>",
+ "output": "<a href='foo'>&lt;script&gt;baz&lt;/script&gt;</a>"
+ },
+
+ {
+ "name": "should_allow_image_alt_attribute",
+ "input": "<img alt='foo' onclick='bar' />",
+ "output": "<img alt='foo'/>",
+ "rexml": "<img alt='foo' />"
+ },
+
+ {
+ "name": "should_allow_image_height_attribute",
+ "input": "<img height='foo' onclick='bar' />",
+ "output": "<img height='foo'/>",
+ "rexml": "<img height='foo' />"
+ },
+
+ {
+ "name": "should_allow_image_src_attribute",
+ "input": "<img src='foo' onclick='bar' />",
+ "output": "<img src='foo'/>",
+ "rexml": "<img src='foo' />"
+ },
+
+ {
+ "name": "should_allow_image_width_attribute",
+ "input": "<img width='foo' onclick='bar' />",
+ "output": "<img width='foo'/>",
+ "rexml": "<img width='foo' />"
+ },
+
+ {
+ "name": "should_handle_blank_text",
+ "input": "",
+ "output": ""
+ },
+
+ {
+ "name": "should_handle_malformed_image_tags",
+ "input": "<img \"\"\"><script>alert(\"XSS\")</script>\">",
+ "output": "<img/>&lt;script&gt;alert(\"XSS\")&lt;/script&gt;\"&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_handle_non_html",
+ "input": "abc",
+ "output": "abc"
+ },
+
+ {
+ "name": "should_not_fall_for_ridiculous_hack",
+ "input": "<img\nsrc\n=\n\"\nj\na\nv\na\ns\nc\nr\ni\np\nt\n:\na\nl\ne\nr\nt\n(\n'\nX\nS\nS\n'\n)\n\"\n />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_0",
+ "input": "<img src=\"javascript:alert('XSS');\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_1",
+ "input": "<img src=javascript:alert('XSS') />",
+ "output": "<img/>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_10",
+ "input": "<img src=\"jav&#x0A;ascript:alert('XSS');\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_11",
+ "input": "<img src=\"jav&#x0D;ascript:alert('XSS');\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_12",
+ "input": "<img src=\" &#14; javascript:alert('XSS');\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_13",
+ "input": "<img src=\"&#x20;javascript:alert('XSS');\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_14",
+ "input": "<img src=\"&#xA0;javascript:alert('XSS');\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_2",
+ "input": "<img src=\"JaVaScRiPt:alert('XSS')\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_3",
+ "input": "<img src='javascript:alert(&quot;XSS&quot;)' />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_4",
+ "input": "<img src='javascript:alert(String.fromCharCode(88,83,83))' />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_5",
+ "input": "<img src='&#106;&#97;&#118;&#97;&#115;&#99;&#114;&#105;&#112;&#116;&#58;&#97;&#108;&#101;&#114;&#116;&#40;&#39;&#88;&#83;&#83;&#39;&#41;' />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_6",
+ "input": "<img src='&#0000106;&#0000097;&#0000118;&#0000097;&#0000115;&#0000099;&#0000114;&#0000105;&#0000112;&#0000116;&#0000058;&#0000097;&#0000108;&#0000101;&#0000114;&#0000116;&#0000040;&#0000039;&#0000088;&#0000083;&#0000083;&#0000039;&#0000041' />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_7",
+ "input": "<img src='&#x6A;&#x61;&#x76;&#x61;&#x73;&#x63;&#x72;&#x69;&#x70;&#x74;&#x3A;&#x61;&#x6C;&#x65;&#x72;&#x74;&#x28;&#x27;&#x58;&#x53;&#x53;&#x27;&#x29' />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_8",
+ "input": "<img src=\"jav\tascript:alert('XSS');\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_not_fall_for_xss_image_hack_9",
+ "input": "<img src=\"jav&#x09;ascript:alert('XSS');\" />",
+ "output": "<img/>",
+ "rexml": "<img />"
+ },
+
+ {
+ "name": "should_sanitize_half_open_scripts",
+ "input": "<img src=\"javascript:alert('XSS')\"",
+ "output": "<img/>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_sanitize_invalid_script_tag",
+ "input": "<script/XSS SRC=\"http://ha.ckers.org/xss.js\"></script>",
+ "output": "&lt;script XSS=\"\" SRC=\"http://ha.ckers.org/xss.js\"&gt;&lt;/script&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_sanitize_script_tag_with_multiple_open_brackets",
+ "input": "<<script>alert(\"XSS\");//<</script>",
+ "output": "&lt;&lt;script&gt;alert(\"XSS\");//&lt;&lt;/script&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_sanitize_script_tag_with_multiple_open_brackets_2",
+ "input": "<iframe src=http://ha.ckers.org/scriptlet.html\n<",
+ "output": "&lt;iframe src=\"http://ha.ckers.org/scriptlet.html\" &lt;=\"\"&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_sanitize_tag_broken_up_by_null",
+ "input": "<scr\u0000ipt>alert(\"XSS\")</scr\u0000ipt>",
+ "output": "&lt;scr\ufffdipt&gt;alert(\"XSS\")&lt;/scr\ufffdipt&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_sanitize_unclosed_script",
+ "input": "<script src=http://ha.ckers.org/xss.js?<b>",
+ "output": "&lt;script src=\"http://ha.ckers.org/xss.js?&amp;lt;b\"&gt;",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "should_strip_href_attribute_in_a_with_bad_protocols",
+ "input": "<a href=\"javascript:XSS\" title=\"1\">boo</a>",
+ "output": "<a title='1'>boo</a>"
+ },
+
+ {
+ "name": "should_strip_href_attribute_in_a_with_bad_protocols_and_whitespace",
+ "input": "<a href=\" javascript:XSS\" title=\"1\">boo</a>",
+ "output": "<a title='1'>boo</a>"
+ },
+
+ {
+ "name": "should_strip_src_attribute_in_img_with_bad_protocols",
+ "input": "<img src=\"javascript:XSS\" title=\"1\">boo</img>",
+ "output": "<img title='1'/>boo",
+ "rexml": "<img title='1' />"
+ },
+
+ {
+ "name": "should_strip_src_attribute_in_img_with_bad_protocols_and_whitespace",
+ "input": "<img src=\" javascript:XSS\" title=\"1\">boo</img>",
+ "output": "<img title='1'/>boo",
+ "rexml": "<img title='1' />"
+ },
+
+ {
+ "name": "xml_base",
+ "input": "<div xml:base=\"javascript:alert('XSS');//\">foo</div>",
+ "output": "<div>foo</div>"
+ },
+
+ {
+ "name": "xul",
+ "input": "<p style=\"-moz-binding:url('http://ha.ckers.org/xssmoz.xml#xss')\">fubar</p>",
+ "output": "<p style=''>fubar</p>"
+ },
+
+ {
+ "name": "quotes_in_attributes",
+ "input": "<img src='foo' title='\"foo\" bar' />",
+ "rexml": "<img src='foo' title='\"foo\" bar' />",
+ "output": "<img title='&quot;foo&quot; bar' src='foo'/>"
+ },
+
+ {
+ "name": "uri_refs_in_svg_attributes",
+ "input": "<rect fill='url(#foo)' />",
+ "rexml": "<rect fill='url(#foo)'></rect>",
+ "xhtml": "<rect fill='url(#foo)'></rect>",
+ "output": "<rect fill='url(#foo)'/>"
+ },
+
+ {
+ "name": "absolute_uri_refs_in_svg_attributes",
+ "input": "<rect fill='url(http://bad.com/) #fff' />",
+ "rexml": "<rect fill=' #fff'></rect>",
+ "xhtml": "<rect fill=' #fff'></rect>",
+ "output": "<rect fill=' #fff'/>"
+ },
+
+ {
+    "name": "uri_ref_with_space_in_svg_attribute",
+ "input": "<rect fill='url(\n#foo)' />",
+ "rexml": "<rect fill='url(\n#foo)'></rect>",
+ "xhtml": "<rect fill='url(\n#foo)'></rect>",
+ "output": "<rect fill='url(\n#foo)'/>"
+ },
+
+ {
+    "name": "absolute_uri_ref_with_space_in_svg_attribute",
+ "input": "<rect fill=\"url(\nhttp://bad.com/)\" />",
+ "rexml": "<rect fill=' '></rect>",
+ "xhtml": "<rect fill=' '></rect>",
+ "output": "<rect fill=' '/>"
+ },
+
+ {
+ "name": "allow_html5_image_tag",
+ "input": "<image src='foo' />",
+ "rexml": "&lt;image src=\"foo\"&gt;&lt;/image&gt;",
+ "output": "&lt;image src=\"foo\"/&gt;"
+ },
+
+ {
+ "name": "style_attr_end_with_nothing",
+ "input": "<div style=\"color: blue\" />",
+ "output": "<div style='color: blue;'/>",
+ "xhtml": "<div style='color: blue;'></div>",
+ "rexml": "<div style='color: blue;'></div>"
+ },
+
+ {
+ "name": "style_attr_end_with_space",
+ "input": "<div style=\"color: blue \" />",
+ "output": "<div style='color: blue ;'/>",
+ "xhtml": "<div style='color: blue ;'></div>",
+ "rexml": "<div style='color: blue ;'></div>"
+ },
+
+ {
+ "name": "style_attr_end_with_semicolon",
+ "input": "<div style=\"color: blue;\" />",
+ "output": "<div style='color: blue;'/>",
+ "xhtml": "<div style='color: blue;'></div>",
+ "rexml": "<div style='color: blue;'></div>"
+ },
+
+ {
+ "name": "style_attr_end_with_semicolon_space",
+ "input": "<div style=\"color: blue; \" />",
+ "output": "<div style='color: blue;'/>",
+ "xhtml": "<div style='color: blue;'></div>",
+ "rexml": "<div style='color: blue;'></div>"
+ },
+
+ {
+ "name": "attributes_with_embedded_quotes",
+ "input": "<img src=doesntexist.jpg\"'onerror=\"alert(1) />",
+ "output": "<img src='doesntexist.jpg&quot;&apos;onerror=&quot;alert(1)'/>",
+ "rexml": "Ill-formed XHTML!"
+ },
+
+ {
+ "name": "attributes_with_embedded_quotes_II",
+ "input": "<img src=notthere.jpg\"\"onerror=\"alert(2) />",
+ "output": "<img src='notthere.jpg&quot;&quot;onerror=&quot;alert(2)'/>",
+ "rexml": "Ill-formed XHTML!"
+ }
+]
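
The sanitizer fixtures pair raw markup with the markup the sanitizer is expected to emit, plus alternate expectations under the xhtml and rexml keys for other backends. A rough way to replay a case with the sanitizer bundled in this tree is sketched below; it assumes the pre-1.0 html5lib layout, where HTMLSanitizer is passed in as the tokenizer class, and it skips the normalization the project's own runner applies to the expected strings, so the comparison is only approximate.

    import json

    from html5lib import html5parser, sanitizer, serializer

    def sanitize(markup):
        # Parse a fragment with the sanitizing tokenizer, then serialize it.
        parser = html5parser.HTMLParser(tokenizer=sanitizer.HTMLSanitizer)
        fragment = parser.parseFragment(markup)
        return serializer.serialize(fragment, tree="etree")

    with open("tests1.dat") as handle:  # the sanitizer file added above
        for case in json.load(handle):
            print(case["name"])
            print("  got:     ", sanitize(case["input"]))
            print("  expected:", case["output"])
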
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/core.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/core.test
new file mode 100644
index 000000000..c0b4222d2
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/core.test
@@ -0,0 +1,125 @@
+{"tests": [
+
+{"description": "proper attribute value escaping",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "test \"with\" &quot;"}]]],
+ "expected": ["<span title='test \"with\" &amp;quot;'>"]
+},
+
+{"description": "proper attribute value non-quoting",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo"}]]],
+ "expected": ["<span title=foo>"],
+ "xhtml": ["<span title=\"foo\">"]
+},
+
+{"description": "proper attribute value non-quoting (with <)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo<bar"}]]],
+ "expected": ["<span title=foo<bar>"],
+ "xhtml": ["<span title=\"foo&lt;bar\">"]
+},
+
+{"description": "proper attribute value quoting (with =)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo=bar"}]]],
+ "expected": ["<span title=\"foo=bar\">"]
+},
+
+{"description": "proper attribute value quoting (with >)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo>bar"}]]],
+ "expected": ["<span title=\"foo>bar\">"]
+},
+
+{"description": "proper attribute value quoting (with \")",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo\"bar"}]]],
+ "expected": ["<span title='foo\"bar'>"]
+},
+
+{"description": "proper attribute value quoting (with ')",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo'bar"}]]],
+ "expected": ["<span title=\"foo'bar\">"]
+},
+
+{"description": "proper attribute value quoting (with both \" and ')",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo'bar\"baz"}]]],
+ "expected": ["<span title=\"foo'bar&quot;baz\">"]
+},
+
+{"description": "proper attribute value quoting (with space)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo bar"}]]],
+ "expected": ["<span title=\"foo bar\">"]
+},
+
+{"description": "proper attribute value quoting (with tab)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo\tbar"}]]],
+ "expected": ["<span title=\"foo\tbar\">"]
+},
+
+{"description": "proper attribute value quoting (with LF)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo\nbar"}]]],
+ "expected": ["<span title=\"foo\nbar\">"]
+},
+
+{"description": "proper attribute value quoting (with CR)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo\rbar"}]]],
+ "expected": ["<span title=\"foo\rbar\">"]
+},
+
+{"description": "proper attribute value non-quoting (with linetab)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo\u000Bbar"}]]],
+ "expected": ["<span title=foo\u000Bbar>"],
+ "xhtml": ["<span title=\"foo\u000Bbar\">"]
+},
+
+{"description": "proper attribute value quoting (with form feed)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "foo\u000Cbar"}]]],
+ "expected": ["<span title=\"foo\u000Cbar\">"]
+},
+
+{"description": "void element (as EmptyTag token)",
+ "input": [["EmptyTag", "img", {}]],
+ "expected": ["<img>"],
+ "xhtml": ["<img />"]
+},
+
+{"description": "void element (as StartTag token)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "img", {}]],
+ "expected": ["<img>"],
+ "xhtml": ["<img />"]
+},
+
+{"description": "doctype in error",
+ "input": [["Doctype", "foo"]],
+ "expected": ["<!DOCTYPE foo>"]
+},
+
+{"description": "character data",
+ "options": {"encoding":"utf-8"},
+ "input": [["Characters", "a<b>c&d"]],
+ "expected": ["a&lt;b&gt;c&amp;d"]
+},
+
+{"description": "rcdata",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "script", {}], ["Characters", "a<b>c&d"]],
+ "expected": ["<script>a<b>c&d"],
+ "xhtml": ["<script>a&lt;b&gt;c&amp;d"]
+},
+
+{"description": "doctype",
+ "input": [["Doctype", "HTML"]],
+ "expected": ["<!DOCTYPE HTML>"]
+},
+
+{"description": "HTML 4.01 DOCTYPE",
+ "input": [["Doctype", "HTML", "-//W3C//DTD HTML 4.01//EN", "http://www.w3.org/TR/html4/strict.dtd"]],
+ "expected": ["<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">"]
+},
+
+{"description": "HTML 4.01 DOCTYPE without system identifer",
+ "input": [["Doctype", "HTML", "-//W3C//DTD HTML 4.01//EN"]],
+ "expected": ["<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\">"]
+},
+
+{"description": "IBM DOCTYPE without public identifer",
+ "input": [["Doctype", "html", "", "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"]],
+ "expected": ["<!DOCTYPE html SYSTEM \"http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd\">"]
+}
+
+]}
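The cases above pin down the serializer's attribute-quoting rules: a value is written bare only when it contains none of space, tab, LF, FF, CR, "=", "<", ">", double quote or single quote, and otherwise a quote character is chosen (and escaped) so the value survives a round trip. As a hedged illustration, assuming the vendored html5lib package is importable (the accompanying serializer test module actually drives the serializer from the raw token lists, not from parsed markup):

import html5lib
from html5lib.serializer import HTMLSerializer

def roundtrip(markup, **opts):
    # Parse a fragment, walk the tree, and serialize it back with the given options.
    fragment = html5lib.parseFragment(markup)
    walker = html5lib.getTreeWalker("etree")
    return HTMLSerializer(**opts).render(walker(fragment))

print(roundtrip('<span title="foo"></span>'))      # <span title=foo></span>, safe value left bare
print(roundtrip('<span title="foo bar"></span>'))  # <span title="foo bar"></span>, the space forces quoting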
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/injectmeta.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/injectmeta.test
new file mode 100644
index 000000000..feaaa44f8
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/injectmeta.test
@@ -0,0 +1,66 @@
+{"tests": [
+
+{"description": "no encoding",
+ "options": {"inject_meta_charset": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": [""],
+ "xhtml": ["<head></head>"]
+},
+
+{"description": "empytag head",
+ "options": {"inject_meta_charset": true, "encoding":"utf-8"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": ["<meta charset=utf-8>"],
+ "xhtml": ["<head><meta charset=\"utf-8\" /></head>"]
+},
+
+{"description": "head w/title",
+ "options": {"inject_meta_charset": true, "encoding":"utf-8"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["StartTag", "http://www.w3.org/1999/xhtml","title",{}], ["Characters", "foo"],["EndTag", "http://www.w3.org/1999/xhtml", "title"], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": ["<meta charset=utf-8><title>foo</title>"],
+ "xhtml": ["<head><meta charset=\"utf-8\" /><title>foo</title></head>"]
+},
+
+{"description": "head w/meta-charset",
+ "options": {"inject_meta_charset": true, "encoding":"utf-8"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EmptyTag","meta",[{"namespace": null, "name": "charset", "value": "ascii"}]], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": ["<meta charset=utf-8>"],
+ "xhtml": ["<head><meta charset=\"utf-8\" /></head>"]
+},
+
+{"description": "head w/ two meta-charset",
+ "options": {"inject_meta_charset": true, "encoding":"utf-8"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EmptyTag","meta",[{"namespace": null, "name": "charset", "value": "ascii"}]], ["EmptyTag","meta",[{"namespace": null, "name": "charset", "value": "ascii"}]], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": ["<meta charset=utf-8><meta charset=utf-8>", "<head><meta charset=utf-8><meta charset=ascii>"],
+ "xhtml": ["<head><meta charset=\"utf-8\" /><meta charset=\"utf-8\" /></head>", "<head><meta charset=\"utf-8\" /><meta charset=\"ascii\" /></head>"]
+},
+
+{"description": "head w/robots",
+ "options": {"inject_meta_charset": true, "encoding":"utf-8"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EmptyTag","meta",[{"namespace": null, "name": "name", "value": "robots"},{"namespace": null, "name": "content", "value": "noindex"}]], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": ["<meta charset=utf-8><meta content=noindex name=robots>"],
+ "xhtml": ["<head><meta charset=\"utf-8\" /><meta content=\"noindex\" name=\"robots\" /></head>"]
+},
+
+{"description": "head w/robots & charset",
+ "options": {"inject_meta_charset": true, "encoding":"utf-8"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EmptyTag","meta",[{"namespace": null, "name": "name", "value": "robots"},{"namespace": null, "name": "content", "value": "noindex"}]], ["EmptyTag","meta",[{"namespace": null, "name": "charset", "value": "ascii"}]], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": ["<meta content=noindex name=robots><meta charset=utf-8>"],
+ "xhtml": ["<head><meta content=\"noindex\" name=\"robots\" /><meta charset=\"utf-8\" /></head>"]
+},
+
+{"description": "head w/ charset in http-equiv content-type",
+ "options": {"inject_meta_charset": true, "encoding":"utf-8"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EmptyTag","meta",[{"namespace": null, "name": "http-equiv", "value": "content-type"}, {"namespace": null, "name": "content", "value": "text/html; charset=ascii"}]], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": ["<meta content=\"text/html; charset=utf-8\" http-equiv=content-type>"],
+ "xhtml": ["<head><meta content=\"text/html; charset=utf-8\" http-equiv=\"content-type\" /></head>"]
+},
+
+{"description": "head w/robots & charset in http-equiv content-type",
+ "options": {"inject_meta_charset": true, "encoding":"utf-8"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EmptyTag","meta",[{"namespace": null, "name": "name", "value": "robots"},{"namespace": null, "name": "content", "value": "noindex"}]], ["EmptyTag","meta",[{"namespace": null, "name": "http-equiv", "value": "content-type"}, {"namespace": null, "name": "content", "value": "text/html; charset=ascii"}]], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": ["<meta content=noindex name=robots><meta content=\"text/html; charset=utf-8\" http-equiv=content-type>"],
+ "xhtml": ["<head><meta content=\"noindex\" name=\"robots\" /><meta content=\"text/html; charset=utf-8\" http-equiv=\"content-type\" /></head>"]
+}
+
+]}
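These cases exercise the inject_meta_charset option: when the document is serialized to a concrete encoding, an existing meta charset (or http-equiv content-type) declaration inside head is rewritten to that encoding, or a new one is inserted, and with no target encoding there is nothing to inject. A hedged sketch, assuming the vendored html5lib package is importable:

import html5lib
from html5lib.serializer import HTMLSerializer

doc = html5lib.parse("<head><title>foo</title></head>")
walker = html5lib.getTreeWalker("etree")
serializer = HTMLSerializer(inject_meta_charset=True)

# render() returns a byte string when an output encoding is supplied.
out = serializer.render(walker(doc), encoding="utf-8")
print(out.decode("utf-8"))
# expected along the lines of: <meta charset=utf-8><title>foo</title>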
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/optionaltags.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/optionaltags.test
new file mode 100644
index 000000000..80a5edf89
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/optionaltags.test
@@ -0,0 +1,965 @@
+{"tests": [
+
+{"description": "html start-tag followed by text, with attributes",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "html", [{"namespace": null, "name": "lang", "value": "en"}]], ["Characters", "foo"]],
+ "expected": ["<html lang=en>foo"]
+},
+
+
+
+{"description": "html start-tag followed by comment",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "html", {}], ["Comment", "foo"]],
+ "expected": ["<html><!--foo-->"]
+},
+
+{"description": "html start-tag followed by space character",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "html", {}], ["Characters", " foo"]],
+ "expected": ["<html> foo"]
+},
+
+{"description": "html start-tag followed by text",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "html", {}], ["Characters", "foo"]],
+ "expected": ["foo"]
+},
+
+{"description": "html start-tag followed by start-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "html", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<foo>"]
+},
+
+{"description": "html start-tag followed by end-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "html", {}], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "html start-tag at EOF (shouldn't ever happen?!)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "html", {}]],
+ "expected": [""]
+},
+
+
+
+{"description": "html end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "html"], ["Comment", "foo"]],
+ "expected": ["</html><!--foo-->"]
+},
+
+{"description": "html end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "html"], ["Characters", " foo"]],
+ "expected": ["</html> foo"]
+},
+
+{"description": "html end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "html"], ["Characters", "foo"]],
+ "expected": ["foo"]
+},
+
+{"description": "html end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "html"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<foo>"]
+},
+
+{"description": "html end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "html"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "html end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "html"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "head start-tag followed by comment",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["Comment", "foo"]],
+ "expected": ["<head><!--foo-->"]
+},
+
+{"description": "head start-tag followed by space character",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["Characters", " foo"]],
+ "expected": ["<head> foo"]
+},
+
+{"description": "head start-tag followed by text",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["Characters", "foo"]],
+ "expected": ["<head>foo"]
+},
+
+{"description": "head start-tag followed by start-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<foo>"]
+},
+
+{"description": "head start-tag followed by end-tag (shouldn't ever happen?!)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["<head></foo>", "</foo>"]
+},
+
+{"description": "empty head element",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": [""]
+},
+
+{"description": "head start-tag followed by empty-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}], ["EmptyTag", "foo", {}]],
+ "expected": ["<foo>"]
+},
+
+{"description": "head start-tag at EOF (shouldn't ever happen?!)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "head", {}]],
+ "expected": ["<head>", ""]
+},
+
+
+
+{"description": "head end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "head"], ["Comment", "foo"]],
+ "expected": ["</head><!--foo-->"]
+},
+
+{"description": "head end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "head"], ["Characters", " foo"]],
+ "expected": ["</head> foo"]
+},
+
+{"description": "head end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "head"], ["Characters", "foo"]],
+ "expected": ["foo"]
+},
+
+{"description": "head end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "head"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<foo>"]
+},
+
+{"description": "head end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "head"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "head end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "head"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "body start-tag followed by comment",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "body", {}], ["Comment", "foo"]],
+ "expected": ["<body><!--foo-->"]
+},
+
+{"description": "body start-tag followed by space character",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "body", {}], ["Characters", " foo"]],
+ "expected": ["<body> foo"]
+},
+
+{"description": "body start-tag followed by text",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "body", {}], ["Characters", "foo"]],
+ "expected": ["foo"]
+},
+
+{"description": "body start-tag followed by start-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "body", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<foo>"]
+},
+
+{"description": "body start-tag followed by end-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "body", {}], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "body start-tag at EOF (shouldn't ever happen?!)",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "body", {}]],
+ "expected": [""]
+},
+
+
+
+{"description": "body end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "body"], ["Comment", "foo"]],
+ "expected": ["</body><!--foo-->"]
+},
+
+{"description": "body end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "body"], ["Characters", " foo"]],
+ "expected": ["</body> foo"]
+},
+
+{"description": "body end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "body"], ["Characters", "foo"]],
+ "expected": ["foo"]
+},
+
+{"description": "body end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "body"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<foo>"]
+},
+
+{"description": "body end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "body"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "body end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "body"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "li end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "li"], ["Comment", "foo"]],
+ "expected": ["</li><!--foo-->"]
+},
+
+{"description": "li end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "li"], ["Characters", " foo"]],
+ "expected": ["</li> foo"]
+},
+
+{"description": "li end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "li"], ["Characters", "foo"]],
+ "expected": ["</li>foo"]
+},
+
+{"description": "li end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "li"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</li><foo>"]
+},
+
+{"description": "li end-tag followed by li start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "li"], ["StartTag", "http://www.w3.org/1999/xhtml", "li", {}]],
+ "expected": ["<li>"]
+},
+
+{"description": "li end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "li"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "li end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "li"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "dt end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dt"], ["Comment", "foo"]],
+ "expected": ["</dt><!--foo-->"]
+},
+
+{"description": "dt end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dt"], ["Characters", " foo"]],
+ "expected": ["</dt> foo"]
+},
+
+{"description": "dt end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dt"], ["Characters", "foo"]],
+ "expected": ["</dt>foo"]
+},
+
+{"description": "dt end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dt"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</dt><foo>"]
+},
+
+{"description": "dt end-tag followed by dt start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dt"], ["StartTag", "http://www.w3.org/1999/xhtml", "dt", {}]],
+ "expected": ["<dt>"]
+},
+
+{"description": "dt end-tag followed by dd start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dt"], ["StartTag", "http://www.w3.org/1999/xhtml", "dd", {}]],
+ "expected": ["<dd>"]
+},
+
+{"description": "dt end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dt"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</dt></foo>"]
+},
+
+{"description": "dt end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dt"]],
+ "expected": ["</dt>"]
+},
+
+
+
+
+{"description": "dd end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dd"], ["Comment", "foo"]],
+ "expected": ["</dd><!--foo-->"]
+},
+
+{"description": "dd end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dd"], ["Characters", " foo"]],
+ "expected": ["</dd> foo"]
+},
+
+{"description": "dd end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dd"], ["Characters", "foo"]],
+ "expected": ["</dd>foo"]
+},
+
+{"description": "dd end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dd"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</dd><foo>"]
+},
+
+{"description": "dd end-tag followed by dd start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dd"], ["StartTag", "http://www.w3.org/1999/xhtml", "dd", {}]],
+ "expected": ["<dd>"]
+},
+
+{"description": "dd end-tag followed by dt start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dd"], ["StartTag", "http://www.w3.org/1999/xhtml", "dt", {}]],
+ "expected": ["<dt>"]
+},
+
+{"description": "dd end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dd"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "dd end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "dd"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "p end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["Comment", "foo"]],
+ "expected": ["</p><!--foo-->"]
+},
+
+{"description": "p end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["Characters", " foo"]],
+ "expected": ["</p> foo"]
+},
+
+{"description": "p end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["Characters", "foo"]],
+ "expected": ["</p>foo"]
+},
+
+{"description": "p end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</p><foo>"]
+},
+
+{"description": "p end-tag followed by address start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "address", {}]],
+ "expected": ["<address>"]
+},
+
+{"description": "p end-tag followed by article start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "article", {}]],
+ "expected": ["<article>"]
+},
+
+{"description": "p end-tag followed by aside start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "aside", {}]],
+ "expected": ["<aside>"]
+},
+
+{"description": "p end-tag followed by blockquote start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "blockquote", {}]],
+ "expected": ["<blockquote>"]
+},
+
+{"description": "p end-tag followed by datagrid start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "datagrid", {}]],
+ "expected": ["<datagrid>"]
+},
+
+{"description": "p end-tag followed by dialog start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "dialog", {}]],
+ "expected": ["<dialog>"]
+},
+
+{"description": "p end-tag followed by dir start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "dir", {}]],
+ "expected": ["<dir>"]
+},
+
+{"description": "p end-tag followed by div start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "div", {}]],
+ "expected": ["<div>"]
+},
+
+{"description": "p end-tag followed by dl start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "dl", {}]],
+ "expected": ["<dl>"]
+},
+
+{"description": "p end-tag followed by fieldset start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "fieldset", {}]],
+ "expected": ["<fieldset>"]
+},
+
+{"description": "p end-tag followed by footer start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "footer", {}]],
+ "expected": ["<footer>"]
+},
+
+{"description": "p end-tag followed by form start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "form", {}]],
+ "expected": ["<form>"]
+},
+
+{"description": "p end-tag followed by h1 start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "h1", {}]],
+ "expected": ["<h1>"]
+},
+
+{"description": "p end-tag followed by h2 start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "h2", {}]],
+ "expected": ["<h2>"]
+},
+
+{"description": "p end-tag followed by h3 start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "h3", {}]],
+ "expected": ["<h3>"]
+},
+
+{"description": "p end-tag followed by h4 start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "h4", {}]],
+ "expected": ["<h4>"]
+},
+
+{"description": "p end-tag followed by h5 start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "h5", {}]],
+ "expected": ["<h5>"]
+},
+
+{"description": "p end-tag followed by h6 start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "h6", {}]],
+ "expected": ["<h6>"]
+},
+
+{"description": "p end-tag followed by header start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "header", {}]],
+ "expected": ["<header>"]
+},
+
+{"description": "p end-tag followed by hr empty-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["EmptyTag", "hr", {}]],
+ "expected": ["<hr>"]
+},
+
+{"description": "p end-tag followed by menu start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "menu", {}]],
+ "expected": ["<menu>"]
+},
+
+{"description": "p end-tag followed by nav start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "nav", {}]],
+ "expected": ["<nav>"]
+},
+
+{"description": "p end-tag followed by ol start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "ol", {}]],
+ "expected": ["<ol>"]
+},
+
+{"description": "p end-tag followed by p start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "p", {}]],
+ "expected": ["<p>"]
+},
+
+{"description": "p end-tag followed by pre start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "pre", {}]],
+ "expected": ["<pre>"]
+},
+
+{"description": "p end-tag followed by section start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "section", {}]],
+ "expected": ["<section>"]
+},
+
+{"description": "p end-tag followed by table start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "table", {}]],
+ "expected": ["<table>"]
+},
+
+{"description": "p end-tag followed by ul start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["StartTag", "http://www.w3.org/1999/xhtml", "ul", {}]],
+ "expected": ["<ul>"]
+},
+
+{"description": "p end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "p end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "p"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "optgroup end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "optgroup"], ["Comment", "foo"]],
+ "expected": ["</optgroup><!--foo-->"]
+},
+
+{"description": "optgroup end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "optgroup"], ["Characters", " foo"]],
+ "expected": ["</optgroup> foo"]
+},
+
+{"description": "optgroup end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "optgroup"], ["Characters", "foo"]],
+ "expected": ["</optgroup>foo"]
+},
+
+{"description": "optgroup end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "optgroup"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</optgroup><foo>"]
+},
+
+{"description": "optgroup end-tag followed by optgroup start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "optgroup"], ["StartTag", "http://www.w3.org/1999/xhtml", "optgroup", {}]],
+ "expected": ["<optgroup>"]
+},
+
+{"description": "optgroup end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "optgroup"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "optgroup end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "optgroup"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "option end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "option"], ["Comment", "foo"]],
+ "expected": ["</option><!--foo-->"]
+},
+
+{"description": "option end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "option"], ["Characters", " foo"]],
+ "expected": ["</option> foo"]
+},
+
+{"description": "option end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "option"], ["Characters", "foo"]],
+ "expected": ["</option>foo"]
+},
+
+{"description": "option end-tag followed by optgroup start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "option"], ["StartTag", "http://www.w3.org/1999/xhtml", "optgroup", {}]],
+ "expected": ["<optgroup>"]
+},
+
+{"description": "option end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "option"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</option><foo>"]
+},
+
+{"description": "option end-tag followed by option start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "option"], ["StartTag", "http://www.w3.org/1999/xhtml", "option", {}]],
+ "expected": ["<option>"]
+},
+
+{"description": "option end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "option"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "option end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "option"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "colgroup start-tag followed by comment",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "colgroup", {}], ["Comment", "foo"]],
+ "expected": ["<colgroup><!--foo-->"]
+},
+
+{"description": "colgroup start-tag followed by space character",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "colgroup", {}], ["Characters", " foo"]],
+ "expected": ["<colgroup> foo"]
+},
+
+{"description": "colgroup start-tag followed by text",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "colgroup", {}], ["Characters", "foo"]],
+ "expected": ["<colgroup>foo"]
+},
+
+{"description": "colgroup start-tag followed by start-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "colgroup", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<colgroup><foo>"]
+},
+
+{"description": "first colgroup in a table with a col child",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "table", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "colgroup", {}], ["EmptyTag", "col", {}]],
+ "expected": ["<table><col>"]
+},
+
+{"description": "colgroup with a col child, following another colgroup",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "colgroup"], ["StartTag", "http://www.w3.org/1999/xhtml", "colgroup", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "col", {}]],
+ "expected": ["</colgroup><col>", "<colgroup><col>"]
+},
+
+{"description": "colgroup start-tag followed by end-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "colgroup", {}], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["<colgroup></foo>"]
+},
+
+{"description": "colgroup start-tag at EOF",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "colgroup", {}]],
+ "expected": ["<colgroup>"]
+},
+
+
+
+{"description": "colgroup end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "colgroup"], ["Comment", "foo"]],
+ "expected": ["</colgroup><!--foo-->"]
+},
+
+{"description": "colgroup end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "colgroup"], ["Characters", " foo"]],
+ "expected": ["</colgroup> foo"]
+},
+
+{"description": "colgroup end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "colgroup"], ["Characters", "foo"]],
+ "expected": ["foo"]
+},
+
+{"description": "colgroup end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "colgroup"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<foo>"]
+},
+
+{"description": "colgroup end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "colgroup"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "colgroup end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "colgroup"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "thead end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"], ["Comment", "foo"]],
+ "expected": ["</thead><!--foo-->"]
+},
+
+{"description": "thead end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"], ["Characters", " foo"]],
+ "expected": ["</thead> foo"]
+},
+
+{"description": "thead end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"], ["Characters", "foo"]],
+ "expected": ["</thead>foo"]
+},
+
+{"description": "thead end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</thead><foo>"]
+},
+
+{"description": "thead end-tag followed by tbody start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"], ["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}]],
+ "expected": ["<tbody>"]
+},
+
+{"description": "thead end-tag followed by tfoot start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"], ["StartTag", "http://www.w3.org/1999/xhtml", "tfoot", {}]],
+ "expected": ["<tfoot>"]
+},
+
+{"description": "thead end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</thead></foo>"]
+},
+
+{"description": "thead end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"]],
+ "expected": ["</thead>"]
+},
+
+
+
+
+{"description": "tbody start-tag followed by comment",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["Comment", "foo"]],
+ "expected": ["<tbody><!--foo-->"]
+},
+
+{"description": "tbody start-tag followed by space character",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["Characters", " foo"]],
+ "expected": ["<tbody> foo"]
+},
+
+{"description": "tbody start-tag followed by text",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["Characters", "foo"]],
+ "expected": ["<tbody>foo"]
+},
+
+{"description": "tbody start-tag followed by start-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["<tbody><foo>"]
+},
+
+{"description": "first tbody in a table with a tr child",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "table", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "tr", {}]],
+ "expected": ["<table><tr>"]
+},
+
+{"description": "tbody with a tr child, following another tbody",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"], ["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "tr", {}]],
+ "expected": ["<tbody><tr>", "</tbody><tr>"]
+},
+
+{"description": "tbody with a tr child, following a thead",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "thead"], ["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "tr", {}]],
+ "expected": ["<tbody><tr>", "</thead><tr>"]
+},
+
+{"description": "tbody with a tr child, following a tfoot",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tfoot"], ["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["StartTag", "http://www.w3.org/1999/xhtml", "tr", {}]],
+ "expected": ["<tbody><tr>", "</tfoot><tr>"]
+},
+
+{"description": "tbody start-tag followed by end-tag",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["<tbody></foo>"]
+},
+
+{"description": "tbody start-tag at EOF",
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}]],
+ "expected": ["<tbody>"]
+},
+
+
+
+{"description": "tbody end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"], ["Comment", "foo"]],
+ "expected": ["</tbody><!--foo-->"]
+},
+
+{"description": "tbody end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"], ["Characters", " foo"]],
+ "expected": ["</tbody> foo"]
+},
+
+{"description": "tbody end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"], ["Characters", "foo"]],
+ "expected": ["</tbody>foo"]
+},
+
+{"description": "tbody end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</tbody><foo>"]
+},
+
+{"description": "tbody end-tag followed by tbody start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"], ["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}]],
+ "expected": ["<tbody>", "</tbody>"]
+},
+
+{"description": "tbody end-tag followed by tfoot start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"], ["StartTag", "http://www.w3.org/1999/xhtml", "tfoot", {}]],
+ "expected": ["<tfoot>"]
+},
+
+{"description": "tbody end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "tbody end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tbody"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "tfoot end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tfoot"], ["Comment", "foo"]],
+ "expected": ["</tfoot><!--foo-->"]
+},
+
+{"description": "tfoot end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tfoot"], ["Characters", " foo"]],
+ "expected": ["</tfoot> foo"]
+},
+
+{"description": "tfoot end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tfoot"], ["Characters", "foo"]],
+ "expected": ["</tfoot>foo"]
+},
+
+{"description": "tfoot end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tfoot"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</tfoot><foo>"]
+},
+
+{"description": "tfoot end-tag followed by tbody start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tfoot"], ["StartTag", "http://www.w3.org/1999/xhtml", "tbody", {}]],
+ "expected": ["<tbody>", "</tfoot>"]
+},
+
+{"description": "tfoot end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tfoot"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "tfoot end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tfoot"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "tr end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tr"], ["Comment", "foo"]],
+ "expected": ["</tr><!--foo-->"]
+},
+
+{"description": "tr end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tr"], ["Characters", " foo"]],
+ "expected": ["</tr> foo"]
+},
+
+{"description": "tr end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tr"], ["Characters", "foo"]],
+ "expected": ["</tr>foo"]
+},
+
+{"description": "tr end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tr"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</tr><foo>"]
+},
+
+{"description": "tr end-tag followed by tr start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tr"], ["StartTag", "http://www.w3.org/1999/xhtml", "tr", {}]],
+ "expected": ["<tr>", "</tr>"]
+},
+
+{"description": "tr end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tr"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "tr end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "tr"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "td end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "td"], ["Comment", "foo"]],
+ "expected": ["</td><!--foo-->"]
+},
+
+{"description": "td end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "td"], ["Characters", " foo"]],
+ "expected": ["</td> foo"]
+},
+
+{"description": "td end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "td"], ["Characters", "foo"]],
+ "expected": ["</td>foo"]
+},
+
+{"description": "td end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "td"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</td><foo>"]
+},
+
+{"description": "td end-tag followed by td start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "td"], ["StartTag", "http://www.w3.org/1999/xhtml", "td", {}]],
+ "expected": ["<td>", "</td>"]
+},
+
+{"description": "td end-tag followed by th start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "td"], ["StartTag", "http://www.w3.org/1999/xhtml", "th", {}]],
+ "expected": ["<th>", "</td>"]
+},
+
+{"description": "td end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "td"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "td end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "td"]],
+ "expected": [""]
+},
+
+
+
+
+{"description": "th end-tag followed by comment",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "th"], ["Comment", "foo"]],
+ "expected": ["</th><!--foo-->"]
+},
+
+{"description": "th end-tag followed by space character",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "th"], ["Characters", " foo"]],
+ "expected": ["</th> foo"]
+},
+
+{"description": "th end-tag followed by text",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "th"], ["Characters", "foo"]],
+ "expected": ["</th>foo"]
+},
+
+{"description": "th end-tag followed by start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "th"], ["StartTag", "http://www.w3.org/1999/xhtml", "foo", {}]],
+ "expected": ["</th><foo>"]
+},
+
+{"description": "th end-tag followed by th start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "th"], ["StartTag", "http://www.w3.org/1999/xhtml", "th", {}]],
+ "expected": ["<th>", "</th>"]
+},
+
+{"description": "th end-tag followed by td start-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "th"], ["StartTag", "http://www.w3.org/1999/xhtml", "td", {}]],
+ "expected": ["<td>", "</th>"]
+},
+
+{"description": "th end-tag followed by end-tag",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml", "th"], ["EndTag", "http://www.w3.org/1999/xhtml", "foo"]],
+ "expected": ["</foo>"]
+},
+
+{"description": "th end-tag at EOF",
+ "input": [["EndTag", "http://www.w3.org/1999/xhtml" , "th"]],
+ "expected": [""]
+}
+
+]}
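The block above enumerates the omit_optional_tags behaviour tag by tag: a start or end tag of html, head, body, li, dt, dd, p, optgroup, option, colgroup, thead, tbody, tfoot, tr, td or th is dropped only when the token that follows keeps the markup unambiguous, which is why several entries accept both the omitted and the fully written form. A hedged sketch of toggling the option, assuming the vendored html5lib package is importable:

import html5lib
from html5lib.serializer import HTMLSerializer

doc = html5lib.parse("<table><tbody><tr><td>a<td>b</table>")
walker = html5lib.getTreeWalker("etree")

print(HTMLSerializer(omit_optional_tags=True).render(walker(doc)))
# implied tags dropped, e.g. <table><tr><td>a<td>b</table>
print(HTMLSerializer(omit_optional_tags=False).render(walker(doc)))
# every start and end tag written out explicitly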
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/options.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/options.test
new file mode 100644
index 000000000..6f342dd3f
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/options.test
@@ -0,0 +1,60 @@
+{"tests":[
+
+{"description": "quote_char=\"'\"",
+ "options": {"quote_char": "'"},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": null, "name": "title", "value": "test 'with' quote_char"}]]],
+ "expected": ["<span title='test &#39;with&#39; quote_char'>"]
+},
+
+{"description": "quote_attr_values=true",
+ "options": {"quote_attr_values": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "button", [{"namespace": null, "name": "disabled", "value" :"disabled"}]]],
+ "expected": ["<button disabled>"],
+ "xhtml": ["<button disabled=\"disabled\">"]
+},
+
+{"description": "quote_attr_values=true with irrelevant",
+ "options": {"quote_attr_values": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "div", [{"namespace": null, "name": "irrelevant", "value" :"irrelevant"}]]],
+ "expected": ["<div irrelevant>"],
+ "xhtml": ["<div irrelevant=\"irrelevant\">"]
+},
+
+{"description": "use_trailing_solidus=true with void element",
+ "options": {"use_trailing_solidus": true},
+ "input": [["EmptyTag", "img", {}]],
+ "expected": ["<img />"]
+},
+
+{"description": "use_trailing_solidus=true with non-void element",
+ "options": {"use_trailing_solidus": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "div", {}]],
+ "expected": ["<div>"]
+},
+
+{"description": "minimize_boolean_attributes=false",
+ "options": {"minimize_boolean_attributes": false},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "div", [{"namespace": null, "name": "irrelevant", "value" :"irrelevant"}]]],
+ "expected": ["<div irrelevant=irrelevant>"],
+ "xhtml": ["<div irrelevant=\"irrelevant\">"]
+},
+
+{"description": "minimize_boolean_attributes=false with empty value",
+ "options": {"minimize_boolean_attributes": false},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "div", [{"namespace": null, "name": "irrelevant", "value" :""}]]],
+ "expected": ["<div irrelevant=\"\">"]
+},
+
+{"description": "escape less than signs in attribute values",
+ "options": {"escape_lt_in_attrs": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "a", [{"namespace": null, "name": "title", "value": "a<b>c&d"}]]],
+ "expected": ["<a title=\"a&lt;b>c&amp;d\">"]
+},
+
+{"description": "rcdata",
+ "options": {"escape_rcdata": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "script", {}], ["Characters", "a<b>c&d"]],
+ "expected": ["<script>a&lt;b&gt;c&amp;d"]
+}
+
+]}
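Each case here flips one serializer constructor option, and the keyword names mirror the "options" keys used above: quote_char, quote_attr_values, use_trailing_solidus, minimize_boolean_attributes, escape_lt_in_attrs and escape_rcdata. A hedged sketch combining a few of them, assuming the vendored html5lib package is importable:

import html5lib
from html5lib.serializer import HTMLSerializer

fragment = html5lib.parseFragment('<button disabled="disabled">go</button><img>')
walker = html5lib.getTreeWalker("etree")

opts = {
    "quote_attr_values": True,             # always quote attribute values
    "minimize_boolean_attributes": False,  # keep disabled="disabled" spelled out
    "use_trailing_solidus": True,          # emit void elements as <img />
}
print(HTMLSerializer(**opts).render(walker(fragment)))
# expected along the lines of: <button disabled="disabled">go</button><img />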
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/whitespace.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/whitespace.test
new file mode 100644
index 000000000..e5d050d3b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/serializer/whitespace.test
@@ -0,0 +1,51 @@
+{"tests": [
+
+{"description": "bare text with leading spaces",
+ "options": {"strip_whitespace": true},
+ "input": [["Characters", "\t\r\n\u000C foo"]],
+ "expected": [" foo"]
+},
+
+{"description": "bare text with trailing spaces",
+ "options": {"strip_whitespace": true},
+ "input": [["Characters", "foo \t\r\n\u000C"]],
+ "expected": ["foo "]
+},
+
+{"description": "bare text with inner spaces",
+ "options": {"strip_whitespace": true},
+ "input": [["Characters", "foo \t\r\n\u000C bar"]],
+ "expected": ["foo bar"]
+},
+
+{"description": "text within <pre>",
+ "options": {"strip_whitespace": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "pre", {}], ["Characters", "\t\r\n\u000C foo \t\r\n\u000C bar \t\r\n\u000C"], ["EndTag", "http://www.w3.org/1999/xhtml", "pre"]],
+ "expected": ["<pre>\t\r\n\u000C foo \t\r\n\u000C bar \t\r\n\u000C</pre>"]
+},
+
+{"description": "text within <pre>, with inner markup",
+ "options": {"strip_whitespace": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "pre", {}], ["Characters", "\t\r\n\u000C fo"], ["StartTag", "http://www.w3.org/1999/xhtml", "span", {}], ["Characters", "o \t\r\n\u000C b"], ["EndTag", "http://www.w3.org/1999/xhtml", "span"], ["Characters", "ar \t\r\n\u000C"], ["EndTag", "http://www.w3.org/1999/xhtml", "pre"]],
+ "expected": ["<pre>\t\r\n\u000C fo<span>o \t\r\n\u000C b</span>ar \t\r\n\u000C</pre>"]
+},
+
+{"description": "text within <textarea>",
+ "options": {"strip_whitespace": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "textarea", {}], ["Characters", "\t\r\n\u000C foo \t\r\n\u000C bar \t\r\n\u000C"], ["EndTag", "http://www.w3.org/1999/xhtml", "textarea"]],
+ "expected": ["<textarea>\t\r\n\u000C foo \t\r\n\u000C bar \t\r\n\u000C</textarea>"]
+},
+
+{"description": "text within <script>",
+ "options": {"strip_whitespace": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "script", {}], ["Characters", "\t\r\n\u000C foo \t\r\n\u000C bar \t\r\n\u000C"], ["EndTag", "http://www.w3.org/1999/xhtml", "script"]],
+ "expected": ["<script>\t\r\n\u000C foo \t\r\n\u000C bar \t\r\n\u000C</script>"]
+},
+
+{"description": "text within <style>",
+ "options": {"strip_whitespace": true},
+ "input": [["StartTag", "http://www.w3.org/1999/xhtml", "style", {}], ["Characters", "\t\r\n\u000C foo \t\r\n\u000C bar \t\r\n\u000C"], ["EndTag", "http://www.w3.org/1999/xhtml", "style"]],
+ "expected": ["<style>\t\r\n\u000C foo \t\r\n\u000C bar \t\r\n\u000C</style>"]
+}
+
+]} \ No newline at end of file
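The strip_whitespace option collapses runs of space, tab, LF, CR and FF in ordinary character data to a single space, while text inside pre, textarea, script and style is passed through untouched, as the cases above require. A hedged sketch, assuming the vendored html5lib package is importable:

import html5lib
from html5lib.serializer import HTMLSerializer

fragment = html5lib.parseFragment("<p>foo \t\n bar</p><pre>foo \t\n bar</pre>")
walker = html5lib.getTreeWalker("etree")
print(HTMLSerializer(strip_whitespace=True).render(walker(fragment)))
# the run inside <p> collapses to one space; the <pre> content keeps its whitespace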
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/sniffer/htmlOrFeed.json b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/sniffer/htmlOrFeed.json
new file mode 100644
index 000000000..c1506dbcd
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/sniffer/htmlOrFeed.json
@@ -0,0 +1,43 @@
+[
+ {"type": "text/html", "input": ""},
+ {"type": "text/html", "input": "<!---->"},
+ {"type": "text/html", "input": "<!--asdfaslkjdf;laksjdf as;dkfjsd-->"},
+ {"type": "text/html", "input": "<!"},
+ {"type": "text/html", "input": "\t"},
+ {"type": "text/html", "input": "<!>"},
+ {"type": "text/html", "input": "<?"},
+ {"type": "text/html", "input": "<??>"},
+ {"type": "application/rss+xml", "input": "<rss"},
+ {"type": "application/atom+xml", "input": "<feed"},
+ {"type": "text/html", "input": "<html"},
+ {"type": "text/html", "input": "<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>302 Found</title>\n</head><body>\n<h1>Found</h1>\n<p>The document has moved <a href=\"http://feeds.feedburner.com/gofug\">here</a>.</p>\n</body></html>\n"},
+ {"type": "text/html", "input": "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\r\n<HTML><HEAD>\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"http://cache.blogads.com/289619328/feed.css\" /><link rel=\"stylesheet\" type=\"text/css\" href=\"http://cache.blogads.com/431602649/feed.css\" />\r\n<link rel=\"stylesheet\" type=\"text/css\" href=\"http://cache.blogads.com/382549546/feed.css\" />\r\n<link rel=\"stylesheet\" type=\"text/css\" href=\"http://cache.blogads.com/314618017/feed.css\" /><META http-equiv=\"expires\" content="},
+ {"type": "text/html", "input": "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">\r\n<html>\r\n<head>\r\n<title>Xiaxue - Chicken pie blogger.</title><meta http-equiv=\"Content-Type\" content=\"text/html; charset=iso-8859-1\"><style type=\"text/css\">\r\n<style type=\"text/css\">\r\n<!--\r\nbody {\r\n background-color: #FFF2F2;\r\n}\r\n.style1 {font-family: Georgia, \"Times New Roman\", Times, serif}\r\n.style2 {\r\n color: #8a567c;\r\n font-size: 14px;\r\n font-family: Georgia, \"Times New Roman\", Times, serif;\r\n}\r"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\"><html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\r\n<head> \r\n<title>Google Operating System</title>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\r\n<meta name=\"Description\" content=\"Unofficial news and tips about Google. A blog that watches Google's latest developments and the attempts to move your operating system online.\" />\r\n<meta name=\"generator\" c"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\"><html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\r\n<head>\r\n <title>Assimilated Press</title> <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\r\n<meta name=\"MSSmartTagsPreventParsing\" content=\"true\" />\r\n<meta name=\"generator\" content=\"Blogger\" />\r\n<link rel=\"alternate\" type=\"application/atom+xml\" title=\"Assimilated Press - Atom\" href=\"http://assimila"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\"><html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\r\n<head>\r\n <title>PostSecret</title>\r\n<META name=\"keywords\" Content=\"secrets, postcard, secret, postcards, postsecret, postsecrets,online confessional, post secret, post secrets, artomatic, post a secret\"><META name=\"discription\" Content=\"See a Secret...Share a Secret\"> <meta http-equiv=\"Content-Type\" content=\"te"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html xmlns='http://www.w3.org/1999/xhtml' xmlns:b='http://www.google.com/2005/gml/b' xmlns:data='http://www.google.com/2005/gml/data' xmlns:expr='http://www.google.com/2005/gml/expr'>\n <head>\n \n <meta content='text/html; charset=UTF-8' http-equiv='Content-Type'/>\n <meta content='true' name='MSSmartTagsPreventParsing'/>\n <meta content='blogger' name='generator'/>\n <link rel=\"alternate\" typ"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" dir=\"ltr\" lang=\"ja\">\n<head profile=\"http://gmpg.org/xfn/11\"> \n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" /> \n<title> CMS Lever</title><link rel=\"stylesheet\" type=\"text/css\" media=\"screen\" href=\"http://s.wordpress.com/wp-content/themes/pub/twenty-eight/2813.css\"/>\n<link rel=\"alternate\" type=\"application/rss+xml\" title=\"RSS 2.0\" h"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" dir=\"ltr\" lang=\"en\"><head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\n<title> Park Avenue Peerage</title>\t<meta name=\"generator\" content=\"WordPress.com\" />\t<!-- feeds -->\n\t<link rel=\"alternate\" type=\"application/rss+xml\" title=\"RSS 2.0\" href=\"http://parkavenuepeerage.wordpress.com/feed/\" />\t<link rel=\"pingback\" href="},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" dir=\"ltr\" lang=\"ja\"><head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\n<title> \u884c\u96f2\u6d41\u6c34 -like a floating clouds and running water-</title>\t<meta name=\"generator\" content=\"WordPress.com\" />\t<!-- feeds -->\n\t<link rel=\"alternate\" type=\"application/rss+xml\" title=\"RSS 2.0\" href=\"http://shw4.wordpress.com/feed/\" />\t<li"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"><html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n<meta name=\"generator\" content=\"http://www.typepad.com/\" />\n<title>Go Fug Yourself</title><link rel=\"stylesheet\" href=\"http://gofugyourself.typepad.com/go_fug_yourself/styles.css\" type=\"text/css\" />\n<link rel=\"alternate\" type=\"application/atom+xml\" title=\"Atom\" "},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" dir=\"ltr\" lang=\"en\"><head profile=\"http://gmpg.org/xfn/11\">\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" /><title> Ladies&#8230;</title><meta name=\"generator\" content=\"WordPress.com\" /> <!-- leave this for stats --><link rel=\"stylesheet\" href=\"http://s.wordpress.com/wp-content/themes/default/style.css?1\" type=\"tex"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\r\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\r\n<head>\r\n <title>The Sartorialist</title> <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\r\n<meta name=\"MSSmartTagsPreventParsing\" content=\"true\" />\r\n<meta name=\"generator\" content=\"Blogger\" />\r\n<link rel=\"alternate\" type=\"application/atom+xml\" title=\"The Sartorialist - Atom\" href=\"http://thesartorialist.blogspot"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"><html xmlns=\"http://www.w3.org/1999/xhtml\" lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=ISO-8859-1\" />\n<meta name=\"generator\" content=\"http://www.typepad.com/\" />\n<title>Creating Passionate Users</title><link rel=\"stylesheet\" href=\"http://headrush.typepad.com/creating_passionate_users/styles.css\" type=\"text/css\" />\n<link rel=\"alternate\" type"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n\t\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" id=\"sixapart-standard\">\n<head>\n\t<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n\t<meta name=\"generator\" content=\"http://www.typepad.com/\" />\n\t\n\t\n <meta name=\"keywords\" content=\"marketing, blog, seth, ideas, respect, permission\" />\n <meta name=\"description\" content=\"Seth Godin's riffs on marketing, respect, and the "},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n\t\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" id=\"sixapart-standard\">\n<head>\n\t<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n\t<meta name=\"generator\" content=\"http://www.typepad.com/\" />\n\t\n\t\n \n <meta name=\"description\" content=\" Western Civilization hangs in the balance. This blog is part of the solution,the cure. Get your heads out of the sand and Fight the G"},
+ {"type": "text/html", "input": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.1//EN\" \"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" dir=\"ltr\" lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=pahrefhttpwwwfeedburnercomtarget_blankimgsrchttpwwwfeedburnercomfbimagespubpowered_by_fbgifaltPoweredbyFeedBurnerstyleborder0ap\" />\n<title> From Under the Rotunda</title>\n<link rel=\"stylesheet\" href=\"http://s.wordpress.com/wp-content/themes/pub/andreas04/style.css\" type=\"text/css\""},
+ {"type": "application/atom+xml", "input": "<?xml version='1.0' encoding='UTF-8'?><?xml-stylesheet href=\"http://www.blogger.com/styles/atom.css\" type=\"text/css\"?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'><id>tag:blogger.com,1999:blog-10861780</id><updated>2007-07-27T12:38:50.888-07:00</updated><title type='text'>Official Google Blog</title><link rel='alternate' type='text/html' href='http://googleblog.blogspot.com/'/><link rel='next' type='application/atom+xml' href='http://googleblog.blogs"},
+ {"type": "application/rss+xml", "input": "<?xml version='1.0' encoding='UTF-8'?><rss xmlns:atom='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' version='2.0'><channel><atom:id>tag:blogger.com,1999:blog-10861780</atom:id><lastBuildDate>Fri, 27 Jul 2007 19:38:50 +0000</lastBuildDate><title>Official Google Blog</title><description/><link>http://googleblog.blogspot.com/</link><managingEditor>Eric Case</managingEditor><generator>Blogger</generator><openSearch:totalResults>729</openSearch:totalResults><openSearc"},
+ {"type": "application/rss+xml", "input": "<?xml version=\"1.0\" encoding=\"pahrefhttpwwwfeedburnercomtarget_blankimgsrchttpwwwfeedburnercomfbimagespubpowered_by_fbgifaltPoweredbyFeedBurnerstyleborder0ap\"?>\n<!-- generator=\"wordpress/MU\" -->\n<rss version=\"2.0\"\n\txmlns:content=\"http://purl.org/rss/1.0/modules/content/\"\n\txmlns:wfw=\"http://wellformedweb.org/CommentAPI/\"\n\txmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n\t><channel>\n\t<title>From Under the Rotunda</title>\n\t<link>http://dannybernardi.wordpress.com</link>\n\t<description>The Monographs of Danny Ber"},
+ {"type": "application/rss+xml", "input": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!-- generator=\"wordpress/MU\" -->\n<rss version=\"2.0\"\n\txmlns:content=\"http://purl.org/rss/1.0/modules/content/\"\n\txmlns:wfw=\"http://wellformedweb.org/CommentAPI/\"\n\txmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n\t><channel>\n\t<title>CMS Lever</title>\n\t<link>http://kanaguri.wordpress.com</link>\n\t<description>CMS\u306e\u6c17\u306b\u306a\u3063\u305f\u3053\u3068</description>\n\t<pubDate>Wed, 18 Jul 2007 21:26:22 +0000</pubDate>\n\t<generator>http://wordpress.org/?v=MU</generator>\n\t<language>ja</languag"},
+ {"type": "application/atom+xml", "input": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<feed xmlns=\"http://www.w3.org/2005/Atom\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:thr=\"http://purl.org/syndication/thread/1.0\">\n <title>Atlas Shrugs</title>\n <link rel=\"self\" type=\"application/atom+xml\" href=\"http://atlasshrugs2000.typepad.com/atlas_shrugs/atom.xml\" />\n <link rel=\"alternate\" type=\"text/html\" href=\"http://atlasshrugs2000.typepad.com/atlas_shrugs/\" />\n <id>tag:typepad.com,2003:weblog-132946</id>\n <updated>2007-08-15T16:07:34-04"},
+ {"type": "application/atom+xml", "input": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/atom10full.xsl\" type=\"text/xsl\" media=\"screen\"?><?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/itemcontent.css\" type=\"text/css\" media=\"screen\"?><feed xmlns=\"http://www.w3.org/2005/Atom\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:thr=\"http://purl.org/syndication/thread/1.0\" xmlns:feedburner=\"http://rssnamespace.org/feedburner/ext/1.0\">\r\n <title>Creating Passionate Users</title>\r\n "},
+ {"type": "application/atom+xml", "input": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/atom10full.xsl\" type=\"text/xsl\" media=\"screen\"?><?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/itemcontent.css\" type=\"text/css\" media=\"screen\"?><feed xmlns=\"http://www.w3.org/2005/Atom\" xmlns:feedburner=\"http://rssnamespace.org/feedburner/ext/1.0\">\r\n <title>Seth's Blog</title>\r\n <link rel=\"alternate\" type=\"text/html\" href=\"http://sethgodin.typepad.com/seths_blog/\" />\r\n <link rel=\"s"},
+ {"type": "application/atom+xml", "input": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/atom10full.xsl\" type=\"text/xsl\" media=\"screen\"?><?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/itemcontent.css\" type=\"text/css\" media=\"screen\"?><feed xmlns=\"http://www.w3.org/2005/Atom\" xmlns:openSearch=\"http://a9.com/-/spec/opensearchrss/1.0/\" xmlns:feedburner=\"http://rssnamespace.org/feedburner/ext/1.0\"><id>tag:blogger.com,1999:blog-32454861</id><updated>2007-07-31T21:44:09.867+02:00</upd"},
+ {"type": "application/atom+xml", "input": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/atomfull.xsl\" type=\"text/xsl\" media=\"screen\"?><?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/itemcontent.css\" type=\"text/css\" media=\"screen\"?><feed xmlns=\"http://purl.org/atom/ns#\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:feedburner=\"http://rssnamespace.org/feedburner/ext/1.0\" version=\"0.3\">\r\n <title>Go Fug Yourself</title>\r\n <link rel=\"alternate\" type=\"text/html\" href=\"http://go"},
+ {"type": "application/rss+xml", "input": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/rss2full.xsl\" type=\"text/xsl\" media=\"screen\"?><?xml-stylesheet href=\"http://feeds.feedburner.com/~d/styles/itemcontent.css\" type=\"text/css\" media=\"screen\"?><rss xmlns:creativeCommons=\"http://backend.userland.com/creativeCommonsRssModule\" xmlns:feedburner=\"http://rssnamespace.org/feedburner/ext/1.0\" version=\"2.0\"><channel><title>Google Operating System</title><link>http://googlesystem.blogspot.com/</link>"},
+ {"type": "application/rss+xml", "input": "<?xml version=\"1.0\" encoding=\"\"?>\n<!-- generator=\"wordpress/MU\" -->\n<rss version=\"2.0\"\n\txmlns:content=\"http://purl.org/rss/1.0/modules/content/\"\n\txmlns:wfw=\"http://wellformedweb.org/CommentAPI/\"\n\txmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n\t><channel>\n\t<title>Nunublog</title>\n\t<link>http://nunubh.wordpress.com</link>\n\t<description>Just Newbie Blog!</description>\n\t<pubDate>Mon, 09 Jul 2007 18:54:09 +0000</pubDate>\n\t<generator>http://wordpress.org/?v=MU</generator>\n\t<language>id</language>\n\t\t\t<item>\n\t\t<ti"},
+ {"type": "text/html", "input": "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\r\n<HEAD>\r\n<TITLE>Design*Sponge</TITLE><meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\r\n<meta name=\"MSSmartTagsPreventParsing\" content=\"true\" />\r\n<meta name=\"generator\" content=\"Blogger\" />\r\n<link rel=\"alternate\" type=\"application/atom+xml\" title=\"Design*Sponge - Atom\" href=\"http://designsponge.blogspot.com/feeds/posts/default\" />\r\n<link rel=\"alternate\" type=\"application/rss+xml\" title=\"Design*Sponge - RSS\" href="},
+ {"type": "text/html", "input": "<HTML>\n<HEAD>\n<TITLE>Moved Temporarily</TITLE>\n</HEAD>\n<BODY BGCOLOR=\"#FFFFFF\" TEXT=\"#000000\">\n<H1>Moved Temporarily</H1>\nThe document has moved <A HREF=\"http://feeds.feedburner.com/thesecretdiaryofstevejobs\">here</A>.\n</BODY>\n</HTML>\n"}
+]
\ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/README.md b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/README.md
new file mode 100644
index 000000000..4218c26bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/README.md
@@ -0,0 +1,104 @@
+Tokenizer tests
+===============
+
+The test format is [JSON](http://www.json.org/). This has the advantage
+that the syntax allows backward-compatible extensions to the tests and
+the disadvantage that it is relatively verbose.
+
+Basic Structure
+---------------
+
+ {"tests": [
+     {"description": "Test description",
+     "input": "input_string",
+     "output": [expected_output_tokens],
+     "initialStates": [initial_states],
+     "lastStartTag": last_start_tag,
+     "ignoreErrorOrder": ignore_error_order
+     }
+ ]}
+
+Multiple tests per file are allowed simply by adding more objects to the
+"tests" list.
+
+`description`, `input` and `output` are always present. The other values
+are optional.
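+
+Loading a test file therefore needs nothing beyond a JSON parser. For
+example, in Python (run from the directory containing the test files):
+
+    import json
+
+    with open("contentModelFlags.test") as fh:
+        tests = json.load(fh)["tests"]
+
+    for test in tests:
+        print(test["description"], "->", test["output"])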
+
+### Test set-up
+
+`test.input` is a string containing the characters to pass to the
+tokenizer. Specifically, it represents the characters of the **input
+stream**, and so implementations are expected to perform the processing
+described in the spec's **Preprocessing the input stream** section
+before feeding the result to the tokenizer.
+
+If `test.doubleEscaped` is present and `true`, then `test.input` is not
+quite as described above. Instead, it must first be subjected to another
+round of unescaping (i.e., in addition to any unescaping involved in the
+JSON import), and the result of *that* represents the characters of the
+input stream. Currently, the only unescaping required by this option is
+to convert each sequence of the form \\uHHHH (where H is a hex digit)
+into the corresponding Unicode code point. (Note that this option also
+affects the interpretation of `test.output`.)
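+
+For example, one way to perform this extra unescaping round, shown as a
+minimal Python sketch (`unescape_double` is a made-up helper name, not
+part of the test format or of any particular harness):
+
+    import json
+    import re
+
+    def unescape_double(s):
+        # Replace each \uHHHH sequence (H = hex digit) with the
+        # corresponding code point; everything else passes through.
+        return re.sub(r"\\u([0-9a-fA-F]{4})",
+                      lambda m: chr(int(m.group(1), 16)), s)
+
+    test = json.loads('{"doubleEscaped": true, "input": "\\\\u0000"}')
+    if test.get("doubleEscaped"):
+        test["input"] = unescape_double(test["input"])  # now "\x00"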
+
+`test.initialStates` is a list of strings, each being the name of a
+tokenizer state. The test should be run once for each string, using it
+to set the tokenizer's initial state for that run. If
+`test.initialStates` is omitted, it defaults to `["data state"]`.
+
+`test.lastStartTag` is a lowercase string that should be used as "the
+tag name of the last start tag to have been emitted from this
+tokenizer", referenced in the spec's definition of **appropriate end tag
+token**. If it is omitted, it is treated as if "no start tag has been
+emitted from this tokenizer".
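+
+As a rough illustration only (the attribute and method names below are
+invented, not the interface of any real tokenizer), a harness might
+apply these two settings like so:
+
+    def run_tokenizer_test(test, make_tokenizer):
+        # test["input"] is assumed to have already been through the
+        # preprocessing / doubleEscaped handling described above.
+        states = test.get("initialStates", ["data state"])
+        for state in states:
+            tokenizer = make_tokenizer()
+            tokenizer.state = state                               # assumed attribute
+            tokenizer.last_start_tag = test.get("lastStartTag")   # None => none emitted
+            yield state, tokenizer.tokenize(test["input"])        # assumed method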
+
+### Test results
+
+`test.output` is a list of tokens, ordered so that the token produced
+first by the tokenizer is the first (leftmost) entry in the list. The
+list must match the **complete** list of tokens that the tokenizer
+should produce. Valid tokens are:
+
+ ["DOCTYPE", name, public_id, system_id, correctness]
+ ["StartTag", name, {attributes}*, true*]
+ ["StartTag", name, {attributes}]
+ ["EndTag", name]
+ ["Comment", data]
+ ["Character", data]
+ "ParseError"
+
+`public_id` and `system_id` are either strings or `null`. `correctness`
+is either `true` or `false`; `true` corresponds to the force-quirks flag
+being false, and vice-versa.
+
+When the self-closing flag is set, the `StartTag` array has `true` as
+its fourth entry. When the flag is not set, the array has only three
+entries for backwards compatibility.
+
+All adjacent character tokens are coalesced into a single
+`["Character", data]` token.
+
+If `test.doubleEscaped` is present and `true`, then every string within
+`test.output` must be further unescaped (as described above) before
+comparing with the tokenizer's output.
+
+`test.ignoreErrorOrder` is a boolean value indicating that the order of
+`ParseError` tokens relative to other tokens in the output stream is
+unimportant, and implementations should ignore such differences between
+their output and `expected_output_tokens`. (This is used for errors
+emitted by the input stream preprocessing stage, since it is useful to
+test that code but the spec does not define exactly when those errors
+occur.) If it is omitted, it defaults to `false`.
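+
+Putting the coalescing and `ignoreErrorOrder` rules together (the
+`doubleEscaped` unescaping shown earlier would be applied to both lists
+first), a comparison routine could look roughly like the following
+sketch; the helper names are made up for illustration:
+
+    def coalesce(tokens):
+        # Merge adjacent ["Character", data] tokens into one.
+        out = []
+        for tok in tokens:
+            if (isinstance(tok, list) and tok[0] == "Character" and out
+                    and isinstance(out[-1], list) and out[-1][0] == "Character"):
+                out[-1] = ["Character", out[-1][1] + tok[1]]
+            else:
+                out.append(list(tok) if isinstance(tok, list) else tok)
+        return out
+
+    def tokens_match(expected, actual, ignore_error_order=False):
+        expected, actual = coalesce(expected), coalesce(actual)
+        if ignore_error_order:
+            # Require the same number of ParseErrors, but ignore where
+            # they occur relative to the other tokens.
+            non_errors = lambda ts: [t for t in ts if t != "ParseError"]
+            return (expected.count("ParseError") == actual.count("ParseError")
+                    and non_errors(expected) == non_errors(actual))
+        return expected == actual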
+
+xmlViolation tests
+------------------
+
+`tokenizer/xmlViolation.test` differs from the above in a couple of
+ways:
+
+- The name of the single member of the top-level JSON object is
+ "xmlViolationTests" instead of "tests".
+- Each test's expected output assumes that the implementation is
+ applying the tweaks given in the spec's "Coercing an HTML DOM into an
+ infoset" section.
+
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/contentModelFlags.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/contentModelFlags.test
new file mode 100644
index 000000000..89b8170c6
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/contentModelFlags.test
@@ -0,0 +1,81 @@
+{"tests": [
+
+{"description":"PLAINTEXT content model flag",
+"initialStates":["PLAINTEXT state"],
+"lastStartTag":"plaintext",
+"input":"<head>&body;",
+"output":[["Character", "<head>&body;"]]},
+
+{"description":"End tag closing RCDATA or RAWTEXT",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo</xmp>",
+"output":[["Character", "foo"], ["EndTag", "xmp"]]},
+
+{"description":"End tag closing RCDATA or RAWTEXT (case-insensitivity)",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo</xMp>",
+"output":[["Character", "foo"], ["EndTag", "xmp"]]},
+
+{"description":"End tag closing RCDATA or RAWTEXT (ending with space)",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo</xmp ",
+"output":[["Character", "foo"], "ParseError"]},
+
+{"description":"End tag closing RCDATA or RAWTEXT (ending with EOF)",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo</xmp",
+"output":[["Character", "foo</xmp"]]},
+
+{"description":"End tag closing RCDATA or RAWTEXT (ending with slash)",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo</xmp/",
+"output":[["Character", "foo"], "ParseError"]},
+
+{"description":"End tag not closing RCDATA or RAWTEXT (ending with left-angle-bracket)",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo</xmp<",
+"output":[["Character", "foo</xmp<"]]},
+
+{"description":"End tag with incorrect name in RCDATA or RAWTEXT",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"</foo>bar</xmp>",
+"output":[["Character", "</foo>bar"], ["EndTag", "xmp"]]},
+
+{"description":"Partial end tags leading straight into partial end tags",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"</xmp</xmp</xmp>",
+"output":[["Character", "</xmp</xmp"], ["EndTag", "xmp"]]},
+
+{"description":"End tag with incorrect name in RCDATA or RAWTEXT (starting like correct name)",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"</foo>bar</xmpaar>",
+"output":[["Character", "</foo>bar</xmpaar>"]]},
+
+{"description":"End tag closing RCDATA or RAWTEXT, switching back to PCDATA",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo</xmp></baz>",
+"output":[["Character", "foo"], ["EndTag", "xmp"], ["EndTag", "baz"]]},
+
+{"description":"RAWTEXT w/ something looking like an entity",
+"initialStates":["RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"&foo;",
+"output":[["Character", "&foo;"]]},
+
+{"description":"RCDATA w/ an entity",
+"initialStates":["RCDATA state"],
+"lastStartTag":"textarea",
+"input":"&lt;",
+"output":[["Character", "<"]]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/domjs.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/domjs.test
new file mode 100644
index 000000000..8f1e42f35
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/domjs.test
@@ -0,0 +1,96 @@
+{
+ "tests": [
+ {
+ "description":"CR in bogus comment state",
+ "input":"<?\u000d",
+ "output":["ParseError", ["Comment", "?\u000a"]]
+ },
+ {
+ "description":"CRLF in bogus comment state",
+ "input":"<?\u000d\u000a",
+ "output":["ParseError", ["Comment", "?\u000a"]]
+ },
+ {
+ "description":"CRLFLF in bogus comment state",
+ "input":"<?\u000d\u000a\u000a",
+ "output":["ParseError", ["Comment", "?\u000a\u000a"]]
+ },
+ {
+ "description":"NUL in RCDATA and RAWTEXT",
+ "doubleEscaped":true,
+ "initialStates":["RCDATA state", "RAWTEXT state"],
+ "input":"\\u0000",
+ "output":["ParseError", ["Character", "\\uFFFD"]]
+ },
+ {
+ "description":"leading U+FEFF must pass through",
+ "doubleEscaped":true,
+ "input":"\\uFEFFfoo\\uFEFFbar",
+ "output":[["Character", "\\uFEFFfoo\\uFEFFbar"]]
+ },
+ {
+ "description":"Non BMP-charref in RCDATA",
+ "initialStates":["RCDATA state"],
+ "input":"&NotEqualTilde;",
+ "output":[["Character", "\u2242\u0338"]]
+ },
+ {
+ "description":"Bad charref in RCDATA",
+ "initialStates":["RCDATA state"],
+ "input":"&NotEqualTild;",
+ "output":["ParseError", ["Character", "&NotEqualTild;"]]
+ },
+ {
+ "description":"lowercase endtags in RCDATA and RAWTEXT",
+ "initialStates":["RCDATA state", "RAWTEXT state"],
+ "lastStartTag":"xmp",
+ "input":"</XMP>",
+ "output":[["EndTag","xmp"]]
+ },
+ {
+ "description":"bad endtag in RCDATA and RAWTEXT",
+ "initialStates":["RCDATA state", "RAWTEXT state"],
+ "lastStartTag":"xmp",
+ "input":"</ XMP>",
+ "output":[["Character","</ XMP>"]]
+ },
+ {
+ "description":"bad endtag in RCDATA and RAWTEXT",
+ "initialStates":["RCDATA state", "RAWTEXT state"],
+ "lastStartTag":"xmp",
+ "input":"</xm>",
+ "output":[["Character","</xm>"]]
+ },
+ {
+ "description":"bad endtag in RCDATA and RAWTEXT",
+ "initialStates":["RCDATA state", "RAWTEXT state"],
+ "lastStartTag":"xmp",
+ "input":"</xm ",
+ "output":[["Character","</xm "]]
+ },
+ {
+ "description":"bad endtag in RCDATA and RAWTEXT",
+ "initialStates":["RCDATA state", "RAWTEXT state"],
+ "lastStartTag":"xmp",
+ "input":"</xm/",
+ "output":[["Character","</xm/"]]
+ },
+ {
+ "description":"Non BMP-charref in attribute",
+ "input":"<p id=\"&NotEqualTilde;\">",
+ "output":[["StartTag", "p", {"id":"\u2242\u0338"}]]
+ },
+ {
+ "description":"--!NUL in comment ",
+ "doubleEscaped":true,
+ "input":"<!----!\\u0000-->",
+ "output":["ParseError", "ParseError", ["Comment", "--!\\uFFFD"]]
+ },
+ {
+ "description":"space EOF after doctype ",
+ "input":"<!DOCTYPE html ",
+ "output":["ParseError", ["DOCTYPE", "html", null, null , false]]
+ }
+
+ ]
+}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/entities.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/entities.test
new file mode 100644
index 000000000..27b85a1cd
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/entities.test
@@ -0,0 +1,283 @@
+{"tests": [
+
+{"description": "Undefined named entity in attribute value ending in semicolon and whose name starts with a known entity name.",
+"input":"<h a='&noti;'>",
+"output": [["StartTag", "h", {"a": "&noti;"}]]},
+
+{"description": "Entity name followed by the equals sign in an attribute value.",
+"input":"<h a='&lang='>",
+"output": [["StartTag", "h", {"a": "&lang="}]]},
+
+{"description": "CR as numeric entity",
+"input":"&#013;",
+"output": ["ParseError", ["Character", "\r"]]},
+
+{"description": "CR as hexadecimal numeric entity",
+"input":"&#x00D;",
+"output": ["ParseError", ["Character", "\r"]]},
+
+{"description": "Windows-1252 EURO SIGN numeric entity.",
+"input":"&#0128;",
+"output": ["ParseError", ["Character", "\u20AC"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR numeric entity.",
+"input":"&#0129;",
+"output": ["ParseError", ["Character", "\u0081"]]},
+
+{"description": "Windows-1252 SINGLE LOW-9 QUOTATION MARK numeric entity.",
+"input":"&#0130;",
+"output": ["ParseError", ["Character", "\u201A"]]},
+
+{"description": "Windows-1252 LATIN SMALL LETTER F WITH HOOK numeric entity.",
+"input":"&#0131;",
+"output": ["ParseError", ["Character", "\u0192"]]},
+
+{"description": "Windows-1252 DOUBLE LOW-9 QUOTATION MARK numeric entity.",
+"input":"&#0132;",
+"output": ["ParseError", ["Character", "\u201E"]]},
+
+{"description": "Windows-1252 HORIZONTAL ELLIPSIS numeric entity.",
+"input":"&#0133;",
+"output": ["ParseError", ["Character", "\u2026"]]},
+
+{"description": "Windows-1252 DAGGER numeric entity.",
+"input":"&#0134;",
+"output": ["ParseError", ["Character", "\u2020"]]},
+
+{"description": "Windows-1252 DOUBLE DAGGER numeric entity.",
+"input":"&#0135;",
+"output": ["ParseError", ["Character", "\u2021"]]},
+
+{"description": "Windows-1252 MODIFIER LETTER CIRCUMFLEX ACCENT numeric entity.",
+"input":"&#0136;",
+"output": ["ParseError", ["Character", "\u02C6"]]},
+
+{"description": "Windows-1252 PER MILLE SIGN numeric entity.",
+"input":"&#0137;",
+"output": ["ParseError", ["Character", "\u2030"]]},
+
+{"description": "Windows-1252 LATIN CAPITAL LETTER S WITH CARON numeric entity.",
+"input":"&#0138;",
+"output": ["ParseError", ["Character", "\u0160"]]},
+
+{"description": "Windows-1252 SINGLE LEFT-POINTING ANGLE QUOTATION MARK numeric entity.",
+"input":"&#0139;",
+"output": ["ParseError", ["Character", "\u2039"]]},
+
+{"description": "Windows-1252 LATIN CAPITAL LIGATURE OE numeric entity.",
+"input":"&#0140;",
+"output": ["ParseError", ["Character", "\u0152"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR numeric entity.",
+"input":"&#0141;",
+"output": ["ParseError", ["Character", "\u008D"]]},
+
+{"description": "Windows-1252 LATIN CAPITAL LETTER Z WITH CARON numeric entity.",
+"input":"&#0142;",
+"output": ["ParseError", ["Character", "\u017D"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR numeric entity.",
+"input":"&#0143;",
+"output": ["ParseError", ["Character", "\u008F"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR numeric entity.",
+"input":"&#0144;",
+"output": ["ParseError", ["Character", "\u0090"]]},
+
+{"description": "Windows-1252 LEFT SINGLE QUOTATION MARK numeric entity.",
+"input":"&#0145;",
+"output": ["ParseError", ["Character", "\u2018"]]},
+
+{"description": "Windows-1252 RIGHT SINGLE QUOTATION MARK numeric entity.",
+"input":"&#0146;",
+"output": ["ParseError", ["Character", "\u2019"]]},
+
+{"description": "Windows-1252 LEFT DOUBLE QUOTATION MARK numeric entity.",
+"input":"&#0147;",
+"output": ["ParseError", ["Character", "\u201C"]]},
+
+{"description": "Windows-1252 RIGHT DOUBLE QUOTATION MARK numeric entity.",
+"input":"&#0148;",
+"output": ["ParseError", ["Character", "\u201D"]]},
+
+{"description": "Windows-1252 BULLET numeric entity.",
+"input":"&#0149;",
+"output": ["ParseError", ["Character", "\u2022"]]},
+
+{"description": "Windows-1252 EN DASH numeric entity.",
+"input":"&#0150;",
+"output": ["ParseError", ["Character", "\u2013"]]},
+
+{"description": "Windows-1252 EM DASH numeric entity.",
+"input":"&#0151;",
+"output": ["ParseError", ["Character", "\u2014"]]},
+
+{"description": "Windows-1252 SMALL TILDE numeric entity.",
+"input":"&#0152;",
+"output": ["ParseError", ["Character", "\u02DC"]]},
+
+{"description": "Windows-1252 TRADE MARK SIGN numeric entity.",
+"input":"&#0153;",
+"output": ["ParseError", ["Character", "\u2122"]]},
+
+{"description": "Windows-1252 LATIN SMALL LETTER S WITH CARON numeric entity.",
+"input":"&#0154;",
+"output": ["ParseError", ["Character", "\u0161"]]},
+
+{"description": "Windows-1252 SINGLE RIGHT-POINTING ANGLE QUOTATION MARK numeric entity.",
+"input":"&#0155;",
+"output": ["ParseError", ["Character", "\u203A"]]},
+
+{"description": "Windows-1252 LATIN SMALL LIGATURE OE numeric entity.",
+"input":"&#0156;",
+"output": ["ParseError", ["Character", "\u0153"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR numeric entity.",
+"input":"&#0157;",
+"output": ["ParseError", ["Character", "\u009D"]]},
+
+{"description": "Windows-1252 EURO SIGN hexadecimal numeric entity.",
+"input":"&#x080;",
+"output": ["ParseError", ["Character", "\u20AC"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR hexadecimal numeric entity.",
+"input":"&#x081;",
+"output": ["ParseError", ["Character", "\u0081"]]},
+
+{"description": "Windows-1252 SINGLE LOW-9 QUOTATION MARK hexadecimal numeric entity.",
+"input":"&#x082;",
+"output": ["ParseError", ["Character", "\u201A"]]},
+
+{"description": "Windows-1252 LATIN SMALL LETTER F WITH HOOK hexadecimal numeric entity.",
+"input":"&#x083;",
+"output": ["ParseError", ["Character", "\u0192"]]},
+
+{"description": "Windows-1252 DOUBLE LOW-9 QUOTATION MARK hexadecimal numeric entity.",
+"input":"&#x084;",
+"output": ["ParseError", ["Character", "\u201E"]]},
+
+{"description": "Windows-1252 HORIZONTAL ELLIPSIS hexadecimal numeric entity.",
+"input":"&#x085;",
+"output": ["ParseError", ["Character", "\u2026"]]},
+
+{"description": "Windows-1252 DAGGER hexadecimal numeric entity.",
+"input":"&#x086;",
+"output": ["ParseError", ["Character", "\u2020"]]},
+
+{"description": "Windows-1252 DOUBLE DAGGER hexadecimal numeric entity.",
+"input":"&#x087;",
+"output": ["ParseError", ["Character", "\u2021"]]},
+
+{"description": "Windows-1252 MODIFIER LETTER CIRCUMFLEX ACCENT hexadecimal numeric entity.",
+"input":"&#x088;",
+"output": ["ParseError", ["Character", "\u02C6"]]},
+
+{"description": "Windows-1252 PER MILLE SIGN hexadecimal numeric entity.",
+"input":"&#x089;",
+"output": ["ParseError", ["Character", "\u2030"]]},
+
+{"description": "Windows-1252 LATIN CAPITAL LETTER S WITH CARON hexadecimal numeric entity.",
+"input":"&#x08A;",
+"output": ["ParseError", ["Character", "\u0160"]]},
+
+{"description": "Windows-1252 SINGLE LEFT-POINTING ANGLE QUOTATION MARK hexadecimal numeric entity.",
+"input":"&#x08B;",
+"output": ["ParseError", ["Character", "\u2039"]]},
+
+{"description": "Windows-1252 LATIN CAPITAL LIGATURE OE hexadecimal numeric entity.",
+"input":"&#x08C;",
+"output": ["ParseError", ["Character", "\u0152"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR hexadecimal numeric entity.",
+"input":"&#x08D;",
+"output": ["ParseError", ["Character", "\u008D"]]},
+
+{"description": "Windows-1252 LATIN CAPITAL LETTER Z WITH CARON hexadecimal numeric entity.",
+"input":"&#x08E;",
+"output": ["ParseError", ["Character", "\u017D"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR hexadecimal numeric entity.",
+"input":"&#x08F;",
+"output": ["ParseError", ["Character", "\u008F"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR hexadecimal numeric entity.",
+"input":"&#x090;",
+"output": ["ParseError", ["Character", "\u0090"]]},
+
+{"description": "Windows-1252 LEFT SINGLE QUOTATION MARK hexadecimal numeric entity.",
+"input":"&#x091;",
+"output": ["ParseError", ["Character", "\u2018"]]},
+
+{"description": "Windows-1252 RIGHT SINGLE QUOTATION MARK hexadecimal numeric entity.",
+"input":"&#x092;",
+"output": ["ParseError", ["Character", "\u2019"]]},
+
+{"description": "Windows-1252 LEFT DOUBLE QUOTATION MARK hexadecimal numeric entity.",
+"input":"&#x093;",
+"output": ["ParseError", ["Character", "\u201C"]]},
+
+{"description": "Windows-1252 RIGHT DOUBLE QUOTATION MARK hexadecimal numeric entity.",
+"input":"&#x094;",
+"output": ["ParseError", ["Character", "\u201D"]]},
+
+{"description": "Windows-1252 BULLET hexadecimal numeric entity.",
+"input":"&#x095;",
+"output": ["ParseError", ["Character", "\u2022"]]},
+
+{"description": "Windows-1252 EN DASH hexadecimal numeric entity.",
+"input":"&#x096;",
+"output": ["ParseError", ["Character", "\u2013"]]},
+
+{"description": "Windows-1252 EM DASH hexadecimal numeric entity.",
+"input":"&#x097;",
+"output": ["ParseError", ["Character", "\u2014"]]},
+
+{"description": "Windows-1252 SMALL TILDE hexadecimal numeric entity.",
+"input":"&#x098;",
+"output": ["ParseError", ["Character", "\u02DC"]]},
+
+{"description": "Windows-1252 TRADE MARK SIGN hexadecimal numeric entity.",
+"input":"&#x099;",
+"output": ["ParseError", ["Character", "\u2122"]]},
+
+{"description": "Windows-1252 LATIN SMALL LETTER S WITH CARON hexadecimal numeric entity.",
+"input":"&#x09A;",
+"output": ["ParseError", ["Character", "\u0161"]]},
+
+{"description": "Windows-1252 SINGLE RIGHT-POINTING ANGLE QUOTATION MARK hexadecimal numeric entity.",
+"input":"&#x09B;",
+"output": ["ParseError", ["Character", "\u203A"]]},
+
+{"description": "Windows-1252 LATIN SMALL LIGATURE OE hexadecimal numeric entity.",
+"input":"&#x09C;",
+"output": ["ParseError", ["Character", "\u0153"]]},
+
+{"description": "Windows-1252 REPLACEMENT CHAR hexadecimal numeric entity.",
+"input":"&#x09D;",
+"output": ["ParseError", ["Character", "\u009D"]]},
+
+{"description": "Windows-1252 LATIN SMALL LETTER Z WITH CARON hexadecimal numeric entity.",
+"input":"&#x09E;",
+"output": ["ParseError", ["Character", "\u017E"]]},
+
+{"description": "Windows-1252 LATIN CAPITAL LETTER Y WITH DIAERESIS hexadecimal numeric entity.",
+"input":"&#x09F;",
+"output": ["ParseError", ["Character", "\u0178"]]},
+
+{"description": "Decimal numeric entity followed by hex character a.",
+"input":"&#97a",
+"output": ["ParseError", ["Character", "aa"]]},
+
+{"description": "Decimal numeric entity followed by hex character A.",
+"input":"&#97A",
+"output": ["ParseError", ["Character", "aA"]]},
+
+{"description": "Decimal numeric entity followed by hex character f.",
+"input":"&#97f",
+"output": ["ParseError", ["Character", "af"]]},
+
+{"description": "Decimal numeric entity followed by hex character F.",
+"input":"&#97F",
+"output": ["ParseError", ["Character", "aF"]]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/escapeFlag.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/escapeFlag.test
new file mode 100644
index 000000000..18cb4309e
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/escapeFlag.test
@@ -0,0 +1,33 @@
+{"tests": [
+
+{"description":"Commented close tag in RCDATA or RAWTEXT",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo<!--</xmp>--></xmp>",
+"output":[["Character", "foo<!--"], ["EndTag", "xmp"], ["Character", "-->"], ["EndTag", "xmp"]]},
+
+{"description":"Bogus comment in RCDATA or RAWTEXT",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo<!-->baz</xmp>",
+"output":[["Character", "foo<!-->baz"], ["EndTag", "xmp"]]},
+
+{"description":"End tag surrounded by bogus comment in RCDATA or RAWTEXT",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo<!--></xmp><!-->baz</xmp>",
+"output":[["Character", "foo<!-->"], ["EndTag", "xmp"], "ParseError", ["Comment", ""], ["Character", "baz"], ["EndTag", "xmp"]]},
+
+{"description":"Commented entities in RCDATA",
+"initialStates":["RCDATA state"],
+"lastStartTag":"xmp",
+"input":" &amp; <!-- &amp; --> &amp; </xmp>",
+"output":[["Character", " & <!-- & --> & "], ["EndTag", "xmp"]]},
+
+{"description":"Incorrect comment ending sequences in RCDATA or RAWTEXT",
+"initialStates":["RCDATA state", "RAWTEXT state"],
+"lastStartTag":"xmp",
+"input":"foo<!-- x --x>x-- >x--!>x--<></xmp>",
+"output":[["Character", "foo<!-- x --x>x-- >x--!>x--<>"], ["EndTag", "xmp"]]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/namedEntities.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/namedEntities.test
new file mode 100644
index 000000000..14db2ede0
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/namedEntities.test
@@ -0,0 +1,42210 @@
+{
+ "tests": [
+ {
+ "input": "&AElig",
+ "description": "Named entity: AElig without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c6"
+ ]
+ ]
+ },
+ {
+ "input": "&AElig;",
+ "description": "Named entity: AElig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c6"
+ ]
+ ]
+ },
+ {
+ "input": "&AMP",
+ "description": "Named entity: AMP without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "&"
+ ]
+ ]
+ },
+ {
+ "input": "&AMP;",
+ "description": "Named entity: AMP; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&"
+ ]
+ ]
+ },
+ {
+ "input": "&Aacute",
+ "description": "Named entity: Aacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c1"
+ ]
+ ]
+ },
+ {
+ "input": "&Aacute;",
+ "description": "Named entity: Aacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c1"
+ ]
+ ]
+ },
+ {
+ "input": "&Abreve",
+ "description": "Bad named entity: Abreve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Abreve"
+ ]
+ ]
+ },
+ {
+ "input": "&Abreve;",
+ "description": "Named entity: Abreve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0102"
+ ]
+ ]
+ },
+ {
+ "input": "&Acirc",
+ "description": "Named entity: Acirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c2"
+ ]
+ ]
+ },
+ {
+ "input": "&Acirc;",
+ "description": "Named entity: Acirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c2"
+ ]
+ ]
+ },
+ {
+ "input": "&Acy",
+ "description": "Bad named entity: Acy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Acy"
+ ]
+ ]
+ },
+ {
+ "input": "&Acy;",
+ "description": "Named entity: Acy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0410"
+ ]
+ ]
+ },
+ {
+ "input": "&Afr",
+ "description": "Bad named entity: Afr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Afr"
+ ]
+ ]
+ },
+ {
+ "input": "&Afr;",
+ "description": "Named entity: Afr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd04"
+ ]
+ ]
+ },
+ {
+ "input": "&Agrave",
+ "description": "Named entity: Agrave without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c0"
+ ]
+ ]
+ },
+ {
+ "input": "&Agrave;",
+ "description": "Named entity: Agrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c0"
+ ]
+ ]
+ },
+ {
+ "input": "&Alpha",
+ "description": "Bad named entity: Alpha without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Alpha"
+ ]
+ ]
+ },
+ {
+ "input": "&Alpha;",
+ "description": "Named entity: Alpha; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0391"
+ ]
+ ]
+ },
+ {
+ "input": "&Amacr",
+ "description": "Bad named entity: Amacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Amacr"
+ ]
+ ]
+ },
+ {
+ "input": "&Amacr;",
+ "description": "Named entity: Amacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0100"
+ ]
+ ]
+ },
+ {
+ "input": "&And",
+ "description": "Bad named entity: And without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&And"
+ ]
+ ]
+ },
+ {
+ "input": "&And;",
+ "description": "Named entity: And; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a53"
+ ]
+ ]
+ },
+ {
+ "input": "&Aogon",
+ "description": "Bad named entity: Aogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Aogon"
+ ]
+ ]
+ },
+ {
+ "input": "&Aogon;",
+ "description": "Named entity: Aogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0104"
+ ]
+ ]
+ },
+ {
+ "input": "&Aopf",
+ "description": "Bad named entity: Aopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Aopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Aopf;",
+ "description": "Named entity: Aopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd38"
+ ]
+ ]
+ },
+ {
+ "input": "&ApplyFunction",
+ "description": "Bad named entity: ApplyFunction without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ApplyFunction"
+ ]
+ ]
+ },
+ {
+ "input": "&ApplyFunction;",
+ "description": "Named entity: ApplyFunction; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2061"
+ ]
+ ]
+ },
+ {
+ "input": "&Aring",
+ "description": "Named entity: Aring without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c5"
+ ]
+ ]
+ },
+ {
+ "input": "&Aring;",
+ "description": "Named entity: Aring; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c5"
+ ]
+ ]
+ },
+ {
+ "input": "&Ascr",
+ "description": "Bad named entity: Ascr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ascr"
+ ]
+ ]
+ },
+ {
+ "input": "&Ascr;",
+ "description": "Named entity: Ascr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udc9c"
+ ]
+ ]
+ },
+ {
+ "input": "&Assign",
+ "description": "Bad named entity: Assign without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Assign"
+ ]
+ ]
+ },
+ {
+ "input": "&Assign;",
+ "description": "Named entity: Assign; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2254"
+ ]
+ ]
+ },
+ {
+ "input": "&Atilde",
+ "description": "Named entity: Atilde without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c3"
+ ]
+ ]
+ },
+ {
+ "input": "&Atilde;",
+ "description": "Named entity: Atilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c3"
+ ]
+ ]
+ },
+ {
+ "input": "&Auml",
+ "description": "Named entity: Auml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c4"
+ ]
+ ]
+ },
+ {
+ "input": "&Auml;",
+ "description": "Named entity: Auml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c4"
+ ]
+ ]
+ },
+ {
+ "input": "&Backslash",
+ "description": "Bad named entity: Backslash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Backslash"
+ ]
+ ]
+ },
+ {
+ "input": "&Backslash;",
+ "description": "Named entity: Backslash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2216"
+ ]
+ ]
+ },
+ {
+ "input": "&Barv",
+ "description": "Bad named entity: Barv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Barv"
+ ]
+ ]
+ },
+ {
+ "input": "&Barv;",
+ "description": "Named entity: Barv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ae7"
+ ]
+ ]
+ },
+ {
+ "input": "&Barwed",
+ "description": "Bad named entity: Barwed without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Barwed"
+ ]
+ ]
+ },
+ {
+ "input": "&Barwed;",
+ "description": "Named entity: Barwed; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2306"
+ ]
+ ]
+ },
+ {
+ "input": "&Bcy",
+ "description": "Bad named entity: Bcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Bcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Bcy;",
+ "description": "Named entity: Bcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0411"
+ ]
+ ]
+ },
+ {
+ "input": "&Because",
+ "description": "Bad named entity: Because without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Because"
+ ]
+ ]
+ },
+ {
+ "input": "&Because;",
+ "description": "Named entity: Because; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2235"
+ ]
+ ]
+ },
+ {
+ "input": "&Bernoullis",
+ "description": "Bad named entity: Bernoullis without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Bernoullis"
+ ]
+ ]
+ },
+ {
+ "input": "&Bernoullis;",
+ "description": "Named entity: Bernoullis; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u212c"
+ ]
+ ]
+ },
+ {
+ "input": "&Beta",
+ "description": "Bad named entity: Beta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Beta"
+ ]
+ ]
+ },
+ {
+ "input": "&Beta;",
+ "description": "Named entity: Beta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0392"
+ ]
+ ]
+ },
+ {
+ "input": "&Bfr",
+ "description": "Bad named entity: Bfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Bfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Bfr;",
+ "description": "Named entity: Bfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd05"
+ ]
+ ]
+ },
+ {
+ "input": "&Bopf",
+ "description": "Bad named entity: Bopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Bopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Bopf;",
+ "description": "Named entity: Bopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd39"
+ ]
+ ]
+ },
+ {
+ "input": "&Breve",
+ "description": "Bad named entity: Breve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Breve"
+ ]
+ ]
+ },
+ {
+ "input": "&Breve;",
+ "description": "Named entity: Breve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02d8"
+ ]
+ ]
+ },
+ {
+ "input": "&Bscr",
+ "description": "Bad named entity: Bscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Bscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Bscr;",
+ "description": "Named entity: Bscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u212c"
+ ]
+ ]
+ },
+ {
+ "input": "&Bumpeq",
+ "description": "Bad named entity: Bumpeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Bumpeq"
+ ]
+ ]
+ },
+ {
+ "input": "&Bumpeq;",
+ "description": "Named entity: Bumpeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224e"
+ ]
+ ]
+ },
+ {
+ "input": "&CHcy",
+ "description": "Bad named entity: CHcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CHcy"
+ ]
+ ]
+ },
+ {
+ "input": "&CHcy;",
+ "description": "Named entity: CHcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0427"
+ ]
+ ]
+ },
+ {
+ "input": "&COPY",
+ "description": "Named entity: COPY without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a9"
+ ]
+ ]
+ },
+ {
+ "input": "&COPY;",
+ "description": "Named entity: COPY; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a9"
+ ]
+ ]
+ },
+ {
+ "input": "&Cacute",
+ "description": "Bad named entity: Cacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cacute"
+ ]
+ ]
+ },
+ {
+ "input": "&Cacute;",
+ "description": "Named entity: Cacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0106"
+ ]
+ ]
+ },
+ {
+ "input": "&Cap",
+ "description": "Bad named entity: Cap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cap"
+ ]
+ ]
+ },
+ {
+ "input": "&Cap;",
+ "description": "Named entity: Cap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d2"
+ ]
+ ]
+ },
+ {
+ "input": "&CapitalDifferentialD",
+ "description": "Bad named entity: CapitalDifferentialD without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CapitalDifferentialD"
+ ]
+ ]
+ },
+ {
+ "input": "&CapitalDifferentialD;",
+ "description": "Named entity: CapitalDifferentialD; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2145"
+ ]
+ ]
+ },
+ {
+ "input": "&Cayleys",
+ "description": "Bad named entity: Cayleys without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cayleys"
+ ]
+ ]
+ },
+ {
+ "input": "&Cayleys;",
+ "description": "Named entity: Cayleys; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u212d"
+ ]
+ ]
+ },
+ {
+ "input": "&Ccaron",
+ "description": "Bad named entity: Ccaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ccaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Ccaron;",
+ "description": "Named entity: Ccaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u010c"
+ ]
+ ]
+ },
+ {
+ "input": "&Ccedil",
+ "description": "Named entity: Ccedil without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c7"
+ ]
+ ]
+ },
+ {
+ "input": "&Ccedil;",
+ "description": "Named entity: Ccedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c7"
+ ]
+ ]
+ },
+ {
+ "input": "&Ccirc",
+ "description": "Bad named entity: Ccirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ccirc"
+ ]
+ ]
+ },
+ {
+ "input": "&Ccirc;",
+ "description": "Named entity: Ccirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0108"
+ ]
+ ]
+ },
+ {
+ "input": "&Cconint",
+ "description": "Bad named entity: Cconint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cconint"
+ ]
+ ]
+ },
+ {
+ "input": "&Cconint;",
+ "description": "Named entity: Cconint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2230"
+ ]
+ ]
+ },
+ {
+ "input": "&Cdot",
+ "description": "Bad named entity: Cdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cdot"
+ ]
+ ]
+ },
+ {
+ "input": "&Cdot;",
+ "description": "Named entity: Cdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u010a"
+ ]
+ ]
+ },
+ {
+ "input": "&Cedilla",
+ "description": "Bad named entity: Cedilla without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cedilla"
+ ]
+ ]
+ },
+ {
+ "input": "&Cedilla;",
+ "description": "Named entity: Cedilla; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b8"
+ ]
+ ]
+ },
+ {
+ "input": "&CenterDot",
+ "description": "Bad named entity: CenterDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CenterDot"
+ ]
+ ]
+ },
+ {
+ "input": "&CenterDot;",
+ "description": "Named entity: CenterDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b7"
+ ]
+ ]
+ },
+ {
+ "input": "&Cfr",
+ "description": "Bad named entity: Cfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Cfr;",
+ "description": "Named entity: Cfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u212d"
+ ]
+ ]
+ },
+ {
+ "input": "&Chi",
+ "description": "Bad named entity: Chi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Chi"
+ ]
+ ]
+ },
+ {
+ "input": "&Chi;",
+ "description": "Named entity: Chi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a7"
+ ]
+ ]
+ },
+ {
+ "input": "&CircleDot",
+ "description": "Bad named entity: CircleDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CircleDot"
+ ]
+ ]
+ },
+ {
+ "input": "&CircleDot;",
+ "description": "Named entity: CircleDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2299"
+ ]
+ ]
+ },
+ {
+ "input": "&CircleMinus",
+ "description": "Bad named entity: CircleMinus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CircleMinus"
+ ]
+ ]
+ },
+ {
+ "input": "&CircleMinus;",
+ "description": "Named entity: CircleMinus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2296"
+ ]
+ ]
+ },
+ {
+ "input": "&CirclePlus",
+ "description": "Bad named entity: CirclePlus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CirclePlus"
+ ]
+ ]
+ },
+ {
+ "input": "&CirclePlus;",
+ "description": "Named entity: CirclePlus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2295"
+ ]
+ ]
+ },
+ {
+ "input": "&CircleTimes",
+ "description": "Bad named entity: CircleTimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CircleTimes"
+ ]
+ ]
+ },
+ {
+ "input": "&CircleTimes;",
+ "description": "Named entity: CircleTimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2297"
+ ]
+ ]
+ },
+ {
+ "input": "&ClockwiseContourIntegral",
+ "description": "Bad named entity: ClockwiseContourIntegral without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ClockwiseContourIntegral"
+ ]
+ ]
+ },
+ {
+ "input": "&ClockwiseContourIntegral;",
+ "description": "Named entity: ClockwiseContourIntegral; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2232"
+ ]
+ ]
+ },
+ {
+ "input": "&CloseCurlyDoubleQuote",
+ "description": "Bad named entity: CloseCurlyDoubleQuote without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CloseCurlyDoubleQuote"
+ ]
+ ]
+ },
+ {
+ "input": "&CloseCurlyDoubleQuote;",
+ "description": "Named entity: CloseCurlyDoubleQuote; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201d"
+ ]
+ ]
+ },
+ {
+ "input": "&CloseCurlyQuote",
+ "description": "Bad named entity: CloseCurlyQuote without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CloseCurlyQuote"
+ ]
+ ]
+ },
+ {
+ "input": "&CloseCurlyQuote;",
+ "description": "Named entity: CloseCurlyQuote; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2019"
+ ]
+ ]
+ },
+ {
+ "input": "&Colon",
+ "description": "Bad named entity: Colon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Colon"
+ ]
+ ]
+ },
+ {
+ "input": "&Colon;",
+ "description": "Named entity: Colon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2237"
+ ]
+ ]
+ },
+ {
+ "input": "&Colone",
+ "description": "Bad named entity: Colone without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Colone"
+ ]
+ ]
+ },
+ {
+ "input": "&Colone;",
+ "description": "Named entity: Colone; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a74"
+ ]
+ ]
+ },
+ {
+ "input": "&Congruent",
+ "description": "Bad named entity: Congruent without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Congruent"
+ ]
+ ]
+ },
+ {
+ "input": "&Congruent;",
+ "description": "Named entity: Congruent; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2261"
+ ]
+ ]
+ },
+ {
+ "input": "&Conint",
+ "description": "Bad named entity: Conint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Conint"
+ ]
+ ]
+ },
+ {
+ "input": "&Conint;",
+ "description": "Named entity: Conint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222f"
+ ]
+ ]
+ },
+ {
+ "input": "&ContourIntegral",
+ "description": "Bad named entity: ContourIntegral without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ContourIntegral"
+ ]
+ ]
+ },
+ {
+ "input": "&ContourIntegral;",
+ "description": "Named entity: ContourIntegral; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222e"
+ ]
+ ]
+ },
+ {
+ "input": "&Copf",
+ "description": "Bad named entity: Copf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Copf"
+ ]
+ ]
+ },
+ {
+ "input": "&Copf;",
+ "description": "Named entity: Copf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2102"
+ ]
+ ]
+ },
+ {
+ "input": "&Coproduct",
+ "description": "Bad named entity: Coproduct without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Coproduct"
+ ]
+ ]
+ },
+ {
+ "input": "&Coproduct;",
+ "description": "Named entity: Coproduct; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2210"
+ ]
+ ]
+ },
+ {
+ "input": "&CounterClockwiseContourIntegral",
+ "description": "Bad named entity: CounterClockwiseContourIntegral without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CounterClockwiseContourIntegral"
+ ]
+ ]
+ },
+ {
+ "input": "&CounterClockwiseContourIntegral;",
+ "description": "Named entity: CounterClockwiseContourIntegral; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2233"
+ ]
+ ]
+ },
+ {
+ "input": "&Cross",
+ "description": "Bad named entity: Cross without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cross"
+ ]
+ ]
+ },
+ {
+ "input": "&Cross;",
+ "description": "Named entity: Cross; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a2f"
+ ]
+ ]
+ },
+ {
+ "input": "&Cscr",
+ "description": "Bad named entity: Cscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Cscr;",
+ "description": "Named entity: Cscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udc9e"
+ ]
+ ]
+ },
+ {
+ "input": "&Cup",
+ "description": "Bad named entity: Cup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Cup"
+ ]
+ ]
+ },
+ {
+ "input": "&Cup;",
+ "description": "Named entity: Cup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d3"
+ ]
+ ]
+ },
+ {
+ "input": "&CupCap",
+ "description": "Bad named entity: CupCap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&CupCap"
+ ]
+ ]
+ },
+ {
+ "input": "&CupCap;",
+ "description": "Named entity: CupCap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224d"
+ ]
+ ]
+ },
+ {
+ "input": "&DD",
+ "description": "Bad named entity: DD without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DD"
+ ]
+ ]
+ },
+ {
+ "input": "&DD;",
+ "description": "Named entity: DD; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2145"
+ ]
+ ]
+ },
+ {
+ "input": "&DDotrahd",
+ "description": "Bad named entity: DDotrahd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DDotrahd"
+ ]
+ ]
+ },
+ {
+ "input": "&DDotrahd;",
+ "description": "Named entity: DDotrahd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2911"
+ ]
+ ]
+ },
+ {
+ "input": "&DJcy",
+ "description": "Bad named entity: DJcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DJcy"
+ ]
+ ]
+ },
+ {
+ "input": "&DJcy;",
+ "description": "Named entity: DJcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0402"
+ ]
+ ]
+ },
+ {
+ "input": "&DScy",
+ "description": "Bad named entity: DScy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DScy"
+ ]
+ ]
+ },
+ {
+ "input": "&DScy;",
+ "description": "Named entity: DScy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0405"
+ ]
+ ]
+ },
+ {
+ "input": "&DZcy",
+ "description": "Bad named entity: DZcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DZcy"
+ ]
+ ]
+ },
+ {
+ "input": "&DZcy;",
+ "description": "Named entity: DZcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u040f"
+ ]
+ ]
+ },
+ {
+ "input": "&Dagger",
+ "description": "Bad named entity: Dagger without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dagger"
+ ]
+ ]
+ },
+ {
+ "input": "&Dagger;",
+ "description": "Named entity: Dagger; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2021"
+ ]
+ ]
+ },
+ {
+ "input": "&Darr",
+ "description": "Bad named entity: Darr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Darr"
+ ]
+ ]
+ },
+ {
+ "input": "&Darr;",
+ "description": "Named entity: Darr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a1"
+ ]
+ ]
+ },
+ {
+ "input": "&Dashv",
+ "description": "Bad named entity: Dashv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dashv"
+ ]
+ ]
+ },
+ {
+ "input": "&Dashv;",
+ "description": "Named entity: Dashv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ae4"
+ ]
+ ]
+ },
+ {
+ "input": "&Dcaron",
+ "description": "Bad named entity: Dcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Dcaron;",
+ "description": "Named entity: Dcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u010e"
+ ]
+ ]
+ },
+ {
+ "input": "&Dcy",
+ "description": "Bad named entity: Dcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Dcy;",
+ "description": "Named entity: Dcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0414"
+ ]
+ ]
+ },
+ {
+ "input": "&Del",
+ "description": "Bad named entity: Del without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Del"
+ ]
+ ]
+ },
+ {
+ "input": "&Del;",
+ "description": "Named entity: Del; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2207"
+ ]
+ ]
+ },
+ {
+ "input": "&Delta",
+ "description": "Bad named entity: Delta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Delta"
+ ]
+ ]
+ },
+ {
+ "input": "&Delta;",
+ "description": "Named entity: Delta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0394"
+ ]
+ ]
+ },
+ {
+ "input": "&Dfr",
+ "description": "Bad named entity: Dfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Dfr;",
+ "description": "Named entity: Dfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd07"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalAcute",
+ "description": "Bad named entity: DiacriticalAcute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DiacriticalAcute"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalAcute;",
+ "description": "Named entity: DiacriticalAcute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b4"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalDot",
+ "description": "Bad named entity: DiacriticalDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DiacriticalDot"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalDot;",
+ "description": "Named entity: DiacriticalDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02d9"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalDoubleAcute",
+ "description": "Bad named entity: DiacriticalDoubleAcute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DiacriticalDoubleAcute"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalDoubleAcute;",
+ "description": "Named entity: DiacriticalDoubleAcute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02dd"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalGrave",
+ "description": "Bad named entity: DiacriticalGrave without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DiacriticalGrave"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalGrave;",
+ "description": "Named entity: DiacriticalGrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "`"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalTilde",
+ "description": "Bad named entity: DiacriticalTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DiacriticalTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&DiacriticalTilde;",
+ "description": "Named entity: DiacriticalTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02dc"
+ ]
+ ]
+ },
+ {
+ "input": "&Diamond",
+ "description": "Bad named entity: Diamond without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Diamond"
+ ]
+ ]
+ },
+ {
+ "input": "&Diamond;",
+ "description": "Named entity: Diamond; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c4"
+ ]
+ ]
+ },
+ {
+ "input": "&DifferentialD",
+ "description": "Bad named entity: DifferentialD without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DifferentialD"
+ ]
+ ]
+ },
+ {
+ "input": "&DifferentialD;",
+ "description": "Named entity: DifferentialD; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2146"
+ ]
+ ]
+ },
+ {
+ "input": "&Dopf",
+ "description": "Bad named entity: Dopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Dopf;",
+ "description": "Named entity: Dopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd3b"
+ ]
+ ]
+ },
+ {
+ "input": "&Dot",
+ "description": "Bad named entity: Dot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dot"
+ ]
+ ]
+ },
+ {
+ "input": "&Dot;",
+ "description": "Named entity: Dot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a8"
+ ]
+ ]
+ },
+ {
+ "input": "&DotDot",
+ "description": "Bad named entity: DotDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DotDot"
+ ]
+ ]
+ },
+ {
+ "input": "&DotDot;",
+ "description": "Named entity: DotDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u20dc"
+ ]
+ ]
+ },
+ {
+ "input": "&DotEqual",
+ "description": "Bad named entity: DotEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DotEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&DotEqual;",
+ "description": "Named entity: DotEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2250"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleContourIntegral",
+ "description": "Bad named entity: DoubleContourIntegral without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleContourIntegral"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleContourIntegral;",
+ "description": "Named entity: DoubleContourIntegral; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222f"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleDot",
+ "description": "Bad named entity: DoubleDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleDot"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleDot;",
+ "description": "Named entity: DoubleDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a8"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleDownArrow",
+ "description": "Bad named entity: DoubleDownArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleDownArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleDownArrow;",
+ "description": "Named entity: DoubleDownArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d3"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLeftArrow",
+ "description": "Bad named entity: DoubleLeftArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleLeftArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLeftArrow;",
+ "description": "Named entity: DoubleLeftArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d0"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLeftRightArrow",
+ "description": "Bad named entity: DoubleLeftRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleLeftRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLeftRightArrow;",
+ "description": "Named entity: DoubleLeftRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d4"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLeftTee",
+ "description": "Bad named entity: DoubleLeftTee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleLeftTee"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLeftTee;",
+ "description": "Named entity: DoubleLeftTee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ae4"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLongLeftArrow",
+ "description": "Bad named entity: DoubleLongLeftArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleLongLeftArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLongLeftArrow;",
+ "description": "Named entity: DoubleLongLeftArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f8"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLongLeftRightArrow",
+ "description": "Bad named entity: DoubleLongLeftRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleLongLeftRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLongLeftRightArrow;",
+ "description": "Named entity: DoubleLongLeftRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27fa"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLongRightArrow",
+ "description": "Bad named entity: DoubleLongRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleLongRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleLongRightArrow;",
+ "description": "Named entity: DoubleLongRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f9"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleRightArrow",
+ "description": "Bad named entity: DoubleRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleRightArrow;",
+ "description": "Named entity: DoubleRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d2"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleRightTee",
+ "description": "Bad named entity: DoubleRightTee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleRightTee"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleRightTee;",
+ "description": "Named entity: DoubleRightTee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a8"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleUpArrow",
+ "description": "Bad named entity: DoubleUpArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleUpArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleUpArrow;",
+ "description": "Named entity: DoubleUpArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d1"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleUpDownArrow",
+ "description": "Bad named entity: DoubleUpDownArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleUpDownArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleUpDownArrow;",
+ "description": "Named entity: DoubleUpDownArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d5"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleVerticalBar",
+ "description": "Bad named entity: DoubleVerticalBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DoubleVerticalBar"
+ ]
+ ]
+ },
+ {
+ "input": "&DoubleVerticalBar;",
+ "description": "Named entity: DoubleVerticalBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2225"
+ ]
+ ]
+ },
+ {
+ "input": "&DownArrow",
+ "description": "Bad named entity: DownArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DownArrow;",
+ "description": "Named entity: DownArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2193"
+ ]
+ ]
+ },
+ {
+ "input": "&DownArrowBar",
+ "description": "Bad named entity: DownArrowBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownArrowBar"
+ ]
+ ]
+ },
+ {
+ "input": "&DownArrowBar;",
+ "description": "Named entity: DownArrowBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2913"
+ ]
+ ]
+ },
+ {
+ "input": "&DownArrowUpArrow",
+ "description": "Bad named entity: DownArrowUpArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownArrowUpArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DownArrowUpArrow;",
+ "description": "Named entity: DownArrowUpArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21f5"
+ ]
+ ]
+ },
+ {
+ "input": "&DownBreve",
+ "description": "Bad named entity: DownBreve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownBreve"
+ ]
+ ]
+ },
+ {
+ "input": "&DownBreve;",
+ "description": "Named entity: DownBreve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0311"
+ ]
+ ]
+ },
+ {
+ "input": "&DownLeftRightVector",
+ "description": "Bad named entity: DownLeftRightVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownLeftRightVector"
+ ]
+ ]
+ },
+ {
+ "input": "&DownLeftRightVector;",
+ "description": "Named entity: DownLeftRightVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2950"
+ ]
+ ]
+ },
+ {
+ "input": "&DownLeftTeeVector",
+ "description": "Bad named entity: DownLeftTeeVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownLeftTeeVector"
+ ]
+ ]
+ },
+ {
+ "input": "&DownLeftTeeVector;",
+ "description": "Named entity: DownLeftTeeVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u295e"
+ ]
+ ]
+ },
+ {
+ "input": "&DownLeftVector",
+ "description": "Bad named entity: DownLeftVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownLeftVector"
+ ]
+ ]
+ },
+ {
+ "input": "&DownLeftVector;",
+ "description": "Named entity: DownLeftVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bd"
+ ]
+ ]
+ },
+ {
+ "input": "&DownLeftVectorBar",
+ "description": "Bad named entity: DownLeftVectorBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownLeftVectorBar"
+ ]
+ ]
+ },
+ {
+ "input": "&DownLeftVectorBar;",
+ "description": "Named entity: DownLeftVectorBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2956"
+ ]
+ ]
+ },
+ {
+ "input": "&DownRightTeeVector",
+ "description": "Bad named entity: DownRightTeeVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownRightTeeVector"
+ ]
+ ]
+ },
+ {
+ "input": "&DownRightTeeVector;",
+ "description": "Named entity: DownRightTeeVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u295f"
+ ]
+ ]
+ },
+ {
+ "input": "&DownRightVector",
+ "description": "Bad named entity: DownRightVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownRightVector"
+ ]
+ ]
+ },
+ {
+ "input": "&DownRightVector;",
+ "description": "Named entity: DownRightVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c1"
+ ]
+ ]
+ },
+ {
+ "input": "&DownRightVectorBar",
+ "description": "Bad named entity: DownRightVectorBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownRightVectorBar"
+ ]
+ ]
+ },
+ {
+ "input": "&DownRightVectorBar;",
+ "description": "Named entity: DownRightVectorBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2957"
+ ]
+ ]
+ },
+ {
+ "input": "&DownTee",
+ "description": "Bad named entity: DownTee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownTee"
+ ]
+ ]
+ },
+ {
+ "input": "&DownTee;",
+ "description": "Named entity: DownTee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a4"
+ ]
+ ]
+ },
+ {
+ "input": "&DownTeeArrow",
+ "description": "Bad named entity: DownTeeArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&DownTeeArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&DownTeeArrow;",
+ "description": "Named entity: DownTeeArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a7"
+ ]
+ ]
+ },
+ {
+ "input": "&Downarrow",
+ "description": "Bad named entity: Downarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Downarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Downarrow;",
+ "description": "Named entity: Downarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d3"
+ ]
+ ]
+ },
+ {
+ "input": "&Dscr",
+ "description": "Bad named entity: Dscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Dscr;",
+ "description": "Named entity: Dscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udc9f"
+ ]
+ ]
+ },
+ {
+ "input": "&Dstrok",
+ "description": "Bad named entity: Dstrok without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Dstrok"
+ ]
+ ]
+ },
+ {
+ "input": "&Dstrok;",
+ "description": "Named entity: Dstrok; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0110"
+ ]
+ ]
+ },
+ {
+ "input": "&ENG",
+ "description": "Bad named entity: ENG without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ENG"
+ ]
+ ]
+ },
+ {
+ "input": "&ENG;",
+ "description": "Named entity: ENG; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u014a"
+ ]
+ ]
+ },
+ {
+ "input": "&ETH",
+ "description": "Named entity: ETH without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d0"
+ ]
+ ]
+ },
+ {
+ "input": "&ETH;",
+ "description": "Named entity: ETH; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d0"
+ ]
+ ]
+ },
+ {
+ "input": "&Eacute",
+ "description": "Named entity: Eacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c9"
+ ]
+ ]
+ },
+ {
+ "input": "&Eacute;",
+ "description": "Named entity: Eacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c9"
+ ]
+ ]
+ },
+ {
+ "input": "&Ecaron",
+ "description": "Bad named entity: Ecaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ecaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Ecaron;",
+ "description": "Named entity: Ecaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u011a"
+ ]
+ ]
+ },
+ {
+ "input": "&Ecirc",
+ "description": "Named entity: Ecirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ca"
+ ]
+ ]
+ },
+ {
+ "input": "&Ecirc;",
+ "description": "Named entity: Ecirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ca"
+ ]
+ ]
+ },
+ {
+ "input": "&Ecy",
+ "description": "Bad named entity: Ecy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ecy"
+ ]
+ ]
+ },
+ {
+ "input": "&Ecy;",
+ "description": "Named entity: Ecy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u042d"
+ ]
+ ]
+ },
+ {
+ "input": "&Edot",
+ "description": "Bad named entity: Edot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Edot"
+ ]
+ ]
+ },
+ {
+ "input": "&Edot;",
+ "description": "Named entity: Edot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0116"
+ ]
+ ]
+ },
+ {
+ "input": "&Efr",
+ "description": "Bad named entity: Efr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Efr"
+ ]
+ ]
+ },
+ {
+ "input": "&Efr;",
+ "description": "Named entity: Efr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd08"
+ ]
+ ]
+ },
+ {
+ "input": "&Egrave",
+ "description": "Named entity: Egrave without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00c8"
+ ]
+ ]
+ },
+ {
+ "input": "&Egrave;",
+ "description": "Named entity: Egrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c8"
+ ]
+ ]
+ },
+ {
+ "input": "&Element",
+ "description": "Bad named entity: Element without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Element"
+ ]
+ ]
+ },
+ {
+ "input": "&Element;",
+ "description": "Named entity: Element; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2208"
+ ]
+ ]
+ },
+ {
+ "input": "&Emacr",
+ "description": "Bad named entity: Emacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Emacr"
+ ]
+ ]
+ },
+ {
+ "input": "&Emacr;",
+ "description": "Named entity: Emacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0112"
+ ]
+ ]
+ },
+ {
+ "input": "&EmptySmallSquare",
+ "description": "Bad named entity: EmptySmallSquare without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&EmptySmallSquare"
+ ]
+ ]
+ },
+ {
+ "input": "&EmptySmallSquare;",
+ "description": "Named entity: EmptySmallSquare; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25fb"
+ ]
+ ]
+ },
+ {
+ "input": "&EmptyVerySmallSquare",
+ "description": "Bad named entity: EmptyVerySmallSquare without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&EmptyVerySmallSquare"
+ ]
+ ]
+ },
+ {
+ "input": "&EmptyVerySmallSquare;",
+ "description": "Named entity: EmptyVerySmallSquare; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25ab"
+ ]
+ ]
+ },
+ {
+ "input": "&Eogon",
+ "description": "Bad named entity: Eogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Eogon"
+ ]
+ ]
+ },
+ {
+ "input": "&Eogon;",
+ "description": "Named entity: Eogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0118"
+ ]
+ ]
+ },
+ {
+ "input": "&Eopf",
+ "description": "Bad named entity: Eopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Eopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Eopf;",
+ "description": "Named entity: Eopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd3c"
+ ]
+ ]
+ },
+ {
+ "input": "&Epsilon",
+ "description": "Bad named entity: Epsilon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Epsilon"
+ ]
+ ]
+ },
+ {
+ "input": "&Epsilon;",
+ "description": "Named entity: Epsilon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0395"
+ ]
+ ]
+ },
+ {
+ "input": "&Equal",
+ "description": "Bad named entity: Equal without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Equal"
+ ]
+ ]
+ },
+ {
+ "input": "&Equal;",
+ "description": "Named entity: Equal; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a75"
+ ]
+ ]
+ },
+ {
+ "input": "&EqualTilde",
+ "description": "Bad named entity: EqualTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&EqualTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&EqualTilde;",
+ "description": "Named entity: EqualTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2242"
+ ]
+ ]
+ },
+ {
+ "input": "&Equilibrium",
+ "description": "Bad named entity: Equilibrium without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Equilibrium"
+ ]
+ ]
+ },
+ {
+ "input": "&Equilibrium;",
+ "description": "Named entity: Equilibrium; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cc"
+ ]
+ ]
+ },
+ {
+ "input": "&Escr",
+ "description": "Bad named entity: Escr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Escr"
+ ]
+ ]
+ },
+ {
+ "input": "&Escr;",
+ "description": "Named entity: Escr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2130"
+ ]
+ ]
+ },
+ {
+ "input": "&Esim",
+ "description": "Bad named entity: Esim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Esim"
+ ]
+ ]
+ },
+ {
+ "input": "&Esim;",
+ "description": "Named entity: Esim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a73"
+ ]
+ ]
+ },
+ {
+ "input": "&Eta",
+ "description": "Bad named entity: Eta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Eta"
+ ]
+ ]
+ },
+ {
+ "input": "&Eta;",
+ "description": "Named entity: Eta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0397"
+ ]
+ ]
+ },
+ {
+ "input": "&Euml",
+ "description": "Named entity: Euml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00cb"
+ ]
+ ]
+ },
+ {
+ "input": "&Euml;",
+ "description": "Named entity: Euml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00cb"
+ ]
+ ]
+ },
+ {
+ "input": "&Exists",
+ "description": "Bad named entity: Exists without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Exists"
+ ]
+ ]
+ },
+ {
+ "input": "&Exists;",
+ "description": "Named entity: Exists; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2203"
+ ]
+ ]
+ },
+ {
+ "input": "&ExponentialE",
+ "description": "Bad named entity: ExponentialE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ExponentialE"
+ ]
+ ]
+ },
+ {
+ "input": "&ExponentialE;",
+ "description": "Named entity: ExponentialE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2147"
+ ]
+ ]
+ },
+ {
+ "input": "&Fcy",
+ "description": "Bad named entity: Fcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Fcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Fcy;",
+ "description": "Named entity: Fcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0424"
+ ]
+ ]
+ },
+ {
+ "input": "&Ffr",
+ "description": "Bad named entity: Ffr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ffr"
+ ]
+ ]
+ },
+ {
+ "input": "&Ffr;",
+ "description": "Named entity: Ffr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd09"
+ ]
+ ]
+ },
+ {
+ "input": "&FilledSmallSquare",
+ "description": "Bad named entity: FilledSmallSquare without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&FilledSmallSquare"
+ ]
+ ]
+ },
+ {
+ "input": "&FilledSmallSquare;",
+ "description": "Named entity: FilledSmallSquare; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25fc"
+ ]
+ ]
+ },
+ {
+ "input": "&FilledVerySmallSquare",
+ "description": "Bad named entity: FilledVerySmallSquare without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&FilledVerySmallSquare"
+ ]
+ ]
+ },
+ {
+ "input": "&FilledVerySmallSquare;",
+ "description": "Named entity: FilledVerySmallSquare; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25aa"
+ ]
+ ]
+ },
+ {
+ "input": "&Fopf",
+ "description": "Bad named entity: Fopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Fopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Fopf;",
+ "description": "Named entity: Fopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd3d"
+ ]
+ ]
+ },
+ {
+ "input": "&ForAll",
+ "description": "Bad named entity: ForAll without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ForAll"
+ ]
+ ]
+ },
+ {
+ "input": "&ForAll;",
+ "description": "Named entity: ForAll; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2200"
+ ]
+ ]
+ },
+ {
+ "input": "&Fouriertrf",
+ "description": "Bad named entity: Fouriertrf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Fouriertrf"
+ ]
+ ]
+ },
+ {
+ "input": "&Fouriertrf;",
+ "description": "Named entity: Fouriertrf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2131"
+ ]
+ ]
+ },
+ {
+ "input": "&Fscr",
+ "description": "Bad named entity: Fscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Fscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Fscr;",
+ "description": "Named entity: Fscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2131"
+ ]
+ ]
+ },
+ {
+ "input": "&GJcy",
+ "description": "Bad named entity: GJcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&GJcy"
+ ]
+ ]
+ },
+ {
+ "input": "&GJcy;",
+ "description": "Named entity: GJcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0403"
+ ]
+ ]
+ },
+ {
+ "input": "&GT",
+ "description": "Named entity: GT without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ ">"
+ ]
+ ]
+ },
+ {
+ "input": "&GT;",
+ "description": "Named entity: GT; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ ">"
+ ]
+ ]
+ },
+ {
+ "input": "&Gamma",
+ "description": "Bad named entity: Gamma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gamma"
+ ]
+ ]
+ },
+ {
+ "input": "&Gamma;",
+ "description": "Named entity: Gamma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0393"
+ ]
+ ]
+ },
+ {
+ "input": "&Gammad",
+ "description": "Bad named entity: Gammad without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gammad"
+ ]
+ ]
+ },
+ {
+ "input": "&Gammad;",
+ "description": "Named entity: Gammad; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03dc"
+ ]
+ ]
+ },
+ {
+ "input": "&Gbreve",
+ "description": "Bad named entity: Gbreve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gbreve"
+ ]
+ ]
+ },
+ {
+ "input": "&Gbreve;",
+ "description": "Named entity: Gbreve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u011e"
+ ]
+ ]
+ },
+ {
+ "input": "&Gcedil",
+ "description": "Bad named entity: Gcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&Gcedil;",
+ "description": "Named entity: Gcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0122"
+ ]
+ ]
+ },
+ {
+ "input": "&Gcirc",
+ "description": "Bad named entity: Gcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&Gcirc;",
+ "description": "Named entity: Gcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u011c"
+ ]
+ ]
+ },
+ {
+ "input": "&Gcy",
+ "description": "Bad named entity: Gcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Gcy;",
+ "description": "Named entity: Gcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0413"
+ ]
+ ]
+ },
+ {
+ "input": "&Gdot",
+ "description": "Bad named entity: Gdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gdot"
+ ]
+ ]
+ },
+ {
+ "input": "&Gdot;",
+ "description": "Named entity: Gdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0120"
+ ]
+ ]
+ },
+ {
+ "input": "&Gfr",
+ "description": "Bad named entity: Gfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Gfr;",
+ "description": "Named entity: Gfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd0a"
+ ]
+ ]
+ },
+ {
+ "input": "&Gg",
+ "description": "Bad named entity: Gg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gg"
+ ]
+ ]
+ },
+ {
+ "input": "&Gg;",
+ "description": "Named entity: Gg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d9"
+ ]
+ ]
+ },
+ {
+ "input": "&Gopf",
+ "description": "Bad named entity: Gopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Gopf;",
+ "description": "Named entity: Gopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd3e"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterEqual",
+ "description": "Bad named entity: GreaterEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&GreaterEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterEqual;",
+ "description": "Named entity: GreaterEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2265"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterEqualLess",
+ "description": "Bad named entity: GreaterEqualLess without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&GreaterEqualLess"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterEqualLess;",
+ "description": "Named entity: GreaterEqualLess; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22db"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterFullEqual",
+ "description": "Bad named entity: GreaterFullEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&GreaterFullEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterFullEqual;",
+ "description": "Named entity: GreaterFullEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2267"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterGreater",
+ "description": "Bad named entity: GreaterGreater without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&GreaterGreater"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterGreater;",
+ "description": "Named entity: GreaterGreater; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa2"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterLess",
+ "description": "Bad named entity: GreaterLess without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&GreaterLess"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterLess;",
+ "description": "Named entity: GreaterLess; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2277"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterSlantEqual",
+ "description": "Bad named entity: GreaterSlantEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&GreaterSlantEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterSlantEqual;",
+ "description": "Named entity: GreaterSlantEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7e"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterTilde",
+ "description": "Bad named entity: GreaterTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&GreaterTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&GreaterTilde;",
+ "description": "Named entity: GreaterTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2273"
+ ]
+ ]
+ },
+ {
+ "input": "&Gscr",
+ "description": "Bad named entity: Gscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Gscr;",
+ "description": "Named entity: Gscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udca2"
+ ]
+ ]
+ },
+ {
+ "input": "&Gt",
+ "description": "Bad named entity: Gt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Gt"
+ ]
+ ]
+ },
+ {
+ "input": "&Gt;",
+ "description": "Named entity: Gt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226b"
+ ]
+ ]
+ },
+ {
+ "input": "&HARDcy",
+ "description": "Bad named entity: HARDcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&HARDcy"
+ ]
+ ]
+ },
+ {
+ "input": "&HARDcy;",
+ "description": "Named entity: HARDcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u042a"
+ ]
+ ]
+ },
+ {
+ "input": "&Hacek",
+ "description": "Bad named entity: Hacek without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Hacek"
+ ]
+ ]
+ },
+ {
+ "input": "&Hacek;",
+ "description": "Named entity: Hacek; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02c7"
+ ]
+ ]
+ },
+ {
+ "input": "&Hat",
+ "description": "Bad named entity: Hat without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Hat"
+ ]
+ ]
+ },
+ {
+ "input": "&Hat;",
+ "description": "Named entity: Hat; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "^"
+ ]
+ ]
+ },
+ {
+ "input": "&Hcirc",
+ "description": "Bad named entity: Hcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Hcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&Hcirc;",
+ "description": "Named entity: Hcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0124"
+ ]
+ ]
+ },
+ {
+ "input": "&Hfr",
+ "description": "Bad named entity: Hfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Hfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Hfr;",
+ "description": "Named entity: Hfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210c"
+ ]
+ ]
+ },
+ {
+ "input": "&HilbertSpace",
+ "description": "Bad named entity: HilbertSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&HilbertSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&HilbertSpace;",
+ "description": "Named entity: HilbertSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210b"
+ ]
+ ]
+ },
+ {
+ "input": "&Hopf",
+ "description": "Bad named entity: Hopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Hopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Hopf;",
+ "description": "Named entity: Hopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210d"
+ ]
+ ]
+ },
+ {
+ "input": "&HorizontalLine",
+ "description": "Bad named entity: HorizontalLine without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&HorizontalLine"
+ ]
+ ]
+ },
+ {
+ "input": "&HorizontalLine;",
+ "description": "Named entity: HorizontalLine; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2500"
+ ]
+ ]
+ },
+ {
+ "input": "&Hscr",
+ "description": "Bad named entity: Hscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Hscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Hscr;",
+ "description": "Named entity: Hscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210b"
+ ]
+ ]
+ },
+ {
+ "input": "&Hstrok",
+ "description": "Bad named entity: Hstrok without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Hstrok"
+ ]
+ ]
+ },
+ {
+ "input": "&Hstrok;",
+ "description": "Named entity: Hstrok; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0126"
+ ]
+ ]
+ },
+ {
+ "input": "&HumpDownHump",
+ "description": "Bad named entity: HumpDownHump without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&HumpDownHump"
+ ]
+ ]
+ },
+ {
+ "input": "&HumpDownHump;",
+ "description": "Named entity: HumpDownHump; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224e"
+ ]
+ ]
+ },
+ {
+ "input": "&HumpEqual",
+ "description": "Bad named entity: HumpEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&HumpEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&HumpEqual;",
+ "description": "Named entity: HumpEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224f"
+ ]
+ ]
+ },
+ {
+ "input": "&IEcy",
+ "description": "Bad named entity: IEcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&IEcy"
+ ]
+ ]
+ },
+ {
+ "input": "&IEcy;",
+ "description": "Named entity: IEcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0415"
+ ]
+ ]
+ },
+ {
+ "input": "&IJlig",
+ "description": "Bad named entity: IJlig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&IJlig"
+ ]
+ ]
+ },
+ {
+ "input": "&IJlig;",
+ "description": "Named entity: IJlig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0132"
+ ]
+ ]
+ },
+ {
+ "input": "&IOcy",
+ "description": "Bad named entity: IOcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&IOcy"
+ ]
+ ]
+ },
+ {
+ "input": "&IOcy;",
+ "description": "Named entity: IOcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0401"
+ ]
+ ]
+ },
+ {
+ "input": "&Iacute",
+ "description": "Named entity: Iacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00cd"
+ ]
+ ]
+ },
+ {
+ "input": "&Iacute;",
+ "description": "Named entity: Iacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00cd"
+ ]
+ ]
+ },
+ {
+ "input": "&Icirc",
+ "description": "Named entity: Icirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ce"
+ ]
+ ]
+ },
+ {
+ "input": "&Icirc;",
+ "description": "Named entity: Icirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ce"
+ ]
+ ]
+ },
+ {
+ "input": "&Icy",
+ "description": "Bad named entity: Icy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Icy"
+ ]
+ ]
+ },
+ {
+ "input": "&Icy;",
+ "description": "Named entity: Icy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0418"
+ ]
+ ]
+ },
+ {
+ "input": "&Idot",
+ "description": "Bad named entity: Idot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Idot"
+ ]
+ ]
+ },
+ {
+ "input": "&Idot;",
+ "description": "Named entity: Idot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0130"
+ ]
+ ]
+ },
+ {
+ "input": "&Ifr",
+ "description": "Bad named entity: Ifr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ifr"
+ ]
+ ]
+ },
+ {
+ "input": "&Ifr;",
+ "description": "Named entity: Ifr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2111"
+ ]
+ ]
+ },
+ {
+ "input": "&Igrave",
+ "description": "Named entity: Igrave without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00cc"
+ ]
+ ]
+ },
+ {
+ "input": "&Igrave;",
+ "description": "Named entity: Igrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00cc"
+ ]
+ ]
+ },
+ {
+ "input": "&Im",
+ "description": "Bad named entity: Im without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Im"
+ ]
+ ]
+ },
+ {
+ "input": "&Im;",
+ "description": "Named entity: Im; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2111"
+ ]
+ ]
+ },
+ {
+ "input": "&Imacr",
+ "description": "Bad named entity: Imacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Imacr"
+ ]
+ ]
+ },
+ {
+ "input": "&Imacr;",
+ "description": "Named entity: Imacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u012a"
+ ]
+ ]
+ },
+ {
+ "input": "&ImaginaryI",
+ "description": "Bad named entity: ImaginaryI without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ImaginaryI"
+ ]
+ ]
+ },
+ {
+ "input": "&ImaginaryI;",
+ "description": "Named entity: ImaginaryI; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2148"
+ ]
+ ]
+ },
+ {
+ "input": "&Implies",
+ "description": "Bad named entity: Implies without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Implies"
+ ]
+ ]
+ },
+ {
+ "input": "&Implies;",
+ "description": "Named entity: Implies; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d2"
+ ]
+ ]
+ },
+ {
+ "input": "&Int",
+ "description": "Bad named entity: Int without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Int"
+ ]
+ ]
+ },
+ {
+ "input": "&Int;",
+ "description": "Named entity: Int; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222c"
+ ]
+ ]
+ },
+ {
+ "input": "&Integral",
+ "description": "Bad named entity: Integral without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Integral"
+ ]
+ ]
+ },
+ {
+ "input": "&Integral;",
+ "description": "Named entity: Integral; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222b"
+ ]
+ ]
+ },
+ {
+ "input": "&Intersection",
+ "description": "Bad named entity: Intersection without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Intersection"
+ ]
+ ]
+ },
+ {
+ "input": "&Intersection;",
+ "description": "Named entity: Intersection; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c2"
+ ]
+ ]
+ },
+ {
+ "input": "&InvisibleComma",
+ "description": "Bad named entity: InvisibleComma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&InvisibleComma"
+ ]
+ ]
+ },
+ {
+ "input": "&InvisibleComma;",
+ "description": "Named entity: InvisibleComma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2063"
+ ]
+ ]
+ },
+ {
+ "input": "&InvisibleTimes",
+ "description": "Bad named entity: InvisibleTimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&InvisibleTimes"
+ ]
+ ]
+ },
+ {
+ "input": "&InvisibleTimes;",
+ "description": "Named entity: InvisibleTimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2062"
+ ]
+ ]
+ },
+ {
+ "input": "&Iogon",
+ "description": "Bad named entity: Iogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Iogon"
+ ]
+ ]
+ },
+ {
+ "input": "&Iogon;",
+ "description": "Named entity: Iogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u012e"
+ ]
+ ]
+ },
+ {
+ "input": "&Iopf",
+ "description": "Bad named entity: Iopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Iopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Iopf;",
+ "description": "Named entity: Iopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd40"
+ ]
+ ]
+ },
+ {
+ "input": "&Iota",
+ "description": "Bad named entity: Iota without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Iota"
+ ]
+ ]
+ },
+ {
+ "input": "&Iota;",
+ "description": "Named entity: Iota; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0399"
+ ]
+ ]
+ },
+ {
+ "input": "&Iscr",
+ "description": "Bad named entity: Iscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Iscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Iscr;",
+ "description": "Named entity: Iscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2110"
+ ]
+ ]
+ },
+ {
+ "input": "&Itilde",
+ "description": "Bad named entity: Itilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Itilde"
+ ]
+ ]
+ },
+ {
+ "input": "&Itilde;",
+ "description": "Named entity: Itilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0128"
+ ]
+ ]
+ },
+ {
+ "input": "&Iukcy",
+ "description": "Bad named entity: Iukcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Iukcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Iukcy;",
+ "description": "Named entity: Iukcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0406"
+ ]
+ ]
+ },
+ {
+ "input": "&Iuml",
+ "description": "Named entity: Iuml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00cf"
+ ]
+ ]
+ },
+ {
+ "input": "&Iuml;",
+ "description": "Named entity: Iuml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00cf"
+ ]
+ ]
+ },
+ {
+ "input": "&Jcirc",
+ "description": "Bad named entity: Jcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Jcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&Jcirc;",
+ "description": "Named entity: Jcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0134"
+ ]
+ ]
+ },
+ {
+ "input": "&Jcy",
+ "description": "Bad named entity: Jcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Jcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Jcy;",
+ "description": "Named entity: Jcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0419"
+ ]
+ ]
+ },
+ {
+ "input": "&Jfr",
+ "description": "Bad named entity: Jfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Jfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Jfr;",
+ "description": "Named entity: Jfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd0d"
+ ]
+ ]
+ },
+ {
+ "input": "&Jopf",
+ "description": "Bad named entity: Jopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Jopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Jopf;",
+ "description": "Named entity: Jopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd41"
+ ]
+ ]
+ },
+ {
+ "input": "&Jscr",
+ "description": "Bad named entity: Jscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Jscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Jscr;",
+ "description": "Named entity: Jscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udca5"
+ ]
+ ]
+ },
+ {
+ "input": "&Jsercy",
+ "description": "Bad named entity: Jsercy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Jsercy"
+ ]
+ ]
+ },
+ {
+ "input": "&Jsercy;",
+ "description": "Named entity: Jsercy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0408"
+ ]
+ ]
+ },
+ {
+ "input": "&Jukcy",
+ "description": "Bad named entity: Jukcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Jukcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Jukcy;",
+ "description": "Named entity: Jukcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0404"
+ ]
+ ]
+ },
+ {
+ "input": "&KHcy",
+ "description": "Bad named entity: KHcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&KHcy"
+ ]
+ ]
+ },
+ {
+ "input": "&KHcy;",
+ "description": "Named entity: KHcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0425"
+ ]
+ ]
+ },
+ {
+ "input": "&KJcy",
+ "description": "Bad named entity: KJcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&KJcy"
+ ]
+ ]
+ },
+ {
+ "input": "&KJcy;",
+ "description": "Named entity: KJcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u040c"
+ ]
+ ]
+ },
+ {
+ "input": "&Kappa",
+ "description": "Bad named entity: Kappa without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Kappa"
+ ]
+ ]
+ },
+ {
+ "input": "&Kappa;",
+ "description": "Named entity: Kappa; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u039a"
+ ]
+ ]
+ },
+ {
+ "input": "&Kcedil",
+ "description": "Bad named entity: Kcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Kcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&Kcedil;",
+ "description": "Named entity: Kcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0136"
+ ]
+ ]
+ },
+ {
+ "input": "&Kcy",
+ "description": "Bad named entity: Kcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Kcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Kcy;",
+ "description": "Named entity: Kcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u041a"
+ ]
+ ]
+ },
+ {
+ "input": "&Kfr",
+ "description": "Bad named entity: Kfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Kfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Kfr;",
+ "description": "Named entity: Kfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd0e"
+ ]
+ ]
+ },
+ {
+ "input": "&Kopf",
+ "description": "Bad named entity: Kopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Kopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Kopf;",
+ "description": "Named entity: Kopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd42"
+ ]
+ ]
+ },
+ {
+ "input": "&Kscr",
+ "description": "Bad named entity: Kscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Kscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Kscr;",
+ "description": "Named entity: Kscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udca6"
+ ]
+ ]
+ },
+ {
+ "input": "&LJcy",
+ "description": "Bad named entity: LJcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LJcy"
+ ]
+ ]
+ },
+ {
+ "input": "&LJcy;",
+ "description": "Named entity: LJcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0409"
+ ]
+ ]
+ },
+ {
+ "input": "&LT",
+ "description": "Named entity: LT without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "<"
+ ]
+ ]
+ },
+ {
+ "input": "&LT;",
+ "description": "Named entity: LT; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "<"
+ ]
+ ]
+ },
+ {
+ "input": "&Lacute",
+ "description": "Bad named entity: Lacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lacute"
+ ]
+ ]
+ },
+ {
+ "input": "&Lacute;",
+ "description": "Named entity: Lacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0139"
+ ]
+ ]
+ },
+ {
+ "input": "&Lambda",
+ "description": "Bad named entity: Lambda without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lambda"
+ ]
+ ]
+ },
+ {
+ "input": "&Lambda;",
+ "description": "Named entity: Lambda; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u039b"
+ ]
+ ]
+ },
+ {
+ "input": "&Lang",
+ "description": "Bad named entity: Lang without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lang"
+ ]
+ ]
+ },
+ {
+ "input": "&Lang;",
+ "description": "Named entity: Lang; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27ea"
+ ]
+ ]
+ },
+ {
+ "input": "&Laplacetrf",
+ "description": "Bad named entity: Laplacetrf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Laplacetrf"
+ ]
+ ]
+ },
+ {
+ "input": "&Laplacetrf;",
+ "description": "Named entity: Laplacetrf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2112"
+ ]
+ ]
+ },
+ {
+ "input": "&Larr",
+ "description": "Bad named entity: Larr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Larr"
+ ]
+ ]
+ },
+ {
+ "input": "&Larr;",
+ "description": "Named entity: Larr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219e"
+ ]
+ ]
+ },
+ {
+ "input": "&Lcaron",
+ "description": "Bad named entity: Lcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Lcaron;",
+ "description": "Named entity: Lcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u013d"
+ ]
+ ]
+ },
+ {
+ "input": "&Lcedil",
+ "description": "Bad named entity: Lcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&Lcedil;",
+ "description": "Named entity: Lcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u013b"
+ ]
+ ]
+ },
+ {
+ "input": "&Lcy",
+ "description": "Bad named entity: Lcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Lcy;",
+ "description": "Named entity: Lcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u041b"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftAngleBracket",
+ "description": "Bad named entity: LeftAngleBracket without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftAngleBracket"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftAngleBracket;",
+ "description": "Named entity: LeftAngleBracket; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e8"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftArrow",
+ "description": "Bad named entity: LeftArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftArrow;",
+ "description": "Named entity: LeftArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2190"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftArrowBar",
+ "description": "Bad named entity: LeftArrowBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftArrowBar"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftArrowBar;",
+ "description": "Named entity: LeftArrowBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21e4"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftArrowRightArrow",
+ "description": "Bad named entity: LeftArrowRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftArrowRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftArrowRightArrow;",
+ "description": "Named entity: LeftArrowRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c6"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftCeiling",
+ "description": "Bad named entity: LeftCeiling without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftCeiling"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftCeiling;",
+ "description": "Named entity: LeftCeiling; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2308"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftDoubleBracket",
+ "description": "Bad named entity: LeftDoubleBracket without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftDoubleBracket"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftDoubleBracket;",
+ "description": "Named entity: LeftDoubleBracket; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e6"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftDownTeeVector",
+ "description": "Bad named entity: LeftDownTeeVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftDownTeeVector"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftDownTeeVector;",
+ "description": "Named entity: LeftDownTeeVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2961"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftDownVector",
+ "description": "Bad named entity: LeftDownVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftDownVector"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftDownVector;",
+ "description": "Named entity: LeftDownVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c3"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftDownVectorBar",
+ "description": "Bad named entity: LeftDownVectorBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftDownVectorBar"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftDownVectorBar;",
+ "description": "Named entity: LeftDownVectorBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2959"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftFloor",
+ "description": "Bad named entity: LeftFloor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftFloor"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftFloor;",
+ "description": "Named entity: LeftFloor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u230a"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftRightArrow",
+ "description": "Bad named entity: LeftRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftRightArrow;",
+ "description": "Named entity: LeftRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2194"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftRightVector",
+ "description": "Bad named entity: LeftRightVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftRightVector"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftRightVector;",
+ "description": "Named entity: LeftRightVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u294e"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTee",
+ "description": "Bad named entity: LeftTee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftTee"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTee;",
+ "description": "Named entity: LeftTee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a3"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTeeArrow",
+ "description": "Bad named entity: LeftTeeArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftTeeArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTeeArrow;",
+ "description": "Named entity: LeftTeeArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a4"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTeeVector",
+ "description": "Bad named entity: LeftTeeVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftTeeVector"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTeeVector;",
+ "description": "Named entity: LeftTeeVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u295a"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTriangle",
+ "description": "Bad named entity: LeftTriangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftTriangle"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTriangle;",
+ "description": "Named entity: LeftTriangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b2"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTriangleBar",
+ "description": "Bad named entity: LeftTriangleBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftTriangleBar"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTriangleBar;",
+ "description": "Named entity: LeftTriangleBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29cf"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTriangleEqual",
+ "description": "Bad named entity: LeftTriangleEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftTriangleEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftTriangleEqual;",
+ "description": "Named entity: LeftTriangleEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b4"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftUpDownVector",
+ "description": "Bad named entity: LeftUpDownVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftUpDownVector"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftUpDownVector;",
+ "description": "Named entity: LeftUpDownVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2951"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftUpTeeVector",
+ "description": "Bad named entity: LeftUpTeeVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftUpTeeVector"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftUpTeeVector;",
+ "description": "Named entity: LeftUpTeeVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2960"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftUpVector",
+ "description": "Bad named entity: LeftUpVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftUpVector"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftUpVector;",
+ "description": "Named entity: LeftUpVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bf"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftUpVectorBar",
+ "description": "Bad named entity: LeftUpVectorBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftUpVectorBar"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftUpVectorBar;",
+ "description": "Named entity: LeftUpVectorBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2958"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftVector",
+ "description": "Bad named entity: LeftVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftVector"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftVector;",
+ "description": "Named entity: LeftVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bc"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftVectorBar",
+ "description": "Bad named entity: LeftVectorBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LeftVectorBar"
+ ]
+ ]
+ },
+ {
+ "input": "&LeftVectorBar;",
+ "description": "Named entity: LeftVectorBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2952"
+ ]
+ ]
+ },
+ {
+ "input": "&Leftarrow",
+ "description": "Bad named entity: Leftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Leftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Leftarrow;",
+ "description": "Named entity: Leftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d0"
+ ]
+ ]
+ },
+ {
+ "input": "&Leftrightarrow",
+ "description": "Bad named entity: Leftrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Leftrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Leftrightarrow;",
+ "description": "Named entity: Leftrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d4"
+ ]
+ ]
+ },
+ {
+ "input": "&LessEqualGreater",
+ "description": "Bad named entity: LessEqualGreater without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LessEqualGreater"
+ ]
+ ]
+ },
+ {
+ "input": "&LessEqualGreater;",
+ "description": "Named entity: LessEqualGreater; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22da"
+ ]
+ ]
+ },
+ {
+ "input": "&LessFullEqual",
+ "description": "Bad named entity: LessFullEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LessFullEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&LessFullEqual;",
+ "description": "Named entity: LessFullEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2266"
+ ]
+ ]
+ },
+ {
+ "input": "&LessGreater",
+ "description": "Bad named entity: LessGreater without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LessGreater"
+ ]
+ ]
+ },
+ {
+ "input": "&LessGreater;",
+ "description": "Named entity: LessGreater; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2276"
+ ]
+ ]
+ },
+ {
+ "input": "&LessLess",
+ "description": "Bad named entity: LessLess without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LessLess"
+ ]
+ ]
+ },
+ {
+ "input": "&LessLess;",
+ "description": "Named entity: LessLess; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa1"
+ ]
+ ]
+ },
+ {
+ "input": "&LessSlantEqual",
+ "description": "Bad named entity: LessSlantEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LessSlantEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&LessSlantEqual;",
+ "description": "Named entity: LessSlantEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7d"
+ ]
+ ]
+ },
+ {
+ "input": "&LessTilde",
+ "description": "Bad named entity: LessTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LessTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&LessTilde;",
+ "description": "Named entity: LessTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2272"
+ ]
+ ]
+ },
+ {
+ "input": "&Lfr",
+ "description": "Bad named entity: Lfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Lfr;",
+ "description": "Named entity: Lfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd0f"
+ ]
+ ]
+ },
+ {
+ "input": "&Ll",
+ "description": "Bad named entity: Ll without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ll"
+ ]
+ ]
+ },
+ {
+ "input": "&Ll;",
+ "description": "Named entity: Ll; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d8"
+ ]
+ ]
+ },
+ {
+ "input": "&Lleftarrow",
+ "description": "Bad named entity: Lleftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lleftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Lleftarrow;",
+ "description": "Named entity: Lleftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21da"
+ ]
+ ]
+ },
+ {
+ "input": "&Lmidot",
+ "description": "Bad named entity: Lmidot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lmidot"
+ ]
+ ]
+ },
+ {
+ "input": "&Lmidot;",
+ "description": "Named entity: Lmidot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u013f"
+ ]
+ ]
+ },
+ {
+ "input": "&LongLeftArrow",
+ "description": "Bad named entity: LongLeftArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LongLeftArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LongLeftArrow;",
+ "description": "Named entity: LongLeftArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f5"
+ ]
+ ]
+ },
+ {
+ "input": "&LongLeftRightArrow",
+ "description": "Bad named entity: LongLeftRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LongLeftRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LongLeftRightArrow;",
+ "description": "Named entity: LongLeftRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f7"
+ ]
+ ]
+ },
+ {
+ "input": "&LongRightArrow",
+ "description": "Bad named entity: LongRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LongRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LongRightArrow;",
+ "description": "Named entity: LongRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f6"
+ ]
+ ]
+ },
+ {
+ "input": "&Longleftarrow",
+ "description": "Bad named entity: Longleftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Longleftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Longleftarrow;",
+ "description": "Named entity: Longleftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f8"
+ ]
+ ]
+ },
+ {
+ "input": "&Longleftrightarrow",
+ "description": "Bad named entity: Longleftrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Longleftrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Longleftrightarrow;",
+ "description": "Named entity: Longleftrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27fa"
+ ]
+ ]
+ },
+ {
+ "input": "&Longrightarrow",
+ "description": "Bad named entity: Longrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Longrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Longrightarrow;",
+ "description": "Named entity: Longrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f9"
+ ]
+ ]
+ },
+ {
+ "input": "&Lopf",
+ "description": "Bad named entity: Lopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Lopf;",
+ "description": "Named entity: Lopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd43"
+ ]
+ ]
+ },
+ {
+ "input": "&LowerLeftArrow",
+ "description": "Bad named entity: LowerLeftArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LowerLeftArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LowerLeftArrow;",
+ "description": "Named entity: LowerLeftArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2199"
+ ]
+ ]
+ },
+ {
+ "input": "&LowerRightArrow",
+ "description": "Bad named entity: LowerRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&LowerRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&LowerRightArrow;",
+ "description": "Named entity: LowerRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2198"
+ ]
+ ]
+ },
+ {
+ "input": "&Lscr",
+ "description": "Bad named entity: Lscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Lscr;",
+ "description": "Named entity: Lscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2112"
+ ]
+ ]
+ },
+ {
+ "input": "&Lsh",
+ "description": "Bad named entity: Lsh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lsh"
+ ]
+ ]
+ },
+ {
+ "input": "&Lsh;",
+ "description": "Named entity: Lsh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b0"
+ ]
+ ]
+ },
+ {
+ "input": "&Lstrok",
+ "description": "Bad named entity: Lstrok without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lstrok"
+ ]
+ ]
+ },
+ {
+ "input": "&Lstrok;",
+ "description": "Named entity: Lstrok; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0141"
+ ]
+ ]
+ },
+ {
+ "input": "&Lt",
+ "description": "Bad named entity: Lt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Lt"
+ ]
+ ]
+ },
+ {
+ "input": "&Lt;",
+ "description": "Named entity: Lt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226a"
+ ]
+ ]
+ },
+ {
+ "input": "&Map",
+ "description": "Bad named entity: Map without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Map"
+ ]
+ ]
+ },
+ {
+ "input": "&Map;",
+ "description": "Named entity: Map; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2905"
+ ]
+ ]
+ },
+ {
+ "input": "&Mcy",
+ "description": "Bad named entity: Mcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Mcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Mcy;",
+ "description": "Named entity: Mcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u041c"
+ ]
+ ]
+ },
+ {
+ "input": "&MediumSpace",
+ "description": "Bad named entity: MediumSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&MediumSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&MediumSpace;",
+ "description": "Named entity: MediumSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u205f"
+ ]
+ ]
+ },
+ {
+ "input": "&Mellintrf",
+ "description": "Bad named entity: Mellintrf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Mellintrf"
+ ]
+ ]
+ },
+ {
+ "input": "&Mellintrf;",
+ "description": "Named entity: Mellintrf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2133"
+ ]
+ ]
+ },
+ {
+ "input": "&Mfr",
+ "description": "Bad named entity: Mfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Mfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Mfr;",
+ "description": "Named entity: Mfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd10"
+ ]
+ ]
+ },
+ {
+ "input": "&MinusPlus",
+ "description": "Bad named entity: MinusPlus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&MinusPlus"
+ ]
+ ]
+ },
+ {
+ "input": "&MinusPlus;",
+ "description": "Named entity: MinusPlus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2213"
+ ]
+ ]
+ },
+ {
+ "input": "&Mopf",
+ "description": "Bad named entity: Mopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Mopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Mopf;",
+ "description": "Named entity: Mopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd44"
+ ]
+ ]
+ },
+ {
+ "input": "&Mscr",
+ "description": "Bad named entity: Mscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Mscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Mscr;",
+ "description": "Named entity: Mscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2133"
+ ]
+ ]
+ },
+ {
+ "input": "&Mu",
+ "description": "Bad named entity: Mu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Mu"
+ ]
+ ]
+ },
+ {
+ "input": "&Mu;",
+ "description": "Named entity: Mu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u039c"
+ ]
+ ]
+ },
+ {
+ "input": "&NJcy",
+ "description": "Bad named entity: NJcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NJcy"
+ ]
+ ]
+ },
+ {
+ "input": "&NJcy;",
+ "description": "Named entity: NJcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u040a"
+ ]
+ ]
+ },
+ {
+ "input": "&Nacute",
+ "description": "Bad named entity: Nacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Nacute"
+ ]
+ ]
+ },
+ {
+ "input": "&Nacute;",
+ "description": "Named entity: Nacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0143"
+ ]
+ ]
+ },
+ {
+ "input": "&Ncaron",
+ "description": "Bad named entity: Ncaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ncaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Ncaron;",
+ "description": "Named entity: Ncaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0147"
+ ]
+ ]
+ },
+ {
+ "input": "&Ncedil",
+ "description": "Bad named entity: Ncedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ncedil"
+ ]
+ ]
+ },
+ {
+ "input": "&Ncedil;",
+ "description": "Named entity: Ncedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0145"
+ ]
+ ]
+ },
+ {
+ "input": "&Ncy",
+ "description": "Bad named entity: Ncy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ncy"
+ ]
+ ]
+ },
+ {
+ "input": "&Ncy;",
+ "description": "Named entity: Ncy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u041d"
+ ]
+ ]
+ },
+ {
+ "input": "&NegativeMediumSpace",
+ "description": "Bad named entity: NegativeMediumSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NegativeMediumSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&NegativeMediumSpace;",
+ "description": "Named entity: NegativeMediumSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200b"
+ ]
+ ]
+ },
+ {
+ "input": "&NegativeThickSpace",
+ "description": "Bad named entity: NegativeThickSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NegativeThickSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&NegativeThickSpace;",
+ "description": "Named entity: NegativeThickSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200b"
+ ]
+ ]
+ },
+ {
+ "input": "&NegativeThinSpace",
+ "description": "Bad named entity: NegativeThinSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NegativeThinSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&NegativeThinSpace;",
+ "description": "Named entity: NegativeThinSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200b"
+ ]
+ ]
+ },
+ {
+ "input": "&NegativeVeryThinSpace",
+ "description": "Bad named entity: NegativeVeryThinSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NegativeVeryThinSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&NegativeVeryThinSpace;",
+ "description": "Named entity: NegativeVeryThinSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200b"
+ ]
+ ]
+ },
+ {
+ "input": "&NestedGreaterGreater",
+ "description": "Bad named entity: NestedGreaterGreater without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NestedGreaterGreater"
+ ]
+ ]
+ },
+ {
+ "input": "&NestedGreaterGreater;",
+ "description": "Named entity: NestedGreaterGreater; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226b"
+ ]
+ ]
+ },
+ {
+ "input": "&NestedLessLess",
+ "description": "Bad named entity: NestedLessLess without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NestedLessLess"
+ ]
+ ]
+ },
+ {
+ "input": "&NestedLessLess;",
+ "description": "Named entity: NestedLessLess; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226a"
+ ]
+ ]
+ },
+ {
+ "input": "&NewLine",
+ "description": "Bad named entity: NewLine without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NewLine"
+ ]
+ ]
+ },
+ {
+ "input": "&NewLine;",
+ "description": "Named entity: NewLine; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\n"
+ ]
+ ]
+ },
+ {
+ "input": "&Nfr",
+ "description": "Bad named entity: Nfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Nfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Nfr;",
+ "description": "Named entity: Nfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd11"
+ ]
+ ]
+ },
+ {
+ "input": "&NoBreak",
+ "description": "Bad named entity: NoBreak without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NoBreak"
+ ]
+ ]
+ },
+ {
+ "input": "&NoBreak;",
+ "description": "Named entity: NoBreak; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2060"
+ ]
+ ]
+ },
+ {
+ "input": "&NonBreakingSpace",
+ "description": "Bad named entity: NonBreakingSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NonBreakingSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&NonBreakingSpace;",
+ "description": "Named entity: NonBreakingSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a0"
+ ]
+ ]
+ },
+ {
+ "input": "&Nopf",
+ "description": "Bad named entity: Nopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Nopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Nopf;",
+ "description": "Named entity: Nopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2115"
+ ]
+ ]
+ },
+ {
+ "input": "&Not",
+ "description": "Bad named entity: Not without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Not"
+ ]
+ ]
+ },
+ {
+ "input": "&Not;",
+ "description": "Named entity: Not; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aec"
+ ]
+ ]
+ },
+ {
+ "input": "&NotCongruent",
+ "description": "Bad named entity: NotCongruent without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotCongruent"
+ ]
+ ]
+ },
+ {
+ "input": "&NotCongruent;",
+ "description": "Named entity: NotCongruent; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2262"
+ ]
+ ]
+ },
+ {
+ "input": "&NotCupCap",
+ "description": "Bad named entity: NotCupCap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotCupCap"
+ ]
+ ]
+ },
+ {
+ "input": "&NotCupCap;",
+ "description": "Named entity: NotCupCap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226d"
+ ]
+ ]
+ },
+ {
+ "input": "&NotDoubleVerticalBar",
+ "description": "Bad named entity: NotDoubleVerticalBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotDoubleVerticalBar"
+ ]
+ ]
+ },
+ {
+ "input": "&NotDoubleVerticalBar;",
+ "description": "Named entity: NotDoubleVerticalBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2226"
+ ]
+ ]
+ },
+ {
+ "input": "&NotElement",
+ "description": "Bad named entity: NotElement without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotElement"
+ ]
+ ]
+ },
+ {
+ "input": "&NotElement;",
+ "description": "Named entity: NotElement; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2209"
+ ]
+ ]
+ },
+ {
+ "input": "&NotEqual",
+ "description": "Bad named entity: NotEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotEqual;",
+ "description": "Named entity: NotEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2260"
+ ]
+ ]
+ },
+ {
+ "input": "&NotEqualTilde",
+ "description": "Bad named entity: NotEqualTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotEqualTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&NotEqualTilde;",
+ "description": "Named entity: NotEqualTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2242\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotExists",
+ "description": "Bad named entity: NotExists without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotExists"
+ ]
+ ]
+ },
+ {
+ "input": "&NotExists;",
+ "description": "Named entity: NotExists; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2204"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreater",
+ "description": "Bad named entity: NotGreater without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotGreater"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreater;",
+ "description": "Named entity: NotGreater; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226f"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterEqual",
+ "description": "Bad named entity: NotGreaterEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotGreaterEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterEqual;",
+ "description": "Named entity: NotGreaterEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2271"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterFullEqual",
+ "description": "Bad named entity: NotGreaterFullEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotGreaterFullEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterFullEqual;",
+ "description": "Named entity: NotGreaterFullEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2267\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterGreater",
+ "description": "Bad named entity: NotGreaterGreater without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotGreaterGreater"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterGreater;",
+ "description": "Named entity: NotGreaterGreater; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226b\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterLess",
+ "description": "Bad named entity: NotGreaterLess without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotGreaterLess"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterLess;",
+ "description": "Named entity: NotGreaterLess; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2279"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterSlantEqual",
+ "description": "Bad named entity: NotGreaterSlantEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotGreaterSlantEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterSlantEqual;",
+ "description": "Named entity: NotGreaterSlantEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7e\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterTilde",
+ "description": "Bad named entity: NotGreaterTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotGreaterTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&NotGreaterTilde;",
+ "description": "Named entity: NotGreaterTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2275"
+ ]
+ ]
+ },
+ {
+ "input": "&NotHumpDownHump",
+ "description": "Bad named entity: NotHumpDownHump without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotHumpDownHump"
+ ]
+ ]
+ },
+ {
+ "input": "&NotHumpDownHump;",
+ "description": "Named entity: NotHumpDownHump; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224e\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotHumpEqual",
+ "description": "Bad named entity: NotHumpEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotHumpEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotHumpEqual;",
+ "description": "Named entity: NotHumpEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224f\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLeftTriangle",
+ "description": "Bad named entity: NotLeftTriangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLeftTriangle"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLeftTriangle;",
+ "description": "Named entity: NotLeftTriangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ea"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLeftTriangleBar",
+ "description": "Bad named entity: NotLeftTriangleBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLeftTriangleBar"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLeftTriangleBar;",
+ "description": "Named entity: NotLeftTriangleBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29cf\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLeftTriangleEqual",
+ "description": "Bad named entity: NotLeftTriangleEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLeftTriangleEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLeftTriangleEqual;",
+ "description": "Named entity: NotLeftTriangleEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ec"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLess",
+ "description": "Bad named entity: NotLess without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLess"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLess;",
+ "description": "Named entity: NotLess; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226e"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessEqual",
+ "description": "Bad named entity: NotLessEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLessEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessEqual;",
+ "description": "Named entity: NotLessEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2270"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessGreater",
+ "description": "Bad named entity: NotLessGreater without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLessGreater"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessGreater;",
+ "description": "Named entity: NotLessGreater; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2278"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessLess",
+ "description": "Bad named entity: NotLessLess without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLessLess"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessLess;",
+ "description": "Named entity: NotLessLess; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226a\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessSlantEqual",
+ "description": "Bad named entity: NotLessSlantEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLessSlantEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessSlantEqual;",
+ "description": "Named entity: NotLessSlantEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7d\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessTilde",
+ "description": "Bad named entity: NotLessTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotLessTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&NotLessTilde;",
+ "description": "Named entity: NotLessTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2274"
+ ]
+ ]
+ },
+ {
+ "input": "&NotNestedGreaterGreater",
+ "description": "Bad named entity: NotNestedGreaterGreater without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotNestedGreaterGreater"
+ ]
+ ]
+ },
+ {
+ "input": "&NotNestedGreaterGreater;",
+ "description": "Named entity: NotNestedGreaterGreater; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa2\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotNestedLessLess",
+ "description": "Bad named entity: NotNestedLessLess without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotNestedLessLess"
+ ]
+ ]
+ },
+ {
+ "input": "&NotNestedLessLess;",
+ "description": "Named entity: NotNestedLessLess; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa1\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotPrecedes",
+ "description": "Bad named entity: NotPrecedes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotPrecedes"
+ ]
+ ]
+ },
+ {
+ "input": "&NotPrecedes;",
+ "description": "Named entity: NotPrecedes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2280"
+ ]
+ ]
+ },
+ {
+ "input": "&NotPrecedesEqual",
+ "description": "Bad named entity: NotPrecedesEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotPrecedesEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotPrecedesEqual;",
+ "description": "Named entity: NotPrecedesEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aaf\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotPrecedesSlantEqual",
+ "description": "Bad named entity: NotPrecedesSlantEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotPrecedesSlantEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotPrecedesSlantEqual;",
+ "description": "Named entity: NotPrecedesSlantEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e0"
+ ]
+ ]
+ },
+ {
+ "input": "&NotReverseElement",
+ "description": "Bad named entity: NotReverseElement without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotReverseElement"
+ ]
+ ]
+ },
+ {
+ "input": "&NotReverseElement;",
+ "description": "Named entity: NotReverseElement; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220c"
+ ]
+ ]
+ },
+ {
+ "input": "&NotRightTriangle",
+ "description": "Bad named entity: NotRightTriangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotRightTriangle"
+ ]
+ ]
+ },
+ {
+ "input": "&NotRightTriangle;",
+ "description": "Named entity: NotRightTriangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22eb"
+ ]
+ ]
+ },
+ {
+ "input": "&NotRightTriangleBar",
+ "description": "Bad named entity: NotRightTriangleBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotRightTriangleBar"
+ ]
+ ]
+ },
+ {
+ "input": "&NotRightTriangleBar;",
+ "description": "Named entity: NotRightTriangleBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29d0\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotRightTriangleEqual",
+ "description": "Bad named entity: NotRightTriangleEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotRightTriangleEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotRightTriangleEqual;",
+ "description": "Named entity: NotRightTriangleEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ed"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSquareSubset",
+ "description": "Bad named entity: NotSquareSubset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSquareSubset"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSquareSubset;",
+ "description": "Named entity: NotSquareSubset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228f\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSquareSubsetEqual",
+ "description": "Bad named entity: NotSquareSubsetEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSquareSubsetEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSquareSubsetEqual;",
+ "description": "Named entity: NotSquareSubsetEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e2"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSquareSuperset",
+ "description": "Bad named entity: NotSquareSuperset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSquareSuperset"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSquareSuperset;",
+ "description": "Named entity: NotSquareSuperset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2290\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSquareSupersetEqual",
+ "description": "Bad named entity: NotSquareSupersetEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSquareSupersetEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSquareSupersetEqual;",
+ "description": "Named entity: NotSquareSupersetEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e3"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSubset",
+ "description": "Bad named entity: NotSubset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSubset"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSubset;",
+ "description": "Named entity: NotSubset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2282\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSubsetEqual",
+ "description": "Bad named entity: NotSubsetEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSubsetEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSubsetEqual;",
+ "description": "Named entity: NotSubsetEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2288"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSucceeds",
+ "description": "Bad named entity: NotSucceeds without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSucceeds"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSucceeds;",
+ "description": "Named entity: NotSucceeds; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2281"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSucceedsEqual",
+ "description": "Bad named entity: NotSucceedsEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSucceedsEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSucceedsEqual;",
+ "description": "Named entity: NotSucceedsEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab0\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSucceedsSlantEqual",
+ "description": "Bad named entity: NotSucceedsSlantEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSucceedsSlantEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSucceedsSlantEqual;",
+ "description": "Named entity: NotSucceedsSlantEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e1"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSucceedsTilde",
+ "description": "Bad named entity: NotSucceedsTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSucceedsTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSucceedsTilde;",
+ "description": "Named entity: NotSucceedsTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227f\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSuperset",
+ "description": "Bad named entity: NotSuperset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSuperset"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSuperset;",
+ "description": "Named entity: NotSuperset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2283\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSupersetEqual",
+ "description": "Bad named entity: NotSupersetEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotSupersetEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotSupersetEqual;",
+ "description": "Named entity: NotSupersetEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2289"
+ ]
+ ]
+ },
+ {
+ "input": "&NotTilde",
+ "description": "Bad named entity: NotTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&NotTilde;",
+ "description": "Named entity: NotTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2241"
+ ]
+ ]
+ },
+ {
+ "input": "&NotTildeEqual",
+ "description": "Bad named entity: NotTildeEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotTildeEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotTildeEqual;",
+ "description": "Named entity: NotTildeEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2244"
+ ]
+ ]
+ },
+ {
+ "input": "&NotTildeFullEqual",
+ "description": "Bad named entity: NotTildeFullEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotTildeFullEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&NotTildeFullEqual;",
+ "description": "Named entity: NotTildeFullEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2247"
+ ]
+ ]
+ },
+ {
+ "input": "&NotTildeTilde",
+ "description": "Bad named entity: NotTildeTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotTildeTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&NotTildeTilde;",
+ "description": "Named entity: NotTildeTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2249"
+ ]
+ ]
+ },
+ {
+ "input": "&NotVerticalBar",
+ "description": "Bad named entity: NotVerticalBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&NotVerticalBar"
+ ]
+ ]
+ },
+ {
+ "input": "&NotVerticalBar;",
+ "description": "Named entity: NotVerticalBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2224"
+ ]
+ ]
+ },
+ {
+ "input": "&Nscr",
+ "description": "Bad named entity: Nscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Nscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Nscr;",
+ "description": "Named entity: Nscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udca9"
+ ]
+ ]
+ },
+ {
+ "input": "&Ntilde",
+ "description": "Named entity: Ntilde without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d1"
+ ]
+ ]
+ },
+ {
+ "input": "&Ntilde;",
+ "description": "Named entity: Ntilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d1"
+ ]
+ ]
+ },
+ {
+ "input": "&Nu",
+ "description": "Bad named entity: Nu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Nu"
+ ]
+ ]
+ },
+ {
+ "input": "&Nu;",
+ "description": "Named entity: Nu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u039d"
+ ]
+ ]
+ },
+ {
+ "input": "&OElig",
+ "description": "Bad named entity: OElig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&OElig"
+ ]
+ ]
+ },
+ {
+ "input": "&OElig;",
+ "description": "Named entity: OElig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0152"
+ ]
+ ]
+ },
+ {
+ "input": "&Oacute",
+ "description": "Named entity: Oacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d3"
+ ]
+ ]
+ },
+ {
+ "input": "&Oacute;",
+ "description": "Named entity: Oacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d3"
+ ]
+ ]
+ },
+ {
+ "input": "&Ocirc",
+ "description": "Named entity: Ocirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d4"
+ ]
+ ]
+ },
+ {
+ "input": "&Ocirc;",
+ "description": "Named entity: Ocirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d4"
+ ]
+ ]
+ },
+ {
+ "input": "&Ocy",
+ "description": "Bad named entity: Ocy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ocy"
+ ]
+ ]
+ },
+ {
+ "input": "&Ocy;",
+ "description": "Named entity: Ocy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u041e"
+ ]
+ ]
+ },
+ {
+ "input": "&Odblac",
+ "description": "Bad named entity: Odblac without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Odblac"
+ ]
+ ]
+ },
+ {
+ "input": "&Odblac;",
+ "description": "Named entity: Odblac; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0150"
+ ]
+ ]
+ },
+ {
+ "input": "&Ofr",
+ "description": "Bad named entity: Ofr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ofr"
+ ]
+ ]
+ },
+ {
+ "input": "&Ofr;",
+ "description": "Named entity: Ofr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd12"
+ ]
+ ]
+ },
+ {
+ "input": "&Ograve",
+ "description": "Named entity: Ograve without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d2"
+ ]
+ ]
+ },
+ {
+ "input": "&Ograve;",
+ "description": "Named entity: Ograve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d2"
+ ]
+ ]
+ },
+ {
+ "input": "&Omacr",
+ "description": "Bad named entity: Omacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Omacr"
+ ]
+ ]
+ },
+ {
+ "input": "&Omacr;",
+ "description": "Named entity: Omacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u014c"
+ ]
+ ]
+ },
+ {
+ "input": "&Omega",
+ "description": "Bad named entity: Omega without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Omega"
+ ]
+ ]
+ },
+ {
+ "input": "&Omega;",
+ "description": "Named entity: Omega; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a9"
+ ]
+ ]
+ },
+ {
+ "input": "&Omicron",
+ "description": "Bad named entity: Omicron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Omicron"
+ ]
+ ]
+ },
+ {
+ "input": "&Omicron;",
+ "description": "Named entity: Omicron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u039f"
+ ]
+ ]
+ },
+ {
+ "input": "&Oopf",
+ "description": "Bad named entity: Oopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Oopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Oopf;",
+ "description": "Named entity: Oopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd46"
+ ]
+ ]
+ },
+ {
+ "input": "&OpenCurlyDoubleQuote",
+ "description": "Bad named entity: OpenCurlyDoubleQuote without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&OpenCurlyDoubleQuote"
+ ]
+ ]
+ },
+ {
+ "input": "&OpenCurlyDoubleQuote;",
+ "description": "Named entity: OpenCurlyDoubleQuote; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201c"
+ ]
+ ]
+ },
+ {
+ "input": "&OpenCurlyQuote",
+ "description": "Bad named entity: OpenCurlyQuote without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&OpenCurlyQuote"
+ ]
+ ]
+ },
+ {
+ "input": "&OpenCurlyQuote;",
+ "description": "Named entity: OpenCurlyQuote; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2018"
+ ]
+ ]
+ },
+ {
+ "input": "&Or",
+ "description": "Bad named entity: Or without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Or"
+ ]
+ ]
+ },
+ {
+ "input": "&Or;",
+ "description": "Named entity: Or; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a54"
+ ]
+ ]
+ },
+ {
+ "input": "&Oscr",
+ "description": "Bad named entity: Oscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Oscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Oscr;",
+ "description": "Named entity: Oscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcaa"
+ ]
+ ]
+ },
+ {
+ "input": "&Oslash",
+ "description": "Named entity: Oslash without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d8"
+ ]
+ ]
+ },
+ {
+ "input": "&Oslash;",
+ "description": "Named entity: Oslash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d8"
+ ]
+ ]
+ },
+ {
+ "input": "&Otilde",
+ "description": "Named entity: Otilde without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d5"
+ ]
+ ]
+ },
+ {
+ "input": "&Otilde;",
+ "description": "Named entity: Otilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d5"
+ ]
+ ]
+ },
+ {
+ "input": "&Otimes",
+ "description": "Bad named entity: Otimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Otimes"
+ ]
+ ]
+ },
+ {
+ "input": "&Otimes;",
+ "description": "Named entity: Otimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a37"
+ ]
+ ]
+ },
+ {
+ "input": "&Ouml",
+ "description": "Named entity: Ouml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d6"
+ ]
+ ]
+ },
+ {
+ "input": "&Ouml;",
+ "description": "Named entity: Ouml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d6"
+ ]
+ ]
+ },
+ {
+ "input": "&OverBar",
+ "description": "Bad named entity: OverBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&OverBar"
+ ]
+ ]
+ },
+ {
+ "input": "&OverBar;",
+ "description": "Named entity: OverBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u203e"
+ ]
+ ]
+ },
+ {
+ "input": "&OverBrace",
+ "description": "Bad named entity: OverBrace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&OverBrace"
+ ]
+ ]
+ },
+ {
+ "input": "&OverBrace;",
+ "description": "Named entity: OverBrace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23de"
+ ]
+ ]
+ },
+ {
+ "input": "&OverBracket",
+ "description": "Bad named entity: OverBracket without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&OverBracket"
+ ]
+ ]
+ },
+ {
+ "input": "&OverBracket;",
+ "description": "Named entity: OverBracket; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b4"
+ ]
+ ]
+ },
+ {
+ "input": "&OverParenthesis",
+ "description": "Bad named entity: OverParenthesis without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&OverParenthesis"
+ ]
+ ]
+ },
+ {
+ "input": "&OverParenthesis;",
+ "description": "Named entity: OverParenthesis; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23dc"
+ ]
+ ]
+ },
+ {
+ "input": "&PartialD",
+ "description": "Bad named entity: PartialD without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&PartialD"
+ ]
+ ]
+ },
+ {
+ "input": "&PartialD;",
+ "description": "Named entity: PartialD; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2202"
+ ]
+ ]
+ },
+ {
+ "input": "&Pcy",
+ "description": "Bad named entity: Pcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Pcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Pcy;",
+ "description": "Named entity: Pcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u041f"
+ ]
+ ]
+ },
+ {
+ "input": "&Pfr",
+ "description": "Bad named entity: Pfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Pfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Pfr;",
+ "description": "Named entity: Pfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd13"
+ ]
+ ]
+ },
+ {
+ "input": "&Phi",
+ "description": "Bad named entity: Phi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Phi"
+ ]
+ ]
+ },
+ {
+ "input": "&Phi;",
+ "description": "Named entity: Phi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a6"
+ ]
+ ]
+ },
+ {
+ "input": "&Pi",
+ "description": "Bad named entity: Pi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Pi"
+ ]
+ ]
+ },
+ {
+ "input": "&Pi;",
+ "description": "Named entity: Pi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a0"
+ ]
+ ]
+ },
+ {
+ "input": "&PlusMinus",
+ "description": "Bad named entity: PlusMinus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&PlusMinus"
+ ]
+ ]
+ },
+ {
+ "input": "&PlusMinus;",
+ "description": "Named entity: PlusMinus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b1"
+ ]
+ ]
+ },
+ {
+ "input": "&Poincareplane",
+ "description": "Bad named entity: Poincareplane without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Poincareplane"
+ ]
+ ]
+ },
+ {
+ "input": "&Poincareplane;",
+ "description": "Named entity: Poincareplane; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210c"
+ ]
+ ]
+ },
+ {
+ "input": "&Popf",
+ "description": "Bad named entity: Popf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Popf"
+ ]
+ ]
+ },
+ {
+ "input": "&Popf;",
+ "description": "Named entity: Popf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2119"
+ ]
+ ]
+ },
+ {
+ "input": "&Pr",
+ "description": "Bad named entity: Pr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Pr"
+ ]
+ ]
+ },
+ {
+ "input": "&Pr;",
+ "description": "Named entity: Pr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2abb"
+ ]
+ ]
+ },
+ {
+ "input": "&Precedes",
+ "description": "Bad named entity: Precedes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Precedes"
+ ]
+ ]
+ },
+ {
+ "input": "&Precedes;",
+ "description": "Named entity: Precedes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227a"
+ ]
+ ]
+ },
+ {
+ "input": "&PrecedesEqual",
+ "description": "Bad named entity: PrecedesEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&PrecedesEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&PrecedesEqual;",
+ "description": "Named entity: PrecedesEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aaf"
+ ]
+ ]
+ },
+ {
+ "input": "&PrecedesSlantEqual",
+ "description": "Bad named entity: PrecedesSlantEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&PrecedesSlantEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&PrecedesSlantEqual;",
+ "description": "Named entity: PrecedesSlantEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227c"
+ ]
+ ]
+ },
+ {
+ "input": "&PrecedesTilde",
+ "description": "Bad named entity: PrecedesTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&PrecedesTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&PrecedesTilde;",
+ "description": "Named entity: PrecedesTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227e"
+ ]
+ ]
+ },
+ {
+ "input": "&Prime",
+ "description": "Bad named entity: Prime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Prime"
+ ]
+ ]
+ },
+ {
+ "input": "&Prime;",
+ "description": "Named entity: Prime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2033"
+ ]
+ ]
+ },
+ {
+ "input": "&Product",
+ "description": "Bad named entity: Product without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Product"
+ ]
+ ]
+ },
+ {
+ "input": "&Product;",
+ "description": "Named entity: Product; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220f"
+ ]
+ ]
+ },
+ {
+ "input": "&Proportion",
+ "description": "Bad named entity: Proportion without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Proportion"
+ ]
+ ]
+ },
+ {
+ "input": "&Proportion;",
+ "description": "Named entity: Proportion; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2237"
+ ]
+ ]
+ },
+ {
+ "input": "&Proportional",
+ "description": "Bad named entity: Proportional without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Proportional"
+ ]
+ ]
+ },
+ {
+ "input": "&Proportional;",
+ "description": "Named entity: Proportional; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221d"
+ ]
+ ]
+ },
+ {
+ "input": "&Pscr",
+ "description": "Bad named entity: Pscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Pscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Pscr;",
+ "description": "Named entity: Pscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcab"
+ ]
+ ]
+ },
+ {
+ "input": "&Psi",
+ "description": "Bad named entity: Psi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Psi"
+ ]
+ ]
+ },
+ {
+ "input": "&Psi;",
+ "description": "Named entity: Psi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a8"
+ ]
+ ]
+ },
+ {
+ "input": "&QUOT",
+ "description": "Named entity: QUOT without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\""
+ ]
+ ]
+ },
+ {
+ "input": "&QUOT;",
+ "description": "Named entity: QUOT; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\""
+ ]
+ ]
+ },
+ {
+ "input": "&Qfr",
+ "description": "Bad named entity: Qfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Qfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Qfr;",
+ "description": "Named entity: Qfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd14"
+ ]
+ ]
+ },
+ {
+ "input": "&Qopf",
+ "description": "Bad named entity: Qopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Qopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Qopf;",
+ "description": "Named entity: Qopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211a"
+ ]
+ ]
+ },
+ {
+ "input": "&Qscr",
+ "description": "Bad named entity: Qscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Qscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Qscr;",
+ "description": "Named entity: Qscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcac"
+ ]
+ ]
+ },
+ {
+ "input": "&RBarr",
+ "description": "Bad named entity: RBarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RBarr"
+ ]
+ ]
+ },
+ {
+ "input": "&RBarr;",
+ "description": "Named entity: RBarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2910"
+ ]
+ ]
+ },
+ {
+ "input": "&REG",
+ "description": "Named entity: REG without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ae"
+ ]
+ ]
+ },
+ {
+ "input": "&REG;",
+ "description": "Named entity: REG; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ae"
+ ]
+ ]
+ },
+ {
+ "input": "&Racute",
+ "description": "Bad named entity: Racute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Racute"
+ ]
+ ]
+ },
+ {
+ "input": "&Racute;",
+ "description": "Named entity: Racute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0154"
+ ]
+ ]
+ },
+ {
+ "input": "&Rang",
+ "description": "Bad named entity: Rang without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rang"
+ ]
+ ]
+ },
+ {
+ "input": "&Rang;",
+ "description": "Named entity: Rang; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27eb"
+ ]
+ ]
+ },
+ {
+ "input": "&Rarr",
+ "description": "Bad named entity: Rarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rarr"
+ ]
+ ]
+ },
+ {
+ "input": "&Rarr;",
+ "description": "Named entity: Rarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a0"
+ ]
+ ]
+ },
+ {
+ "input": "&Rarrtl",
+ "description": "Bad named entity: Rarrtl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rarrtl"
+ ]
+ ]
+ },
+ {
+ "input": "&Rarrtl;",
+ "description": "Named entity: Rarrtl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2916"
+ ]
+ ]
+ },
+ {
+ "input": "&Rcaron",
+ "description": "Bad named entity: Rcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Rcaron;",
+ "description": "Named entity: Rcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0158"
+ ]
+ ]
+ },
+ {
+ "input": "&Rcedil",
+ "description": "Bad named entity: Rcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&Rcedil;",
+ "description": "Named entity: Rcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0156"
+ ]
+ ]
+ },
+ {
+ "input": "&Rcy",
+ "description": "Bad named entity: Rcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Rcy;",
+ "description": "Named entity: Rcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0420"
+ ]
+ ]
+ },
+ {
+ "input": "&Re",
+ "description": "Bad named entity: Re without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Re"
+ ]
+ ]
+ },
+ {
+ "input": "&Re;",
+ "description": "Named entity: Re; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211c"
+ ]
+ ]
+ },
+ {
+ "input": "&ReverseElement",
+ "description": "Bad named entity: ReverseElement without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ReverseElement"
+ ]
+ ]
+ },
+ {
+ "input": "&ReverseElement;",
+ "description": "Named entity: ReverseElement; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220b"
+ ]
+ ]
+ },
+ {
+ "input": "&ReverseEquilibrium",
+ "description": "Bad named entity: ReverseEquilibrium without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ReverseEquilibrium"
+ ]
+ ]
+ },
+ {
+ "input": "&ReverseEquilibrium;",
+ "description": "Named entity: ReverseEquilibrium; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cb"
+ ]
+ ]
+ },
+ {
+ "input": "&ReverseUpEquilibrium",
+ "description": "Bad named entity: ReverseUpEquilibrium without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ReverseUpEquilibrium"
+ ]
+ ]
+ },
+ {
+ "input": "&ReverseUpEquilibrium;",
+ "description": "Named entity: ReverseUpEquilibrium; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u296f"
+ ]
+ ]
+ },
+ {
+ "input": "&Rfr",
+ "description": "Bad named entity: Rfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Rfr;",
+ "description": "Named entity: Rfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211c"
+ ]
+ ]
+ },
+ {
+ "input": "&Rho",
+ "description": "Bad named entity: Rho without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rho"
+ ]
+ ]
+ },
+ {
+ "input": "&Rho;",
+ "description": "Named entity: Rho; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a1"
+ ]
+ ]
+ },
+ {
+ "input": "&RightAngleBracket",
+ "description": "Bad named entity: RightAngleBracket without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightAngleBracket"
+ ]
+ ]
+ },
+ {
+ "input": "&RightAngleBracket;",
+ "description": "Named entity: RightAngleBracket; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e9"
+ ]
+ ]
+ },
+ {
+ "input": "&RightArrow",
+ "description": "Bad named entity: RightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&RightArrow;",
+ "description": "Named entity: RightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2192"
+ ]
+ ]
+ },
+ {
+ "input": "&RightArrowBar",
+ "description": "Bad named entity: RightArrowBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightArrowBar"
+ ]
+ ]
+ },
+ {
+ "input": "&RightArrowBar;",
+ "description": "Named entity: RightArrowBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21e5"
+ ]
+ ]
+ },
+ {
+ "input": "&RightArrowLeftArrow",
+ "description": "Bad named entity: RightArrowLeftArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightArrowLeftArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&RightArrowLeftArrow;",
+ "description": "Named entity: RightArrowLeftArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c4"
+ ]
+ ]
+ },
+ {
+ "input": "&RightCeiling",
+ "description": "Bad named entity: RightCeiling without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightCeiling"
+ ]
+ ]
+ },
+ {
+ "input": "&RightCeiling;",
+ "description": "Named entity: RightCeiling; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2309"
+ ]
+ ]
+ },
+ {
+ "input": "&RightDoubleBracket",
+ "description": "Bad named entity: RightDoubleBracket without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightDoubleBracket"
+ ]
+ ]
+ },
+ {
+ "input": "&RightDoubleBracket;",
+ "description": "Named entity: RightDoubleBracket; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e7"
+ ]
+ ]
+ },
+ {
+ "input": "&RightDownTeeVector",
+ "description": "Bad named entity: RightDownTeeVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightDownTeeVector"
+ ]
+ ]
+ },
+ {
+ "input": "&RightDownTeeVector;",
+ "description": "Named entity: RightDownTeeVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u295d"
+ ]
+ ]
+ },
+ {
+ "input": "&RightDownVector",
+ "description": "Bad named entity: RightDownVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightDownVector"
+ ]
+ ]
+ },
+ {
+ "input": "&RightDownVector;",
+ "description": "Named entity: RightDownVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c2"
+ ]
+ ]
+ },
+ {
+ "input": "&RightDownVectorBar",
+ "description": "Bad named entity: RightDownVectorBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightDownVectorBar"
+ ]
+ ]
+ },
+ {
+ "input": "&RightDownVectorBar;",
+ "description": "Named entity: RightDownVectorBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2955"
+ ]
+ ]
+ },
+ {
+ "input": "&RightFloor",
+ "description": "Bad named entity: RightFloor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightFloor"
+ ]
+ ]
+ },
+ {
+ "input": "&RightFloor;",
+ "description": "Named entity: RightFloor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u230b"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTee",
+ "description": "Bad named entity: RightTee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightTee"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTee;",
+ "description": "Named entity: RightTee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a2"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTeeArrow",
+ "description": "Bad named entity: RightTeeArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightTeeArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTeeArrow;",
+ "description": "Named entity: RightTeeArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a6"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTeeVector",
+ "description": "Bad named entity: RightTeeVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightTeeVector"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTeeVector;",
+ "description": "Named entity: RightTeeVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u295b"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTriangle",
+ "description": "Bad named entity: RightTriangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightTriangle"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTriangle;",
+ "description": "Named entity: RightTriangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b3"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTriangleBar",
+ "description": "Bad named entity: RightTriangleBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightTriangleBar"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTriangleBar;",
+ "description": "Named entity: RightTriangleBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29d0"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTriangleEqual",
+ "description": "Bad named entity: RightTriangleEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightTriangleEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&RightTriangleEqual;",
+ "description": "Named entity: RightTriangleEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b5"
+ ]
+ ]
+ },
+ {
+ "input": "&RightUpDownVector",
+ "description": "Bad named entity: RightUpDownVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightUpDownVector"
+ ]
+ ]
+ },
+ {
+ "input": "&RightUpDownVector;",
+ "description": "Named entity: RightUpDownVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u294f"
+ ]
+ ]
+ },
+ {
+ "input": "&RightUpTeeVector",
+ "description": "Bad named entity: RightUpTeeVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightUpTeeVector"
+ ]
+ ]
+ },
+ {
+ "input": "&RightUpTeeVector;",
+ "description": "Named entity: RightUpTeeVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u295c"
+ ]
+ ]
+ },
+ {
+ "input": "&RightUpVector",
+ "description": "Bad named entity: RightUpVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightUpVector"
+ ]
+ ]
+ },
+ {
+ "input": "&RightUpVector;",
+ "description": "Named entity: RightUpVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21be"
+ ]
+ ]
+ },
+ {
+ "input": "&RightUpVectorBar",
+ "description": "Bad named entity: RightUpVectorBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightUpVectorBar"
+ ]
+ ]
+ },
+ {
+ "input": "&RightUpVectorBar;",
+ "description": "Named entity: RightUpVectorBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2954"
+ ]
+ ]
+ },
+ {
+ "input": "&RightVector",
+ "description": "Bad named entity: RightVector without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightVector"
+ ]
+ ]
+ },
+ {
+ "input": "&RightVector;",
+ "description": "Named entity: RightVector; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c0"
+ ]
+ ]
+ },
+ {
+ "input": "&RightVectorBar",
+ "description": "Bad named entity: RightVectorBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RightVectorBar"
+ ]
+ ]
+ },
+ {
+ "input": "&RightVectorBar;",
+ "description": "Named entity: RightVectorBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2953"
+ ]
+ ]
+ },
+ {
+ "input": "&Rightarrow",
+ "description": "Bad named entity: Rightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Rightarrow;",
+ "description": "Named entity: Rightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d2"
+ ]
+ ]
+ },
+ {
+ "input": "&Ropf",
+ "description": "Bad named entity: Ropf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ropf"
+ ]
+ ]
+ },
+ {
+ "input": "&Ropf;",
+ "description": "Named entity: Ropf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211d"
+ ]
+ ]
+ },
+ {
+ "input": "&RoundImplies",
+ "description": "Bad named entity: RoundImplies without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RoundImplies"
+ ]
+ ]
+ },
+ {
+ "input": "&RoundImplies;",
+ "description": "Named entity: RoundImplies; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2970"
+ ]
+ ]
+ },
+ {
+ "input": "&Rrightarrow",
+ "description": "Bad named entity: Rrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Rrightarrow;",
+ "description": "Named entity: Rrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21db"
+ ]
+ ]
+ },
+ {
+ "input": "&Rscr",
+ "description": "Bad named entity: Rscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Rscr;",
+ "description": "Named entity: Rscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211b"
+ ]
+ ]
+ },
+ {
+ "input": "&Rsh",
+ "description": "Bad named entity: Rsh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Rsh"
+ ]
+ ]
+ },
+ {
+ "input": "&Rsh;",
+ "description": "Named entity: Rsh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b1"
+ ]
+ ]
+ },
+ {
+ "input": "&RuleDelayed",
+ "description": "Bad named entity: RuleDelayed without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&RuleDelayed"
+ ]
+ ]
+ },
+ {
+ "input": "&RuleDelayed;",
+ "description": "Named entity: RuleDelayed; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29f4"
+ ]
+ ]
+ },
+ {
+ "input": "&SHCHcy",
+ "description": "Bad named entity: SHCHcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SHCHcy"
+ ]
+ ]
+ },
+ {
+ "input": "&SHCHcy;",
+ "description": "Named entity: SHCHcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0429"
+ ]
+ ]
+ },
+ {
+ "input": "&SHcy",
+ "description": "Bad named entity: SHcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SHcy"
+ ]
+ ]
+ },
+ {
+ "input": "&SHcy;",
+ "description": "Named entity: SHcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0428"
+ ]
+ ]
+ },
+ {
+ "input": "&SOFTcy",
+ "description": "Bad named entity: SOFTcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SOFTcy"
+ ]
+ ]
+ },
+ {
+ "input": "&SOFTcy;",
+ "description": "Named entity: SOFTcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u042c"
+ ]
+ ]
+ },
+ {
+ "input": "&Sacute",
+ "description": "Bad named entity: Sacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sacute"
+ ]
+ ]
+ },
+ {
+ "input": "&Sacute;",
+ "description": "Named entity: Sacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u015a"
+ ]
+ ]
+ },
+ {
+ "input": "&Sc",
+ "description": "Bad named entity: Sc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sc"
+ ]
+ ]
+ },
+ {
+ "input": "&Sc;",
+ "description": "Named entity: Sc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2abc"
+ ]
+ ]
+ },
+ {
+ "input": "&Scaron",
+ "description": "Bad named entity: Scaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Scaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Scaron;",
+ "description": "Named entity: Scaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0160"
+ ]
+ ]
+ },
+ {
+ "input": "&Scedil",
+ "description": "Bad named entity: Scedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Scedil"
+ ]
+ ]
+ },
+ {
+ "input": "&Scedil;",
+ "description": "Named entity: Scedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u015e"
+ ]
+ ]
+ },
+ {
+ "input": "&Scirc",
+ "description": "Bad named entity: Scirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Scirc"
+ ]
+ ]
+ },
+ {
+ "input": "&Scirc;",
+ "description": "Named entity: Scirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u015c"
+ ]
+ ]
+ },
+ {
+ "input": "&Scy",
+ "description": "Bad named entity: Scy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Scy"
+ ]
+ ]
+ },
+ {
+ "input": "&Scy;",
+ "description": "Named entity: Scy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0421"
+ ]
+ ]
+ },
+ {
+ "input": "&Sfr",
+ "description": "Bad named entity: Sfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Sfr;",
+ "description": "Named entity: Sfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd16"
+ ]
+ ]
+ },
+ {
+ "input": "&ShortDownArrow",
+ "description": "Bad named entity: ShortDownArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ShortDownArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&ShortDownArrow;",
+ "description": "Named entity: ShortDownArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2193"
+ ]
+ ]
+ },
+ {
+ "input": "&ShortLeftArrow",
+ "description": "Bad named entity: ShortLeftArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ShortLeftArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&ShortLeftArrow;",
+ "description": "Named entity: ShortLeftArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2190"
+ ]
+ ]
+ },
+ {
+ "input": "&ShortRightArrow",
+ "description": "Bad named entity: ShortRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ShortRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&ShortRightArrow;",
+ "description": "Named entity: ShortRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2192"
+ ]
+ ]
+ },
+ {
+ "input": "&ShortUpArrow",
+ "description": "Bad named entity: ShortUpArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ShortUpArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&ShortUpArrow;",
+ "description": "Named entity: ShortUpArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2191"
+ ]
+ ]
+ },
+ {
+ "input": "&Sigma",
+ "description": "Bad named entity: Sigma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sigma"
+ ]
+ ]
+ },
+ {
+ "input": "&Sigma;",
+ "description": "Named entity: Sigma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a3"
+ ]
+ ]
+ },
+ {
+ "input": "&SmallCircle",
+ "description": "Bad named entity: SmallCircle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SmallCircle"
+ ]
+ ]
+ },
+ {
+ "input": "&SmallCircle;",
+ "description": "Named entity: SmallCircle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2218"
+ ]
+ ]
+ },
+ {
+ "input": "&Sopf",
+ "description": "Bad named entity: Sopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Sopf;",
+ "description": "Named entity: Sopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd4a"
+ ]
+ ]
+ },
+ {
+ "input": "&Sqrt",
+ "description": "Bad named entity: Sqrt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sqrt"
+ ]
+ ]
+ },
+ {
+ "input": "&Sqrt;",
+ "description": "Named entity: Sqrt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221a"
+ ]
+ ]
+ },
+ {
+ "input": "&Square",
+ "description": "Bad named entity: Square without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Square"
+ ]
+ ]
+ },
+ {
+ "input": "&Square;",
+ "description": "Named entity: Square; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25a1"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareIntersection",
+ "description": "Bad named entity: SquareIntersection without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SquareIntersection"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareIntersection;",
+ "description": "Named entity: SquareIntersection; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2293"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareSubset",
+ "description": "Bad named entity: SquareSubset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SquareSubset"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareSubset;",
+ "description": "Named entity: SquareSubset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228f"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareSubsetEqual",
+ "description": "Bad named entity: SquareSubsetEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SquareSubsetEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareSubsetEqual;",
+ "description": "Named entity: SquareSubsetEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2291"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareSuperset",
+ "description": "Bad named entity: SquareSuperset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SquareSuperset"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareSuperset;",
+ "description": "Named entity: SquareSuperset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2290"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareSupersetEqual",
+ "description": "Bad named entity: SquareSupersetEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SquareSupersetEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareSupersetEqual;",
+ "description": "Named entity: SquareSupersetEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2292"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareUnion",
+ "description": "Bad named entity: SquareUnion without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SquareUnion"
+ ]
+ ]
+ },
+ {
+ "input": "&SquareUnion;",
+ "description": "Named entity: SquareUnion; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2294"
+ ]
+ ]
+ },
+ {
+ "input": "&Sscr",
+ "description": "Bad named entity: Sscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Sscr;",
+ "description": "Named entity: Sscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcae"
+ ]
+ ]
+ },
+ {
+ "input": "&Star",
+ "description": "Bad named entity: Star without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Star"
+ ]
+ ]
+ },
+ {
+ "input": "&Star;",
+ "description": "Named entity: Star; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c6"
+ ]
+ ]
+ },
+ {
+ "input": "&Sub",
+ "description": "Bad named entity: Sub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sub"
+ ]
+ ]
+ },
+ {
+ "input": "&Sub;",
+ "description": "Named entity: Sub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d0"
+ ]
+ ]
+ },
+ {
+ "input": "&Subset",
+ "description": "Bad named entity: Subset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Subset"
+ ]
+ ]
+ },
+ {
+ "input": "&Subset;",
+ "description": "Named entity: Subset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d0"
+ ]
+ ]
+ },
+ {
+ "input": "&SubsetEqual",
+ "description": "Bad named entity: SubsetEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SubsetEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&SubsetEqual;",
+ "description": "Named entity: SubsetEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2286"
+ ]
+ ]
+ },
+ {
+ "input": "&Succeeds",
+ "description": "Bad named entity: Succeeds without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Succeeds"
+ ]
+ ]
+ },
+ {
+ "input": "&Succeeds;",
+ "description": "Named entity: Succeeds; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227b"
+ ]
+ ]
+ },
+ {
+ "input": "&SucceedsEqual",
+ "description": "Bad named entity: SucceedsEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SucceedsEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&SucceedsEqual;",
+ "description": "Named entity: SucceedsEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab0"
+ ]
+ ]
+ },
+ {
+ "input": "&SucceedsSlantEqual",
+ "description": "Bad named entity: SucceedsSlantEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SucceedsSlantEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&SucceedsSlantEqual;",
+ "description": "Named entity: SucceedsSlantEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227d"
+ ]
+ ]
+ },
+ {
+ "input": "&SucceedsTilde",
+ "description": "Bad named entity: SucceedsTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SucceedsTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&SucceedsTilde;",
+ "description": "Named entity: SucceedsTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227f"
+ ]
+ ]
+ },
+ {
+ "input": "&SuchThat",
+ "description": "Bad named entity: SuchThat without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SuchThat"
+ ]
+ ]
+ },
+ {
+ "input": "&SuchThat;",
+ "description": "Named entity: SuchThat; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220b"
+ ]
+ ]
+ },
+ {
+ "input": "&Sum",
+ "description": "Bad named entity: Sum without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sum"
+ ]
+ ]
+ },
+ {
+ "input": "&Sum;",
+ "description": "Named entity: Sum; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2211"
+ ]
+ ]
+ },
+ {
+ "input": "&Sup",
+ "description": "Bad named entity: Sup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Sup"
+ ]
+ ]
+ },
+ {
+ "input": "&Sup;",
+ "description": "Named entity: Sup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d1"
+ ]
+ ]
+ },
+ {
+ "input": "&Superset",
+ "description": "Bad named entity: Superset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Superset"
+ ]
+ ]
+ },
+ {
+ "input": "&Superset;",
+ "description": "Named entity: Superset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2283"
+ ]
+ ]
+ },
+ {
+ "input": "&SupersetEqual",
+ "description": "Bad named entity: SupersetEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&SupersetEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&SupersetEqual;",
+ "description": "Named entity: SupersetEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2287"
+ ]
+ ]
+ },
+ {
+ "input": "&Supset",
+ "description": "Bad named entity: Supset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Supset"
+ ]
+ ]
+ },
+ {
+ "input": "&Supset;",
+ "description": "Named entity: Supset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d1"
+ ]
+ ]
+ },
+ {
+ "input": "&THORN",
+ "description": "Named entity: THORN without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00de"
+ ]
+ ]
+ },
+ {
+ "input": "&THORN;",
+ "description": "Named entity: THORN; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00de"
+ ]
+ ]
+ },
+ {
+ "input": "&TRADE",
+ "description": "Bad named entity: TRADE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&TRADE"
+ ]
+ ]
+ },
+ {
+ "input": "&TRADE;",
+ "description": "Named entity: TRADE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2122"
+ ]
+ ]
+ },
+ {
+ "input": "&TSHcy",
+ "description": "Bad named entity: TSHcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&TSHcy"
+ ]
+ ]
+ },
+ {
+ "input": "&TSHcy;",
+ "description": "Named entity: TSHcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u040b"
+ ]
+ ]
+ },
+ {
+ "input": "&TScy",
+ "description": "Bad named entity: TScy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&TScy"
+ ]
+ ]
+ },
+ {
+ "input": "&TScy;",
+ "description": "Named entity: TScy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0426"
+ ]
+ ]
+ },
+ {
+ "input": "&Tab",
+ "description": "Bad named entity: Tab without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tab"
+ ]
+ ]
+ },
+ {
+ "input": "&Tab;",
+ "description": "Named entity: Tab; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\t"
+ ]
+ ]
+ },
+ {
+ "input": "&Tau",
+ "description": "Bad named entity: Tau without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tau"
+ ]
+ ]
+ },
+ {
+ "input": "&Tau;",
+ "description": "Named entity: Tau; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a4"
+ ]
+ ]
+ },
+ {
+ "input": "&Tcaron",
+ "description": "Bad named entity: Tcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Tcaron;",
+ "description": "Named entity: Tcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0164"
+ ]
+ ]
+ },
+ {
+ "input": "&Tcedil",
+ "description": "Bad named entity: Tcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&Tcedil;",
+ "description": "Named entity: Tcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0162"
+ ]
+ ]
+ },
+ {
+ "input": "&Tcy",
+ "description": "Bad named entity: Tcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Tcy;",
+ "description": "Named entity: Tcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0422"
+ ]
+ ]
+ },
+ {
+ "input": "&Tfr",
+ "description": "Bad named entity: Tfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Tfr;",
+ "description": "Named entity: Tfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd17"
+ ]
+ ]
+ },
+ {
+ "input": "&Therefore",
+ "description": "Bad named entity: Therefore without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Therefore"
+ ]
+ ]
+ },
+ {
+ "input": "&Therefore;",
+ "description": "Named entity: Therefore; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2234"
+ ]
+ ]
+ },
+ {
+ "input": "&Theta",
+ "description": "Bad named entity: Theta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Theta"
+ ]
+ ]
+ },
+ {
+ "input": "&Theta;",
+ "description": "Named entity: Theta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0398"
+ ]
+ ]
+ },
+ {
+ "input": "&ThickSpace",
+ "description": "Bad named entity: ThickSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ThickSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&ThickSpace;",
+ "description": "Named entity: ThickSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u205f\u200a"
+ ]
+ ]
+ },
+ {
+ "input": "&ThinSpace",
+ "description": "Bad named entity: ThinSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ThinSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&ThinSpace;",
+ "description": "Named entity: ThinSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2009"
+ ]
+ ]
+ },
+ {
+ "input": "&Tilde",
+ "description": "Bad named entity: Tilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tilde"
+ ]
+ ]
+ },
+ {
+ "input": "&Tilde;",
+ "description": "Named entity: Tilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223c"
+ ]
+ ]
+ },
+ {
+ "input": "&TildeEqual",
+ "description": "Bad named entity: TildeEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&TildeEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&TildeEqual;",
+ "description": "Named entity: TildeEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2243"
+ ]
+ ]
+ },
+ {
+ "input": "&TildeFullEqual",
+ "description": "Bad named entity: TildeFullEqual without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&TildeFullEqual"
+ ]
+ ]
+ },
+ {
+ "input": "&TildeFullEqual;",
+ "description": "Named entity: TildeFullEqual; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2245"
+ ]
+ ]
+ },
+ {
+ "input": "&TildeTilde",
+ "description": "Bad named entity: TildeTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&TildeTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&TildeTilde;",
+ "description": "Named entity: TildeTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2248"
+ ]
+ ]
+ },
+ {
+ "input": "&Topf",
+ "description": "Bad named entity: Topf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Topf"
+ ]
+ ]
+ },
+ {
+ "input": "&Topf;",
+ "description": "Named entity: Topf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd4b"
+ ]
+ ]
+ },
+ {
+ "input": "&TripleDot",
+ "description": "Bad named entity: TripleDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&TripleDot"
+ ]
+ ]
+ },
+ {
+ "input": "&TripleDot;",
+ "description": "Named entity: TripleDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u20db"
+ ]
+ ]
+ },
+ {
+ "input": "&Tscr",
+ "description": "Bad named entity: Tscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Tscr;",
+ "description": "Named entity: Tscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcaf"
+ ]
+ ]
+ },
+ {
+ "input": "&Tstrok",
+ "description": "Bad named entity: Tstrok without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Tstrok"
+ ]
+ ]
+ },
+ {
+ "input": "&Tstrok;",
+ "description": "Named entity: Tstrok; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0166"
+ ]
+ ]
+ },
+ {
+ "input": "&Uacute",
+ "description": "Named entity: Uacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00da"
+ ]
+ ]
+ },
+ {
+ "input": "&Uacute;",
+ "description": "Named entity: Uacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00da"
+ ]
+ ]
+ },
+ {
+ "input": "&Uarr",
+ "description": "Bad named entity: Uarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Uarr"
+ ]
+ ]
+ },
+ {
+ "input": "&Uarr;",
+ "description": "Named entity: Uarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219f"
+ ]
+ ]
+ },
+ {
+ "input": "&Uarrocir",
+ "description": "Bad named entity: Uarrocir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Uarrocir"
+ ]
+ ]
+ },
+ {
+ "input": "&Uarrocir;",
+ "description": "Named entity: Uarrocir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2949"
+ ]
+ ]
+ },
+ {
+ "input": "&Ubrcy",
+ "description": "Bad named entity: Ubrcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ubrcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Ubrcy;",
+ "description": "Named entity: Ubrcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u040e"
+ ]
+ ]
+ },
+ {
+ "input": "&Ubreve",
+ "description": "Bad named entity: Ubreve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ubreve"
+ ]
+ ]
+ },
+ {
+ "input": "&Ubreve;",
+ "description": "Named entity: Ubreve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u016c"
+ ]
+ ]
+ },
+ {
+ "input": "&Ucirc",
+ "description": "Named entity: Ucirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00db"
+ ]
+ ]
+ },
+ {
+ "input": "&Ucirc;",
+ "description": "Named entity: Ucirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00db"
+ ]
+ ]
+ },
+ {
+ "input": "&Ucy",
+ "description": "Bad named entity: Ucy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ucy"
+ ]
+ ]
+ },
+ {
+ "input": "&Ucy;",
+ "description": "Named entity: Ucy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0423"
+ ]
+ ]
+ },
+ {
+ "input": "&Udblac",
+ "description": "Bad named entity: Udblac without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Udblac"
+ ]
+ ]
+ },
+ {
+ "input": "&Udblac;",
+ "description": "Named entity: Udblac; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0170"
+ ]
+ ]
+ },
+ {
+ "input": "&Ufr",
+ "description": "Bad named entity: Ufr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ufr"
+ ]
+ ]
+ },
+ {
+ "input": "&Ufr;",
+ "description": "Named entity: Ufr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd18"
+ ]
+ ]
+ },
+ {
+ "input": "&Ugrave",
+ "description": "Named entity: Ugrave without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d9"
+ ]
+ ]
+ },
+ {
+ "input": "&Ugrave;",
+ "description": "Named entity: Ugrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d9"
+ ]
+ ]
+ },
+ {
+ "input": "&Umacr",
+ "description": "Bad named entity: Umacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Umacr"
+ ]
+ ]
+ },
+ {
+ "input": "&Umacr;",
+ "description": "Named entity: Umacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u016a"
+ ]
+ ]
+ },
+ {
+ "input": "&UnderBar",
+ "description": "Bad named entity: UnderBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UnderBar"
+ ]
+ ]
+ },
+ {
+ "input": "&UnderBar;",
+ "description": "Named entity: UnderBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "_"
+ ]
+ ]
+ },
+ {
+ "input": "&UnderBrace",
+ "description": "Bad named entity: UnderBrace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UnderBrace"
+ ]
+ ]
+ },
+ {
+ "input": "&UnderBrace;",
+ "description": "Named entity: UnderBrace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23df"
+ ]
+ ]
+ },
+ {
+ "input": "&UnderBracket",
+ "description": "Bad named entity: UnderBracket without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UnderBracket"
+ ]
+ ]
+ },
+ {
+ "input": "&UnderBracket;",
+ "description": "Named entity: UnderBracket; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b5"
+ ]
+ ]
+ },
+ {
+ "input": "&UnderParenthesis",
+ "description": "Bad named entity: UnderParenthesis without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UnderParenthesis"
+ ]
+ ]
+ },
+ {
+ "input": "&UnderParenthesis;",
+ "description": "Named entity: UnderParenthesis; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23dd"
+ ]
+ ]
+ },
+ {
+ "input": "&Union",
+ "description": "Bad named entity: Union without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Union"
+ ]
+ ]
+ },
+ {
+ "input": "&Union;",
+ "description": "Named entity: Union; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c3"
+ ]
+ ]
+ },
+ {
+ "input": "&UnionPlus",
+ "description": "Bad named entity: UnionPlus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UnionPlus"
+ ]
+ ]
+ },
+ {
+ "input": "&UnionPlus;",
+ "description": "Named entity: UnionPlus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228e"
+ ]
+ ]
+ },
+ {
+ "input": "&Uogon",
+ "description": "Bad named entity: Uogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Uogon"
+ ]
+ ]
+ },
+ {
+ "input": "&Uogon;",
+ "description": "Named entity: Uogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0172"
+ ]
+ ]
+ },
+ {
+ "input": "&Uopf",
+ "description": "Bad named entity: Uopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Uopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Uopf;",
+ "description": "Named entity: Uopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd4c"
+ ]
+ ]
+ },
+ {
+ "input": "&UpArrow",
+ "description": "Bad named entity: UpArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&UpArrow;",
+ "description": "Named entity: UpArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2191"
+ ]
+ ]
+ },
+ {
+ "input": "&UpArrowBar",
+ "description": "Bad named entity: UpArrowBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpArrowBar"
+ ]
+ ]
+ },
+ {
+ "input": "&UpArrowBar;",
+ "description": "Named entity: UpArrowBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2912"
+ ]
+ ]
+ },
+ {
+ "input": "&UpArrowDownArrow",
+ "description": "Bad named entity: UpArrowDownArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpArrowDownArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&UpArrowDownArrow;",
+ "description": "Named entity: UpArrowDownArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c5"
+ ]
+ ]
+ },
+ {
+ "input": "&UpDownArrow",
+ "description": "Bad named entity: UpDownArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpDownArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&UpDownArrow;",
+ "description": "Named entity: UpDownArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2195"
+ ]
+ ]
+ },
+ {
+ "input": "&UpEquilibrium",
+ "description": "Bad named entity: UpEquilibrium without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpEquilibrium"
+ ]
+ ]
+ },
+ {
+ "input": "&UpEquilibrium;",
+ "description": "Named entity: UpEquilibrium; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u296e"
+ ]
+ ]
+ },
+ {
+ "input": "&UpTee",
+ "description": "Bad named entity: UpTee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpTee"
+ ]
+ ]
+ },
+ {
+ "input": "&UpTee;",
+ "description": "Named entity: UpTee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a5"
+ ]
+ ]
+ },
+ {
+ "input": "&UpTeeArrow",
+ "description": "Bad named entity: UpTeeArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpTeeArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&UpTeeArrow;",
+ "description": "Named entity: UpTeeArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a5"
+ ]
+ ]
+ },
+ {
+ "input": "&Uparrow",
+ "description": "Bad named entity: Uparrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Uparrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Uparrow;",
+ "description": "Named entity: Uparrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d1"
+ ]
+ ]
+ },
+ {
+ "input": "&Updownarrow",
+ "description": "Bad named entity: Updownarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Updownarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&Updownarrow;",
+ "description": "Named entity: Updownarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d5"
+ ]
+ ]
+ },
+ {
+ "input": "&UpperLeftArrow",
+ "description": "Bad named entity: UpperLeftArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpperLeftArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&UpperLeftArrow;",
+ "description": "Named entity: UpperLeftArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2196"
+ ]
+ ]
+ },
+ {
+ "input": "&UpperRightArrow",
+ "description": "Bad named entity: UpperRightArrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&UpperRightArrow"
+ ]
+ ]
+ },
+ {
+ "input": "&UpperRightArrow;",
+ "description": "Named entity: UpperRightArrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2197"
+ ]
+ ]
+ },
+ {
+ "input": "&Upsi",
+ "description": "Bad named entity: Upsi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Upsi"
+ ]
+ ]
+ },
+ {
+ "input": "&Upsi;",
+ "description": "Named entity: Upsi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d2"
+ ]
+ ]
+ },
+ {
+ "input": "&Upsilon",
+ "description": "Bad named entity: Upsilon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Upsilon"
+ ]
+ ]
+ },
+ {
+ "input": "&Upsilon;",
+ "description": "Named entity: Upsilon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a5"
+ ]
+ ]
+ },
+ {
+ "input": "&Uring",
+ "description": "Bad named entity: Uring without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Uring"
+ ]
+ ]
+ },
+ {
+ "input": "&Uring;",
+ "description": "Named entity: Uring; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u016e"
+ ]
+ ]
+ },
+ {
+ "input": "&Uscr",
+ "description": "Bad named entity: Uscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Uscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Uscr;",
+ "description": "Named entity: Uscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb0"
+ ]
+ ]
+ },
+ {
+ "input": "&Utilde",
+ "description": "Bad named entity: Utilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Utilde"
+ ]
+ ]
+ },
+ {
+ "input": "&Utilde;",
+ "description": "Named entity: Utilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0168"
+ ]
+ ]
+ },
+ {
+ "input": "&Uuml",
+ "description": "Named entity: Uuml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00dc"
+ ]
+ ]
+ },
+ {
+ "input": "&Uuml;",
+ "description": "Named entity: Uuml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00dc"
+ ]
+ ]
+ },
+ {
+ "input": "&VDash",
+ "description": "Bad named entity: VDash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&VDash"
+ ]
+ ]
+ },
+ {
+ "input": "&VDash;",
+ "description": "Named entity: VDash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ab"
+ ]
+ ]
+ },
+ {
+ "input": "&Vbar",
+ "description": "Bad named entity: Vbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vbar"
+ ]
+ ]
+ },
+ {
+ "input": "&Vbar;",
+ "description": "Named entity: Vbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aeb"
+ ]
+ ]
+ },
+ {
+ "input": "&Vcy",
+ "description": "Bad named entity: Vcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Vcy;",
+ "description": "Named entity: Vcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0412"
+ ]
+ ]
+ },
+ {
+ "input": "&Vdash",
+ "description": "Bad named entity: Vdash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vdash"
+ ]
+ ]
+ },
+ {
+ "input": "&Vdash;",
+ "description": "Named entity: Vdash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a9"
+ ]
+ ]
+ },
+ {
+ "input": "&Vdashl",
+ "description": "Bad named entity: Vdashl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vdashl"
+ ]
+ ]
+ },
+ {
+ "input": "&Vdashl;",
+ "description": "Named entity: Vdashl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ae6"
+ ]
+ ]
+ },
+ {
+ "input": "&Vee",
+ "description": "Bad named entity: Vee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vee"
+ ]
+ ]
+ },
+ {
+ "input": "&Vee;",
+ "description": "Named entity: Vee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c1"
+ ]
+ ]
+ },
+ {
+ "input": "&Verbar",
+ "description": "Bad named entity: Verbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Verbar"
+ ]
+ ]
+ },
+ {
+ "input": "&Verbar;",
+ "description": "Named entity: Verbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2016"
+ ]
+ ]
+ },
+ {
+ "input": "&Vert",
+ "description": "Bad named entity: Vert without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vert"
+ ]
+ ]
+ },
+ {
+ "input": "&Vert;",
+ "description": "Named entity: Vert; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2016"
+ ]
+ ]
+ },
+ {
+ "input": "&VerticalBar",
+ "description": "Bad named entity: VerticalBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&VerticalBar"
+ ]
+ ]
+ },
+ {
+ "input": "&VerticalBar;",
+ "description": "Named entity: VerticalBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2223"
+ ]
+ ]
+ },
+ {
+ "input": "&VerticalLine",
+ "description": "Bad named entity: VerticalLine without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&VerticalLine"
+ ]
+ ]
+ },
+ {
+ "input": "&VerticalLine;",
+ "description": "Named entity: VerticalLine; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "|"
+ ]
+ ]
+ },
+ {
+ "input": "&VerticalSeparator",
+ "description": "Bad named entity: VerticalSeparator without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&VerticalSeparator"
+ ]
+ ]
+ },
+ {
+ "input": "&VerticalSeparator;",
+ "description": "Named entity: VerticalSeparator; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2758"
+ ]
+ ]
+ },
+ {
+ "input": "&VerticalTilde",
+ "description": "Bad named entity: VerticalTilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&VerticalTilde"
+ ]
+ ]
+ },
+ {
+ "input": "&VerticalTilde;",
+ "description": "Named entity: VerticalTilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2240"
+ ]
+ ]
+ },
+ {
+ "input": "&VeryThinSpace",
+ "description": "Bad named entity: VeryThinSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&VeryThinSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&VeryThinSpace;",
+ "description": "Named entity: VeryThinSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200a"
+ ]
+ ]
+ },
+ {
+ "input": "&Vfr",
+ "description": "Bad named entity: Vfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Vfr;",
+ "description": "Named entity: Vfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd19"
+ ]
+ ]
+ },
+ {
+ "input": "&Vopf",
+ "description": "Bad named entity: Vopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Vopf;",
+ "description": "Named entity: Vopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd4d"
+ ]
+ ]
+ },
+ {
+ "input": "&Vscr",
+ "description": "Bad named entity: Vscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Vscr;",
+ "description": "Named entity: Vscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb1"
+ ]
+ ]
+ },
+ {
+ "input": "&Vvdash",
+ "description": "Bad named entity: Vvdash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Vvdash"
+ ]
+ ]
+ },
+ {
+ "input": "&Vvdash;",
+ "description": "Named entity: Vvdash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22aa"
+ ]
+ ]
+ },
+ {
+ "input": "&Wcirc",
+ "description": "Bad named entity: Wcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Wcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&Wcirc;",
+ "description": "Named entity: Wcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0174"
+ ]
+ ]
+ },
+ {
+ "input": "&Wedge",
+ "description": "Bad named entity: Wedge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Wedge"
+ ]
+ ]
+ },
+ {
+ "input": "&Wedge;",
+ "description": "Named entity: Wedge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c0"
+ ]
+ ]
+ },
+ {
+ "input": "&Wfr",
+ "description": "Bad named entity: Wfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Wfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Wfr;",
+ "description": "Named entity: Wfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd1a"
+ ]
+ ]
+ },
+ {
+ "input": "&Wopf",
+ "description": "Bad named entity: Wopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Wopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Wopf;",
+ "description": "Named entity: Wopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd4e"
+ ]
+ ]
+ },
+ {
+ "input": "&Wscr",
+ "description": "Bad named entity: Wscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Wscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Wscr;",
+ "description": "Named entity: Wscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb2"
+ ]
+ ]
+ },
+ {
+ "input": "&Xfr",
+ "description": "Bad named entity: Xfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Xfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Xfr;",
+ "description": "Named entity: Xfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd1b"
+ ]
+ ]
+ },
+ {
+ "input": "&Xi",
+ "description": "Bad named entity: Xi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Xi"
+ ]
+ ]
+ },
+ {
+ "input": "&Xi;",
+ "description": "Named entity: Xi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u039e"
+ ]
+ ]
+ },
+ {
+ "input": "&Xopf",
+ "description": "Bad named entity: Xopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Xopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Xopf;",
+ "description": "Named entity: Xopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd4f"
+ ]
+ ]
+ },
+ {
+ "input": "&Xscr",
+ "description": "Bad named entity: Xscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Xscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Xscr;",
+ "description": "Named entity: Xscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb3"
+ ]
+ ]
+ },
+ {
+ "input": "&YAcy",
+ "description": "Bad named entity: YAcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&YAcy"
+ ]
+ ]
+ },
+ {
+ "input": "&YAcy;",
+ "description": "Named entity: YAcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u042f"
+ ]
+ ]
+ },
+ {
+ "input": "&YIcy",
+ "description": "Bad named entity: YIcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&YIcy"
+ ]
+ ]
+ },
+ {
+ "input": "&YIcy;",
+ "description": "Named entity: YIcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0407"
+ ]
+ ]
+ },
+ {
+ "input": "&YUcy",
+ "description": "Bad named entity: YUcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&YUcy"
+ ]
+ ]
+ },
+ {
+ "input": "&YUcy;",
+ "description": "Named entity: YUcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u042e"
+ ]
+ ]
+ },
+ {
+ "input": "&Yacute",
+ "description": "Named entity: Yacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00dd"
+ ]
+ ]
+ },
+ {
+ "input": "&Yacute;",
+ "description": "Named entity: Yacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00dd"
+ ]
+ ]
+ },
+ {
+ "input": "&Ycirc",
+ "description": "Bad named entity: Ycirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ycirc"
+ ]
+ ]
+ },
+ {
+ "input": "&Ycirc;",
+ "description": "Named entity: Ycirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0176"
+ ]
+ ]
+ },
+ {
+ "input": "&Ycy",
+ "description": "Bad named entity: Ycy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Ycy"
+ ]
+ ]
+ },
+ {
+ "input": "&Ycy;",
+ "description": "Named entity: Ycy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u042b"
+ ]
+ ]
+ },
+ {
+ "input": "&Yfr",
+ "description": "Bad named entity: Yfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Yfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Yfr;",
+ "description": "Named entity: Yfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd1c"
+ ]
+ ]
+ },
+ {
+ "input": "&Yopf",
+ "description": "Bad named entity: Yopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Yopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Yopf;",
+ "description": "Named entity: Yopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd50"
+ ]
+ ]
+ },
+ {
+ "input": "&Yscr",
+ "description": "Bad named entity: Yscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Yscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Yscr;",
+ "description": "Named entity: Yscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb4"
+ ]
+ ]
+ },
+ {
+ "input": "&Yuml",
+ "description": "Bad named entity: Yuml without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Yuml"
+ ]
+ ]
+ },
+ {
+ "input": "&Yuml;",
+ "description": "Named entity: Yuml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0178"
+ ]
+ ]
+ },
+ {
+ "input": "&ZHcy",
+ "description": "Bad named entity: ZHcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ZHcy"
+ ]
+ ]
+ },
+ {
+ "input": "&ZHcy;",
+ "description": "Named entity: ZHcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0416"
+ ]
+ ]
+ },
+ {
+ "input": "&Zacute",
+ "description": "Bad named entity: Zacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Zacute"
+ ]
+ ]
+ },
+ {
+ "input": "&Zacute;",
+ "description": "Named entity: Zacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0179"
+ ]
+ ]
+ },
+ {
+ "input": "&Zcaron",
+ "description": "Bad named entity: Zcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Zcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&Zcaron;",
+ "description": "Named entity: Zcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u017d"
+ ]
+ ]
+ },
+ {
+ "input": "&Zcy",
+ "description": "Bad named entity: Zcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Zcy"
+ ]
+ ]
+ },
+ {
+ "input": "&Zcy;",
+ "description": "Named entity: Zcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0417"
+ ]
+ ]
+ },
+ {
+ "input": "&Zdot",
+ "description": "Bad named entity: Zdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Zdot"
+ ]
+ ]
+ },
+ {
+ "input": "&Zdot;",
+ "description": "Named entity: Zdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u017b"
+ ]
+ ]
+ },
+ {
+ "input": "&ZeroWidthSpace",
+ "description": "Bad named entity: ZeroWidthSpace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ZeroWidthSpace"
+ ]
+ ]
+ },
+ {
+ "input": "&ZeroWidthSpace;",
+ "description": "Named entity: ZeroWidthSpace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200b"
+ ]
+ ]
+ },
+ {
+ "input": "&Zeta",
+ "description": "Bad named entity: Zeta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Zeta"
+ ]
+ ]
+ },
+ {
+ "input": "&Zeta;",
+ "description": "Named entity: Zeta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0396"
+ ]
+ ]
+ },
+ {
+ "input": "&Zfr",
+ "description": "Bad named entity: Zfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Zfr"
+ ]
+ ]
+ },
+ {
+ "input": "&Zfr;",
+ "description": "Named entity: Zfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2128"
+ ]
+ ]
+ },
+ {
+ "input": "&Zopf",
+ "description": "Bad named entity: Zopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Zopf"
+ ]
+ ]
+ },
+ {
+ "input": "&Zopf;",
+ "description": "Named entity: Zopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2124"
+ ]
+ ]
+ },
+ {
+ "input": "&Zscr",
+ "description": "Bad named entity: Zscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&Zscr"
+ ]
+ ]
+ },
+ {
+ "input": "&Zscr;",
+ "description": "Named entity: Zscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb5"
+ ]
+ ]
+ },
+ {
+ "input": "&aacute",
+ "description": "Named entity: aacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e1"
+ ]
+ ]
+ },
+ {
+ "input": "&aacute;",
+ "description": "Named entity: aacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e1"
+ ]
+ ]
+ },
+ {
+ "input": "&abreve",
+ "description": "Bad named entity: abreve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&abreve"
+ ]
+ ]
+ },
+ {
+ "input": "&abreve;",
+ "description": "Named entity: abreve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0103"
+ ]
+ ]
+ },
+ {
+ "input": "&ac",
+ "description": "Bad named entity: ac without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ac"
+ ]
+ ]
+ },
+ {
+ "input": "&ac;",
+ "description": "Named entity: ac; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223e"
+ ]
+ ]
+ },
+ {
+ "input": "&acE",
+ "description": "Bad named entity: acE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&acE"
+ ]
+ ]
+ },
+ {
+ "input": "&acE;",
+ "description": "Named entity: acE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223e\u0333"
+ ]
+ ]
+ },
+ {
+ "input": "&acd",
+ "description": "Bad named entity: acd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&acd"
+ ]
+ ]
+ },
+ {
+ "input": "&acd;",
+ "description": "Named entity: acd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223f"
+ ]
+ ]
+ },
+ {
+ "input": "&acirc",
+ "description": "Named entity: acirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e2"
+ ]
+ ]
+ },
+ {
+ "input": "&acirc;",
+ "description": "Named entity: acirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e2"
+ ]
+ ]
+ },
+ {
+ "input": "&acute",
+ "description": "Named entity: acute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b4"
+ ]
+ ]
+ },
+ {
+ "input": "&acute;",
+ "description": "Named entity: acute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b4"
+ ]
+ ]
+ },
+ {
+ "input": "&acy",
+ "description": "Bad named entity: acy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&acy"
+ ]
+ ]
+ },
+ {
+ "input": "&acy;",
+ "description": "Named entity: acy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0430"
+ ]
+ ]
+ },
+ {
+ "input": "&aelig",
+ "description": "Named entity: aelig without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e6"
+ ]
+ ]
+ },
+ {
+ "input": "&aelig;",
+ "description": "Named entity: aelig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e6"
+ ]
+ ]
+ },
+ {
+ "input": "&af",
+ "description": "Bad named entity: af without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&af"
+ ]
+ ]
+ },
+ {
+ "input": "&af;",
+ "description": "Named entity: af; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2061"
+ ]
+ ]
+ },
+ {
+ "input": "&afr",
+ "description": "Bad named entity: afr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&afr"
+ ]
+ ]
+ },
+ {
+ "input": "&afr;",
+ "description": "Named entity: afr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd1e"
+ ]
+ ]
+ },
+ {
+ "input": "&agrave",
+ "description": "Named entity: agrave without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e0"
+ ]
+ ]
+ },
+ {
+ "input": "&agrave;",
+ "description": "Named entity: agrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e0"
+ ]
+ ]
+ },
+ {
+ "input": "&alefsym",
+ "description": "Bad named entity: alefsym without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&alefsym"
+ ]
+ ]
+ },
+ {
+ "input": "&alefsym;",
+ "description": "Named entity: alefsym; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2135"
+ ]
+ ]
+ },
+ {
+ "input": "&aleph",
+ "description": "Bad named entity: aleph without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&aleph"
+ ]
+ ]
+ },
+ {
+ "input": "&aleph;",
+ "description": "Named entity: aleph; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2135"
+ ]
+ ]
+ },
+ {
+ "input": "&alpha",
+ "description": "Bad named entity: alpha without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&alpha"
+ ]
+ ]
+ },
+ {
+ "input": "&alpha;",
+ "description": "Named entity: alpha; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b1"
+ ]
+ ]
+ },
+ {
+ "input": "&amacr",
+ "description": "Bad named entity: amacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&amacr"
+ ]
+ ]
+ },
+ {
+ "input": "&amacr;",
+ "description": "Named entity: amacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0101"
+ ]
+ ]
+ },
+ {
+ "input": "&amalg",
+ "description": "Bad named entity: amalg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&amalg"
+ ]
+ ]
+ },
+ {
+ "input": "&amalg;",
+ "description": "Named entity: amalg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a3f"
+ ]
+ ]
+ },
+ {
+ "input": "&amp",
+ "description": "Named entity: amp without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "&"
+ ]
+ ]
+ },
+ {
+ "input": "&amp;",
+ "description": "Named entity: amp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&"
+ ]
+ ]
+ },
+ {
+ "input": "&and",
+ "description": "Bad named entity: and without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&and"
+ ]
+ ]
+ },
+ {
+ "input": "&and;",
+ "description": "Named entity: and; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2227"
+ ]
+ ]
+ },
+ {
+ "input": "&andand",
+ "description": "Bad named entity: andand without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&andand"
+ ]
+ ]
+ },
+ {
+ "input": "&andand;",
+ "description": "Named entity: andand; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a55"
+ ]
+ ]
+ },
+ {
+ "input": "&andd",
+ "description": "Bad named entity: andd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&andd"
+ ]
+ ]
+ },
+ {
+ "input": "&andd;",
+ "description": "Named entity: andd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a5c"
+ ]
+ ]
+ },
+ {
+ "input": "&andslope",
+ "description": "Bad named entity: andslope without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&andslope"
+ ]
+ ]
+ },
+ {
+ "input": "&andslope;",
+ "description": "Named entity: andslope; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a58"
+ ]
+ ]
+ },
+ {
+ "input": "&andv",
+ "description": "Bad named entity: andv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&andv"
+ ]
+ ]
+ },
+ {
+ "input": "&andv;",
+ "description": "Named entity: andv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a5a"
+ ]
+ ]
+ },
+ {
+ "input": "&ang",
+ "description": "Bad named entity: ang without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ang"
+ ]
+ ]
+ },
+ {
+ "input": "&ang;",
+ "description": "Named entity: ang; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2220"
+ ]
+ ]
+ },
+ {
+ "input": "&ange",
+ "description": "Bad named entity: ange without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ange"
+ ]
+ ]
+ },
+ {
+ "input": "&ange;",
+ "description": "Named entity: ange; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29a4"
+ ]
+ ]
+ },
+ {
+ "input": "&angle",
+ "description": "Bad named entity: angle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angle"
+ ]
+ ]
+ },
+ {
+ "input": "&angle;",
+ "description": "Named entity: angle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2220"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsd",
+ "description": "Bad named entity: angmsd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsd"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsd;",
+ "description": "Named entity: angmsd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2221"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdaa",
+ "description": "Bad named entity: angmsdaa without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsdaa"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdaa;",
+ "description": "Named entity: angmsdaa; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29a8"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdab",
+ "description": "Bad named entity: angmsdab without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsdab"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdab;",
+ "description": "Named entity: angmsdab; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29a9"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdac",
+ "description": "Bad named entity: angmsdac without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsdac"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdac;",
+ "description": "Named entity: angmsdac; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29aa"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdad",
+ "description": "Bad named entity: angmsdad without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsdad"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdad;",
+ "description": "Named entity: angmsdad; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29ab"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdae",
+ "description": "Bad named entity: angmsdae without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsdae"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdae;",
+ "description": "Named entity: angmsdae; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29ac"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdaf",
+ "description": "Bad named entity: angmsdaf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsdaf"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdaf;",
+ "description": "Named entity: angmsdaf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29ad"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdag",
+ "description": "Bad named entity: angmsdag without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsdag"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdag;",
+ "description": "Named entity: angmsdag; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29ae"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdah",
+ "description": "Bad named entity: angmsdah without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angmsdah"
+ ]
+ ]
+ },
+ {
+ "input": "&angmsdah;",
+ "description": "Named entity: angmsdah; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29af"
+ ]
+ ]
+ },
+ {
+ "input": "&angrt",
+ "description": "Bad named entity: angrt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angrt"
+ ]
+ ]
+ },
+ {
+ "input": "&angrt;",
+ "description": "Named entity: angrt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221f"
+ ]
+ ]
+ },
+ {
+ "input": "&angrtvb",
+ "description": "Bad named entity: angrtvb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angrtvb"
+ ]
+ ]
+ },
+ {
+ "input": "&angrtvb;",
+ "description": "Named entity: angrtvb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22be"
+ ]
+ ]
+ },
+ {
+ "input": "&angrtvbd",
+ "description": "Bad named entity: angrtvbd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angrtvbd"
+ ]
+ ]
+ },
+ {
+ "input": "&angrtvbd;",
+ "description": "Named entity: angrtvbd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u299d"
+ ]
+ ]
+ },
+ {
+ "input": "&angsph",
+ "description": "Bad named entity: angsph without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angsph"
+ ]
+ ]
+ },
+ {
+ "input": "&angsph;",
+ "description": "Named entity: angsph; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2222"
+ ]
+ ]
+ },
+ {
+ "input": "&angst",
+ "description": "Bad named entity: angst without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angst"
+ ]
+ ]
+ },
+ {
+ "input": "&angst;",
+ "description": "Named entity: angst; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00c5"
+ ]
+ ]
+ },
+ {
+ "input": "&angzarr",
+ "description": "Bad named entity: angzarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&angzarr"
+ ]
+ ]
+ },
+ {
+ "input": "&angzarr;",
+ "description": "Named entity: angzarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u237c"
+ ]
+ ]
+ },
+ {
+ "input": "&aogon",
+ "description": "Bad named entity: aogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&aogon"
+ ]
+ ]
+ },
+ {
+ "input": "&aogon;",
+ "description": "Named entity: aogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0105"
+ ]
+ ]
+ },
+ {
+ "input": "&aopf",
+ "description": "Bad named entity: aopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&aopf"
+ ]
+ ]
+ },
+ {
+ "input": "&aopf;",
+ "description": "Named entity: aopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd52"
+ ]
+ ]
+ },
+ {
+ "input": "&ap",
+ "description": "Bad named entity: ap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ap"
+ ]
+ ]
+ },
+ {
+ "input": "&ap;",
+ "description": "Named entity: ap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2248"
+ ]
+ ]
+ },
+ {
+ "input": "&apE",
+ "description": "Bad named entity: apE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&apE"
+ ]
+ ]
+ },
+ {
+ "input": "&apE;",
+ "description": "Named entity: apE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a70"
+ ]
+ ]
+ },
+ {
+ "input": "&apacir",
+ "description": "Bad named entity: apacir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&apacir"
+ ]
+ ]
+ },
+ {
+ "input": "&apacir;",
+ "description": "Named entity: apacir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a6f"
+ ]
+ ]
+ },
+ {
+ "input": "&ape",
+ "description": "Bad named entity: ape without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ape"
+ ]
+ ]
+ },
+ {
+ "input": "&ape;",
+ "description": "Named entity: ape; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224a"
+ ]
+ ]
+ },
+ {
+ "input": "&apid",
+ "description": "Bad named entity: apid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&apid"
+ ]
+ ]
+ },
+ {
+ "input": "&apid;",
+ "description": "Named entity: apid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224b"
+ ]
+ ]
+ },
+ {
+ "input": "&apos",
+ "description": "Bad named entity: apos without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&apos"
+ ]
+ ]
+ },
+ {
+ "input": "&apos;",
+ "description": "Named entity: apos; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "'"
+ ]
+ ]
+ },
+ {
+ "input": "&approx",
+ "description": "Bad named entity: approx without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&approx"
+ ]
+ ]
+ },
+ {
+ "input": "&approx;",
+ "description": "Named entity: approx; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2248"
+ ]
+ ]
+ },
+ {
+ "input": "&approxeq",
+ "description": "Bad named entity: approxeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&approxeq"
+ ]
+ ]
+ },
+ {
+ "input": "&approxeq;",
+ "description": "Named entity: approxeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224a"
+ ]
+ ]
+ },
+ {
+ "input": "&aring",
+ "description": "Named entity: aring without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e5"
+ ]
+ ]
+ },
+ {
+ "input": "&aring;",
+ "description": "Named entity: aring; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e5"
+ ]
+ ]
+ },
+ {
+ "input": "&ascr",
+ "description": "Bad named entity: ascr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ascr"
+ ]
+ ]
+ },
+ {
+ "input": "&ascr;",
+ "description": "Named entity: ascr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb6"
+ ]
+ ]
+ },
+ {
+ "input": "&ast",
+ "description": "Bad named entity: ast without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ast"
+ ]
+ ]
+ },
+ {
+ "input": "&ast;",
+ "description": "Named entity: ast; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "*"
+ ]
+ ]
+ },
+ {
+ "input": "&asymp",
+ "description": "Bad named entity: asymp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&asymp"
+ ]
+ ]
+ },
+ {
+ "input": "&asymp;",
+ "description": "Named entity: asymp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2248"
+ ]
+ ]
+ },
+ {
+ "input": "&asympeq",
+ "description": "Bad named entity: asympeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&asympeq"
+ ]
+ ]
+ },
+ {
+ "input": "&asympeq;",
+ "description": "Named entity: asympeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224d"
+ ]
+ ]
+ },
+ {
+ "input": "&atilde",
+ "description": "Named entity: atilde without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e3"
+ ]
+ ]
+ },
+ {
+ "input": "&atilde;",
+ "description": "Named entity: atilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e3"
+ ]
+ ]
+ },
+ {
+ "input": "&auml",
+ "description": "Named entity: auml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e4"
+ ]
+ ]
+ },
+ {
+ "input": "&auml;",
+ "description": "Named entity: auml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e4"
+ ]
+ ]
+ },
+ {
+ "input": "&awconint",
+ "description": "Bad named entity: awconint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&awconint"
+ ]
+ ]
+ },
+ {
+ "input": "&awconint;",
+ "description": "Named entity: awconint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2233"
+ ]
+ ]
+ },
+ {
+ "input": "&awint",
+ "description": "Bad named entity: awint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&awint"
+ ]
+ ]
+ },
+ {
+ "input": "&awint;",
+ "description": "Named entity: awint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a11"
+ ]
+ ]
+ },
+ {
+ "input": "&bNot",
+ "description": "Bad named entity: bNot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bNot"
+ ]
+ ]
+ },
+ {
+ "input": "&bNot;",
+ "description": "Named entity: bNot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aed"
+ ]
+ ]
+ },
+ {
+ "input": "&backcong",
+ "description": "Bad named entity: backcong without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&backcong"
+ ]
+ ]
+ },
+ {
+ "input": "&backcong;",
+ "description": "Named entity: backcong; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224c"
+ ]
+ ]
+ },
+ {
+ "input": "&backepsilon",
+ "description": "Bad named entity: backepsilon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&backepsilon"
+ ]
+ ]
+ },
+ {
+ "input": "&backepsilon;",
+ "description": "Named entity: backepsilon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f6"
+ ]
+ ]
+ },
+ {
+ "input": "&backprime",
+ "description": "Bad named entity: backprime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&backprime"
+ ]
+ ]
+ },
+ {
+ "input": "&backprime;",
+ "description": "Named entity: backprime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2035"
+ ]
+ ]
+ },
+ {
+ "input": "&backsim",
+ "description": "Bad named entity: backsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&backsim"
+ ]
+ ]
+ },
+ {
+ "input": "&backsim;",
+ "description": "Named entity: backsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223d"
+ ]
+ ]
+ },
+ {
+ "input": "&backsimeq",
+ "description": "Bad named entity: backsimeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&backsimeq"
+ ]
+ ]
+ },
+ {
+ "input": "&backsimeq;",
+ "description": "Named entity: backsimeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22cd"
+ ]
+ ]
+ },
+ {
+ "input": "&barvee",
+ "description": "Bad named entity: barvee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&barvee"
+ ]
+ ]
+ },
+ {
+ "input": "&barvee;",
+ "description": "Named entity: barvee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22bd"
+ ]
+ ]
+ },
+ {
+ "input": "&barwed",
+ "description": "Bad named entity: barwed without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&barwed"
+ ]
+ ]
+ },
+ {
+ "input": "&barwed;",
+ "description": "Named entity: barwed; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2305"
+ ]
+ ]
+ },
+ {
+ "input": "&barwedge",
+ "description": "Bad named entity: barwedge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&barwedge"
+ ]
+ ]
+ },
+ {
+ "input": "&barwedge;",
+ "description": "Named entity: barwedge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2305"
+ ]
+ ]
+ },
+ {
+ "input": "&bbrk",
+ "description": "Bad named entity: bbrk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bbrk"
+ ]
+ ]
+ },
+ {
+ "input": "&bbrk;",
+ "description": "Named entity: bbrk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b5"
+ ]
+ ]
+ },
+ {
+ "input": "&bbrktbrk",
+ "description": "Bad named entity: bbrktbrk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bbrktbrk"
+ ]
+ ]
+ },
+ {
+ "input": "&bbrktbrk;",
+ "description": "Named entity: bbrktbrk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b6"
+ ]
+ ]
+ },
+ {
+ "input": "&bcong",
+ "description": "Bad named entity: bcong without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bcong"
+ ]
+ ]
+ },
+ {
+ "input": "&bcong;",
+ "description": "Named entity: bcong; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224c"
+ ]
+ ]
+ },
+ {
+ "input": "&bcy",
+ "description": "Bad named entity: bcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bcy"
+ ]
+ ]
+ },
+ {
+ "input": "&bcy;",
+ "description": "Named entity: bcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0431"
+ ]
+ ]
+ },
+ {
+ "input": "&bdquo",
+ "description": "Bad named entity: bdquo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bdquo"
+ ]
+ ]
+ },
+ {
+ "input": "&bdquo;",
+ "description": "Named entity: bdquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201e"
+ ]
+ ]
+ },
+ {
+ "input": "&becaus",
+ "description": "Bad named entity: becaus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&becaus"
+ ]
+ ]
+ },
+ {
+ "input": "&becaus;",
+ "description": "Named entity: becaus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2235"
+ ]
+ ]
+ },
+ {
+ "input": "&because",
+ "description": "Bad named entity: because without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&because"
+ ]
+ ]
+ },
+ {
+ "input": "&because;",
+ "description": "Named entity: because; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2235"
+ ]
+ ]
+ },
+ {
+ "input": "&bemptyv",
+ "description": "Bad named entity: bemptyv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bemptyv"
+ ]
+ ]
+ },
+ {
+ "input": "&bemptyv;",
+ "description": "Named entity: bemptyv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b0"
+ ]
+ ]
+ },
+ {
+ "input": "&bepsi",
+ "description": "Bad named entity: bepsi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bepsi"
+ ]
+ ]
+ },
+ {
+ "input": "&bepsi;",
+ "description": "Named entity: bepsi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f6"
+ ]
+ ]
+ },
+ {
+ "input": "&bernou",
+ "description": "Bad named entity: bernou without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bernou"
+ ]
+ ]
+ },
+ {
+ "input": "&bernou;",
+ "description": "Named entity: bernou; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u212c"
+ ]
+ ]
+ },
+ {
+ "input": "&beta",
+ "description": "Bad named entity: beta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&beta"
+ ]
+ ]
+ },
+ {
+ "input": "&beta;",
+ "description": "Named entity: beta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b2"
+ ]
+ ]
+ },
+ {
+ "input": "&beth",
+ "description": "Bad named entity: beth without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&beth"
+ ]
+ ]
+ },
+ {
+ "input": "&beth;",
+ "description": "Named entity: beth; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2136"
+ ]
+ ]
+ },
+ {
+ "input": "&between",
+ "description": "Bad named entity: between without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&between"
+ ]
+ ]
+ },
+ {
+ "input": "&between;",
+ "description": "Named entity: between; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226c"
+ ]
+ ]
+ },
+ {
+ "input": "&bfr",
+ "description": "Bad named entity: bfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bfr"
+ ]
+ ]
+ },
+ {
+ "input": "&bfr;",
+ "description": "Named entity: bfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd1f"
+ ]
+ ]
+ },
+ {
+ "input": "&bigcap",
+ "description": "Bad named entity: bigcap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigcap"
+ ]
+ ]
+ },
+ {
+ "input": "&bigcap;",
+ "description": "Named entity: bigcap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c2"
+ ]
+ ]
+ },
+ {
+ "input": "&bigcirc",
+ "description": "Bad named entity: bigcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&bigcirc;",
+ "description": "Named entity: bigcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25ef"
+ ]
+ ]
+ },
+ {
+ "input": "&bigcup",
+ "description": "Bad named entity: bigcup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigcup"
+ ]
+ ]
+ },
+ {
+ "input": "&bigcup;",
+ "description": "Named entity: bigcup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c3"
+ ]
+ ]
+ },
+ {
+ "input": "&bigodot",
+ "description": "Bad named entity: bigodot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigodot"
+ ]
+ ]
+ },
+ {
+ "input": "&bigodot;",
+ "description": "Named entity: bigodot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a00"
+ ]
+ ]
+ },
+ {
+ "input": "&bigoplus",
+ "description": "Bad named entity: bigoplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigoplus"
+ ]
+ ]
+ },
+ {
+ "input": "&bigoplus;",
+ "description": "Named entity: bigoplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a01"
+ ]
+ ]
+ },
+ {
+ "input": "&bigotimes",
+ "description": "Bad named entity: bigotimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigotimes"
+ ]
+ ]
+ },
+ {
+ "input": "&bigotimes;",
+ "description": "Named entity: bigotimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a02"
+ ]
+ ]
+ },
+ {
+ "input": "&bigsqcup",
+ "description": "Bad named entity: bigsqcup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigsqcup"
+ ]
+ ]
+ },
+ {
+ "input": "&bigsqcup;",
+ "description": "Named entity: bigsqcup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a06"
+ ]
+ ]
+ },
+ {
+ "input": "&bigstar",
+ "description": "Bad named entity: bigstar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigstar"
+ ]
+ ]
+ },
+ {
+ "input": "&bigstar;",
+ "description": "Named entity: bigstar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2605"
+ ]
+ ]
+ },
+ {
+ "input": "&bigtriangledown",
+ "description": "Bad named entity: bigtriangledown without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigtriangledown"
+ ]
+ ]
+ },
+ {
+ "input": "&bigtriangledown;",
+ "description": "Named entity: bigtriangledown; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25bd"
+ ]
+ ]
+ },
+ {
+ "input": "&bigtriangleup",
+ "description": "Bad named entity: bigtriangleup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigtriangleup"
+ ]
+ ]
+ },
+ {
+ "input": "&bigtriangleup;",
+ "description": "Named entity: bigtriangleup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b3"
+ ]
+ ]
+ },
+ {
+ "input": "&biguplus",
+ "description": "Bad named entity: biguplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&biguplus"
+ ]
+ ]
+ },
+ {
+ "input": "&biguplus;",
+ "description": "Named entity: biguplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a04"
+ ]
+ ]
+ },
+ {
+ "input": "&bigvee",
+ "description": "Bad named entity: bigvee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigvee"
+ ]
+ ]
+ },
+ {
+ "input": "&bigvee;",
+ "description": "Named entity: bigvee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c1"
+ ]
+ ]
+ },
+ {
+ "input": "&bigwedge",
+ "description": "Bad named entity: bigwedge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bigwedge"
+ ]
+ ]
+ },
+ {
+ "input": "&bigwedge;",
+ "description": "Named entity: bigwedge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c0"
+ ]
+ ]
+ },
+ {
+ "input": "&bkarow",
+ "description": "Bad named entity: bkarow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bkarow"
+ ]
+ ]
+ },
+ {
+ "input": "&bkarow;",
+ "description": "Named entity: bkarow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u290d"
+ ]
+ ]
+ },
+ {
+ "input": "&blacklozenge",
+ "description": "Bad named entity: blacklozenge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blacklozenge"
+ ]
+ ]
+ },
+ {
+ "input": "&blacklozenge;",
+ "description": "Named entity: blacklozenge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29eb"
+ ]
+ ]
+ },
+ {
+ "input": "&blacksquare",
+ "description": "Bad named entity: blacksquare without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blacksquare"
+ ]
+ ]
+ },
+ {
+ "input": "&blacksquare;",
+ "description": "Named entity: blacksquare; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25aa"
+ ]
+ ]
+ },
+ {
+ "input": "&blacktriangle",
+ "description": "Bad named entity: blacktriangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blacktriangle"
+ ]
+ ]
+ },
+ {
+ "input": "&blacktriangle;",
+ "description": "Named entity: blacktriangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b4"
+ ]
+ ]
+ },
+ {
+ "input": "&blacktriangledown",
+ "description": "Bad named entity: blacktriangledown without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blacktriangledown"
+ ]
+ ]
+ },
+ {
+ "input": "&blacktriangledown;",
+ "description": "Named entity: blacktriangledown; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25be"
+ ]
+ ]
+ },
+ {
+ "input": "&blacktriangleleft",
+ "description": "Bad named entity: blacktriangleleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blacktriangleleft"
+ ]
+ ]
+ },
+ {
+ "input": "&blacktriangleleft;",
+ "description": "Named entity: blacktriangleleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25c2"
+ ]
+ ]
+ },
+ {
+ "input": "&blacktriangleright",
+ "description": "Bad named entity: blacktriangleright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blacktriangleright"
+ ]
+ ]
+ },
+ {
+ "input": "&blacktriangleright;",
+ "description": "Named entity: blacktriangleright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b8"
+ ]
+ ]
+ },
+ {
+ "input": "&blank",
+ "description": "Bad named entity: blank without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blank"
+ ]
+ ]
+ },
+ {
+ "input": "&blank;",
+ "description": "Named entity: blank; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2423"
+ ]
+ ]
+ },
+ {
+ "input": "&blk12",
+ "description": "Bad named entity: blk12 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blk12"
+ ]
+ ]
+ },
+ {
+ "input": "&blk12;",
+ "description": "Named entity: blk12; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2592"
+ ]
+ ]
+ },
+ {
+ "input": "&blk14",
+ "description": "Bad named entity: blk14 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blk14"
+ ]
+ ]
+ },
+ {
+ "input": "&blk14;",
+ "description": "Named entity: blk14; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2591"
+ ]
+ ]
+ },
+ {
+ "input": "&blk34",
+ "description": "Bad named entity: blk34 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&blk34"
+ ]
+ ]
+ },
+ {
+ "input": "&blk34;",
+ "description": "Named entity: blk34; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2593"
+ ]
+ ]
+ },
+ {
+ "input": "&block",
+ "description": "Bad named entity: block without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&block"
+ ]
+ ]
+ },
+ {
+ "input": "&block;",
+ "description": "Named entity: block; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2588"
+ ]
+ ]
+ },
+ {
+ "input": "&bne",
+ "description": "Bad named entity: bne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bne"
+ ]
+ ]
+ },
+ {
+ "input": "&bne;",
+ "description": "Named entity: bne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "=\u20e5"
+ ]
+ ]
+ },
+ {
+ "input": "&bnequiv",
+ "description": "Bad named entity: bnequiv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bnequiv"
+ ]
+ ]
+ },
+ {
+ "input": "&bnequiv;",
+ "description": "Named entity: bnequiv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2261\u20e5"
+ ]
+ ]
+ },
+ {
+ "input": "&bnot",
+ "description": "Bad named entity: bnot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bnot"
+ ]
+ ]
+ },
+ {
+ "input": "&bnot;",
+ "description": "Named entity: bnot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2310"
+ ]
+ ]
+ },
+ {
+ "input": "&bopf",
+ "description": "Bad named entity: bopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bopf"
+ ]
+ ]
+ },
+ {
+ "input": "&bopf;",
+ "description": "Named entity: bopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd53"
+ ]
+ ]
+ },
+ {
+ "input": "&bot",
+ "description": "Bad named entity: bot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bot"
+ ]
+ ]
+ },
+ {
+ "input": "&bot;",
+ "description": "Named entity: bot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a5"
+ ]
+ ]
+ },
+ {
+ "input": "&bottom",
+ "description": "Bad named entity: bottom without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bottom"
+ ]
+ ]
+ },
+ {
+ "input": "&bottom;",
+ "description": "Named entity: bottom; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a5"
+ ]
+ ]
+ },
+ {
+ "input": "&bowtie",
+ "description": "Bad named entity: bowtie without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bowtie"
+ ]
+ ]
+ },
+ {
+ "input": "&bowtie;",
+ "description": "Named entity: bowtie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c8"
+ ]
+ ]
+ },
+ {
+ "input": "&boxDL",
+ "description": "Bad named entity: boxDL without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxDL"
+ ]
+ ]
+ },
+ {
+ "input": "&boxDL;",
+ "description": "Named entity: boxDL; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2557"
+ ]
+ ]
+ },
+ {
+ "input": "&boxDR",
+ "description": "Bad named entity: boxDR without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxDR"
+ ]
+ ]
+ },
+ {
+ "input": "&boxDR;",
+ "description": "Named entity: boxDR; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2554"
+ ]
+ ]
+ },
+ {
+ "input": "&boxDl",
+ "description": "Bad named entity: boxDl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxDl"
+ ]
+ ]
+ },
+ {
+ "input": "&boxDl;",
+ "description": "Named entity: boxDl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2556"
+ ]
+ ]
+ },
+ {
+ "input": "&boxDr",
+ "description": "Bad named entity: boxDr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxDr"
+ ]
+ ]
+ },
+ {
+ "input": "&boxDr;",
+ "description": "Named entity: boxDr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2553"
+ ]
+ ]
+ },
+ {
+ "input": "&boxH",
+ "description": "Bad named entity: boxH without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxH"
+ ]
+ ]
+ },
+ {
+ "input": "&boxH;",
+ "description": "Named entity: boxH; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2550"
+ ]
+ ]
+ },
+ {
+ "input": "&boxHD",
+ "description": "Bad named entity: boxHD without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxHD"
+ ]
+ ]
+ },
+ {
+ "input": "&boxHD;",
+ "description": "Named entity: boxHD; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2566"
+ ]
+ ]
+ },
+ {
+ "input": "&boxHU",
+ "description": "Bad named entity: boxHU without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxHU"
+ ]
+ ]
+ },
+ {
+ "input": "&boxHU;",
+ "description": "Named entity: boxHU; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2569"
+ ]
+ ]
+ },
+ {
+ "input": "&boxHd",
+ "description": "Bad named entity: boxHd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxHd"
+ ]
+ ]
+ },
+ {
+ "input": "&boxHd;",
+ "description": "Named entity: boxHd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2564"
+ ]
+ ]
+ },
+ {
+ "input": "&boxHu",
+ "description": "Bad named entity: boxHu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxHu"
+ ]
+ ]
+ },
+ {
+ "input": "&boxHu;",
+ "description": "Named entity: boxHu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2567"
+ ]
+ ]
+ },
+ {
+ "input": "&boxUL",
+ "description": "Bad named entity: boxUL without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxUL"
+ ]
+ ]
+ },
+ {
+ "input": "&boxUL;",
+ "description": "Named entity: boxUL; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u255d"
+ ]
+ ]
+ },
+ {
+ "input": "&boxUR",
+ "description": "Bad named entity: boxUR without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxUR"
+ ]
+ ]
+ },
+ {
+ "input": "&boxUR;",
+ "description": "Named entity: boxUR; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u255a"
+ ]
+ ]
+ },
+ {
+ "input": "&boxUl",
+ "description": "Bad named entity: boxUl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxUl"
+ ]
+ ]
+ },
+ {
+ "input": "&boxUl;",
+ "description": "Named entity: boxUl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u255c"
+ ]
+ ]
+ },
+ {
+ "input": "&boxUr",
+ "description": "Bad named entity: boxUr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxUr"
+ ]
+ ]
+ },
+ {
+ "input": "&boxUr;",
+ "description": "Named entity: boxUr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2559"
+ ]
+ ]
+ },
+ {
+ "input": "&boxV",
+ "description": "Bad named entity: boxV without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxV"
+ ]
+ ]
+ },
+ {
+ "input": "&boxV;",
+ "description": "Named entity: boxV; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2551"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVH",
+ "description": "Bad named entity: boxVH without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxVH"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVH;",
+ "description": "Named entity: boxVH; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u256c"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVL",
+ "description": "Bad named entity: boxVL without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxVL"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVL;",
+ "description": "Named entity: boxVL; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2563"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVR",
+ "description": "Bad named entity: boxVR without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxVR"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVR;",
+ "description": "Named entity: boxVR; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2560"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVh",
+ "description": "Bad named entity: boxVh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxVh"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVh;",
+ "description": "Named entity: boxVh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u256b"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVl",
+ "description": "Bad named entity: boxVl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxVl"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVl;",
+ "description": "Named entity: boxVl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2562"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVr",
+ "description": "Bad named entity: boxVr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxVr"
+ ]
+ ]
+ },
+ {
+ "input": "&boxVr;",
+ "description": "Named entity: boxVr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u255f"
+ ]
+ ]
+ },
+ {
+ "input": "&boxbox",
+ "description": "Bad named entity: boxbox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxbox"
+ ]
+ ]
+ },
+ {
+ "input": "&boxbox;",
+ "description": "Named entity: boxbox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29c9"
+ ]
+ ]
+ },
+ {
+ "input": "&boxdL",
+ "description": "Bad named entity: boxdL without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxdL"
+ ]
+ ]
+ },
+ {
+ "input": "&boxdL;",
+ "description": "Named entity: boxdL; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2555"
+ ]
+ ]
+ },
+ {
+ "input": "&boxdR",
+ "description": "Bad named entity: boxdR without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxdR"
+ ]
+ ]
+ },
+ {
+ "input": "&boxdR;",
+ "description": "Named entity: boxdR; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2552"
+ ]
+ ]
+ },
+ {
+ "input": "&boxdl",
+ "description": "Bad named entity: boxdl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxdl"
+ ]
+ ]
+ },
+ {
+ "input": "&boxdl;",
+ "description": "Named entity: boxdl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2510"
+ ]
+ ]
+ },
+ {
+ "input": "&boxdr",
+ "description": "Bad named entity: boxdr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxdr"
+ ]
+ ]
+ },
+ {
+ "input": "&boxdr;",
+ "description": "Named entity: boxdr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u250c"
+ ]
+ ]
+ },
+ {
+ "input": "&boxh",
+ "description": "Bad named entity: boxh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxh"
+ ]
+ ]
+ },
+ {
+ "input": "&boxh;",
+ "description": "Named entity: boxh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2500"
+ ]
+ ]
+ },
+ {
+ "input": "&boxhD",
+ "description": "Bad named entity: boxhD without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxhD"
+ ]
+ ]
+ },
+ {
+ "input": "&boxhD;",
+ "description": "Named entity: boxhD; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2565"
+ ]
+ ]
+ },
+ {
+ "input": "&boxhU",
+ "description": "Bad named entity: boxhU without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxhU"
+ ]
+ ]
+ },
+ {
+ "input": "&boxhU;",
+ "description": "Named entity: boxhU; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2568"
+ ]
+ ]
+ },
+ {
+ "input": "&boxhd",
+ "description": "Bad named entity: boxhd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxhd"
+ ]
+ ]
+ },
+ {
+ "input": "&boxhd;",
+ "description": "Named entity: boxhd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u252c"
+ ]
+ ]
+ },
+ {
+ "input": "&boxhu",
+ "description": "Bad named entity: boxhu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxhu"
+ ]
+ ]
+ },
+ {
+ "input": "&boxhu;",
+ "description": "Named entity: boxhu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2534"
+ ]
+ ]
+ },
+ {
+ "input": "&boxminus",
+ "description": "Bad named entity: boxminus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxminus"
+ ]
+ ]
+ },
+ {
+ "input": "&boxminus;",
+ "description": "Named entity: boxminus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229f"
+ ]
+ ]
+ },
+ {
+ "input": "&boxplus",
+ "description": "Bad named entity: boxplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxplus"
+ ]
+ ]
+ },
+ {
+ "input": "&boxplus;",
+ "description": "Named entity: boxplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229e"
+ ]
+ ]
+ },
+ {
+ "input": "&boxtimes",
+ "description": "Bad named entity: boxtimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxtimes"
+ ]
+ ]
+ },
+ {
+ "input": "&boxtimes;",
+ "description": "Named entity: boxtimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a0"
+ ]
+ ]
+ },
+ {
+ "input": "&boxuL",
+ "description": "Bad named entity: boxuL without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxuL"
+ ]
+ ]
+ },
+ {
+ "input": "&boxuL;",
+ "description": "Named entity: boxuL; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u255b"
+ ]
+ ]
+ },
+ {
+ "input": "&boxuR",
+ "description": "Bad named entity: boxuR without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxuR"
+ ]
+ ]
+ },
+ {
+ "input": "&boxuR;",
+ "description": "Named entity: boxuR; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2558"
+ ]
+ ]
+ },
+ {
+ "input": "&boxul",
+ "description": "Bad named entity: boxul without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxul"
+ ]
+ ]
+ },
+ {
+ "input": "&boxul;",
+ "description": "Named entity: boxul; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2518"
+ ]
+ ]
+ },
+ {
+ "input": "&boxur",
+ "description": "Bad named entity: boxur without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxur"
+ ]
+ ]
+ },
+ {
+ "input": "&boxur;",
+ "description": "Named entity: boxur; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2514"
+ ]
+ ]
+ },
+ {
+ "input": "&boxv",
+ "description": "Bad named entity: boxv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxv"
+ ]
+ ]
+ },
+ {
+ "input": "&boxv;",
+ "description": "Named entity: boxv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2502"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvH",
+ "description": "Bad named entity: boxvH without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxvH"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvH;",
+ "description": "Named entity: boxvH; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u256a"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvL",
+ "description": "Bad named entity: boxvL without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxvL"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvL;",
+ "description": "Named entity: boxvL; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2561"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvR",
+ "description": "Bad named entity: boxvR without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxvR"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvR;",
+ "description": "Named entity: boxvR; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u255e"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvh",
+ "description": "Bad named entity: boxvh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxvh"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvh;",
+ "description": "Named entity: boxvh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u253c"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvl",
+ "description": "Bad named entity: boxvl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxvl"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvl;",
+ "description": "Named entity: boxvl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2524"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvr",
+ "description": "Bad named entity: boxvr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&boxvr"
+ ]
+ ]
+ },
+ {
+ "input": "&boxvr;",
+ "description": "Named entity: boxvr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u251c"
+ ]
+ ]
+ },
+ {
+ "input": "&bprime",
+ "description": "Bad named entity: bprime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bprime"
+ ]
+ ]
+ },
+ {
+ "input": "&bprime;",
+ "description": "Named entity: bprime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2035"
+ ]
+ ]
+ },
+ {
+ "input": "&breve",
+ "description": "Bad named entity: breve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&breve"
+ ]
+ ]
+ },
+ {
+ "input": "&breve;",
+ "description": "Named entity: breve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02d8"
+ ]
+ ]
+ },
+ {
+ "input": "&brvbar",
+ "description": "Named entity: brvbar without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a6"
+ ]
+ ]
+ },
+ {
+ "input": "&brvbar;",
+ "description": "Named entity: brvbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a6"
+ ]
+ ]
+ },
+ {
+ "input": "&bscr",
+ "description": "Bad named entity: bscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bscr"
+ ]
+ ]
+ },
+ {
+ "input": "&bscr;",
+ "description": "Named entity: bscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb7"
+ ]
+ ]
+ },
+ {
+ "input": "&bsemi",
+ "description": "Bad named entity: bsemi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bsemi"
+ ]
+ ]
+ },
+ {
+ "input": "&bsemi;",
+ "description": "Named entity: bsemi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u204f"
+ ]
+ ]
+ },
+ {
+ "input": "&bsim",
+ "description": "Bad named entity: bsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bsim"
+ ]
+ ]
+ },
+ {
+ "input": "&bsim;",
+ "description": "Named entity: bsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223d"
+ ]
+ ]
+ },
+ {
+ "input": "&bsime",
+ "description": "Bad named entity: bsime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bsime"
+ ]
+ ]
+ },
+ {
+ "input": "&bsime;",
+ "description": "Named entity: bsime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22cd"
+ ]
+ ]
+ },
+ {
+ "input": "&bsol",
+ "description": "Bad named entity: bsol without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bsol"
+ ]
+ ]
+ },
+ {
+ "input": "&bsol;",
+ "description": "Named entity: bsol; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\\"
+ ]
+ ]
+ },
+ {
+ "input": "&bsolb",
+ "description": "Bad named entity: bsolb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bsolb"
+ ]
+ ]
+ },
+ {
+ "input": "&bsolb;",
+ "description": "Named entity: bsolb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29c5"
+ ]
+ ]
+ },
+ {
+ "input": "&bsolhsub",
+ "description": "Bad named entity: bsolhsub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bsolhsub"
+ ]
+ ]
+ },
+ {
+ "input": "&bsolhsub;",
+ "description": "Named entity: bsolhsub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27c8"
+ ]
+ ]
+ },
+ {
+ "input": "&bull",
+ "description": "Bad named entity: bull without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bull"
+ ]
+ ]
+ },
+ {
+ "input": "&bull;",
+ "description": "Named entity: bull; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2022"
+ ]
+ ]
+ },
+ {
+ "input": "&bullet",
+ "description": "Bad named entity: bullet without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bullet"
+ ]
+ ]
+ },
+ {
+ "input": "&bullet;",
+ "description": "Named entity: bullet; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2022"
+ ]
+ ]
+ },
+ {
+ "input": "&bump",
+ "description": "Bad named entity: bump without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bump"
+ ]
+ ]
+ },
+ {
+ "input": "&bump;",
+ "description": "Named entity: bump; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224e"
+ ]
+ ]
+ },
+ {
+ "input": "&bumpE",
+ "description": "Bad named entity: bumpE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bumpE"
+ ]
+ ]
+ },
+ {
+ "input": "&bumpE;",
+ "description": "Named entity: bumpE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aae"
+ ]
+ ]
+ },
+ {
+ "input": "&bumpe",
+ "description": "Bad named entity: bumpe without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bumpe"
+ ]
+ ]
+ },
+ {
+ "input": "&bumpe;",
+ "description": "Named entity: bumpe; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224f"
+ ]
+ ]
+ },
+ {
+ "input": "&bumpeq",
+ "description": "Bad named entity: bumpeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&bumpeq"
+ ]
+ ]
+ },
+ {
+ "input": "&bumpeq;",
+ "description": "Named entity: bumpeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224f"
+ ]
+ ]
+ },
+ {
+ "input": "&cacute",
+ "description": "Bad named entity: cacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cacute"
+ ]
+ ]
+ },
+ {
+ "input": "&cacute;",
+ "description": "Named entity: cacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0107"
+ ]
+ ]
+ },
+ {
+ "input": "&cap",
+ "description": "Bad named entity: cap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cap"
+ ]
+ ]
+ },
+ {
+ "input": "&cap;",
+ "description": "Named entity: cap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2229"
+ ]
+ ]
+ },
+ {
+ "input": "&capand",
+ "description": "Bad named entity: capand without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&capand"
+ ]
+ ]
+ },
+ {
+ "input": "&capand;",
+ "description": "Named entity: capand; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a44"
+ ]
+ ]
+ },
+ {
+ "input": "&capbrcup",
+ "description": "Bad named entity: capbrcup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&capbrcup"
+ ]
+ ]
+ },
+ {
+ "input": "&capbrcup;",
+ "description": "Named entity: capbrcup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a49"
+ ]
+ ]
+ },
+ {
+ "input": "&capcap",
+ "description": "Bad named entity: capcap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&capcap"
+ ]
+ ]
+ },
+ {
+ "input": "&capcap;",
+ "description": "Named entity: capcap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a4b"
+ ]
+ ]
+ },
+ {
+ "input": "&capcup",
+ "description": "Bad named entity: capcup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&capcup"
+ ]
+ ]
+ },
+ {
+ "input": "&capcup;",
+ "description": "Named entity: capcup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a47"
+ ]
+ ]
+ },
+ {
+ "input": "&capdot",
+ "description": "Bad named entity: capdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&capdot"
+ ]
+ ]
+ },
+ {
+ "input": "&capdot;",
+ "description": "Named entity: capdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a40"
+ ]
+ ]
+ },
+ {
+ "input": "&caps",
+ "description": "Bad named entity: caps without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&caps"
+ ]
+ ]
+ },
+ {
+ "input": "&caps;",
+ "description": "Named entity: caps; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2229\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&caret",
+ "description": "Bad named entity: caret without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&caret"
+ ]
+ ]
+ },
+ {
+ "input": "&caret;",
+ "description": "Named entity: caret; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2041"
+ ]
+ ]
+ },
+ {
+ "input": "&caron",
+ "description": "Bad named entity: caron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&caron"
+ ]
+ ]
+ },
+ {
+ "input": "&caron;",
+ "description": "Named entity: caron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02c7"
+ ]
+ ]
+ },
+ {
+ "input": "&ccaps",
+ "description": "Bad named entity: ccaps without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ccaps"
+ ]
+ ]
+ },
+ {
+ "input": "&ccaps;",
+ "description": "Named entity: ccaps; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a4d"
+ ]
+ ]
+ },
+ {
+ "input": "&ccaron",
+ "description": "Bad named entity: ccaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ccaron"
+ ]
+ ]
+ },
+ {
+ "input": "&ccaron;",
+ "description": "Named entity: ccaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u010d"
+ ]
+ ]
+ },
+ {
+ "input": "&ccedil",
+ "description": "Named entity: ccedil without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e7"
+ ]
+ ]
+ },
+ {
+ "input": "&ccedil;",
+ "description": "Named entity: ccedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e7"
+ ]
+ ]
+ },
+ {
+ "input": "&ccirc",
+ "description": "Bad named entity: ccirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ccirc"
+ ]
+ ]
+ },
+ {
+ "input": "&ccirc;",
+ "description": "Named entity: ccirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0109"
+ ]
+ ]
+ },
+ {
+ "input": "&ccups",
+ "description": "Bad named entity: ccups without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ccups"
+ ]
+ ]
+ },
+ {
+ "input": "&ccups;",
+ "description": "Named entity: ccups; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a4c"
+ ]
+ ]
+ },
+ {
+ "input": "&ccupssm",
+ "description": "Bad named entity: ccupssm without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ccupssm"
+ ]
+ ]
+ },
+ {
+ "input": "&ccupssm;",
+ "description": "Named entity: ccupssm; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a50"
+ ]
+ ]
+ },
+ {
+ "input": "&cdot",
+ "description": "Bad named entity: cdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cdot"
+ ]
+ ]
+ },
+ {
+ "input": "&cdot;",
+ "description": "Named entity: cdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u010b"
+ ]
+ ]
+ },
+ {
+ "input": "&cedil",
+ "description": "Named entity: cedil without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b8"
+ ]
+ ]
+ },
+ {
+ "input": "&cedil;",
+ "description": "Named entity: cedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b8"
+ ]
+ ]
+ },
+ {
+ "input": "&cemptyv",
+ "description": "Bad named entity: cemptyv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cemptyv"
+ ]
+ ]
+ },
+ {
+ "input": "&cemptyv;",
+ "description": "Named entity: cemptyv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b2"
+ ]
+ ]
+ },
+ {
+ "input": "&cent",
+ "description": "Named entity: cent without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a2"
+ ]
+ ]
+ },
+ {
+ "input": "&cent;",
+ "description": "Named entity: cent; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a2"
+ ]
+ ]
+ },
+ {
+ "input": "&centerdot;",
+ "description": "Named entity: centerdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b7"
+ ]
+ ]
+ },
+ {
+ "input": "&cfr",
+ "description": "Bad named entity: cfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cfr"
+ ]
+ ]
+ },
+ {
+ "input": "&cfr;",
+ "description": "Named entity: cfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd20"
+ ]
+ ]
+ },
+ {
+ "input": "&chcy",
+ "description": "Bad named entity: chcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&chcy"
+ ]
+ ]
+ },
+ {
+ "input": "&chcy;",
+ "description": "Named entity: chcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0447"
+ ]
+ ]
+ },
+ {
+ "input": "&check",
+ "description": "Bad named entity: check without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&check"
+ ]
+ ]
+ },
+ {
+ "input": "&check;",
+ "description": "Named entity: check; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2713"
+ ]
+ ]
+ },
+ {
+ "input": "&checkmark",
+ "description": "Bad named entity: checkmark without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&checkmark"
+ ]
+ ]
+ },
+ {
+ "input": "&checkmark;",
+ "description": "Named entity: checkmark; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2713"
+ ]
+ ]
+ },
+ {
+ "input": "&chi",
+ "description": "Bad named entity: chi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&chi"
+ ]
+ ]
+ },
+ {
+ "input": "&chi;",
+ "description": "Named entity: chi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c7"
+ ]
+ ]
+ },
+ {
+ "input": "&cir",
+ "description": "Bad named entity: cir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cir"
+ ]
+ ]
+ },
+ {
+ "input": "&cir;",
+ "description": "Named entity: cir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25cb"
+ ]
+ ]
+ },
+ {
+ "input": "&cirE",
+ "description": "Bad named entity: cirE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cirE"
+ ]
+ ]
+ },
+ {
+ "input": "&cirE;",
+ "description": "Named entity: cirE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29c3"
+ ]
+ ]
+ },
+ {
+ "input": "&circ",
+ "description": "Bad named entity: circ without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circ"
+ ]
+ ]
+ },
+ {
+ "input": "&circ;",
+ "description": "Named entity: circ; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02c6"
+ ]
+ ]
+ },
+ {
+ "input": "&circeq",
+ "description": "Bad named entity: circeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circeq"
+ ]
+ ]
+ },
+ {
+ "input": "&circeq;",
+ "description": "Named entity: circeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2257"
+ ]
+ ]
+ },
+ {
+ "input": "&circlearrowleft",
+ "description": "Bad named entity: circlearrowleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circlearrowleft"
+ ]
+ ]
+ },
+ {
+ "input": "&circlearrowleft;",
+ "description": "Named entity: circlearrowleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ba"
+ ]
+ ]
+ },
+ {
+ "input": "&circlearrowright",
+ "description": "Bad named entity: circlearrowright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circlearrowright"
+ ]
+ ]
+ },
+ {
+ "input": "&circlearrowright;",
+ "description": "Named entity: circlearrowright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bb"
+ ]
+ ]
+ },
+ {
+ "input": "&circledR",
+ "description": "Bad named entity: circledR without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circledR"
+ ]
+ ]
+ },
+ {
+ "input": "&circledR;",
+ "description": "Named entity: circledR; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ae"
+ ]
+ ]
+ },
+ {
+ "input": "&circledS",
+ "description": "Bad named entity: circledS without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circledS"
+ ]
+ ]
+ },
+ {
+ "input": "&circledS;",
+ "description": "Named entity: circledS; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u24c8"
+ ]
+ ]
+ },
+ {
+ "input": "&circledast",
+ "description": "Bad named entity: circledast without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circledast"
+ ]
+ ]
+ },
+ {
+ "input": "&circledast;",
+ "description": "Named entity: circledast; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229b"
+ ]
+ ]
+ },
+ {
+ "input": "&circledcirc",
+ "description": "Bad named entity: circledcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circledcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&circledcirc;",
+ "description": "Named entity: circledcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229a"
+ ]
+ ]
+ },
+ {
+ "input": "&circleddash",
+ "description": "Bad named entity: circleddash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&circleddash"
+ ]
+ ]
+ },
+ {
+ "input": "&circleddash;",
+ "description": "Named entity: circleddash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229d"
+ ]
+ ]
+ },
+ {
+ "input": "&cire",
+ "description": "Bad named entity: cire without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cire"
+ ]
+ ]
+ },
+ {
+ "input": "&cire;",
+ "description": "Named entity: cire; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2257"
+ ]
+ ]
+ },
+ {
+ "input": "&cirfnint",
+ "description": "Bad named entity: cirfnint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cirfnint"
+ ]
+ ]
+ },
+ {
+ "input": "&cirfnint;",
+ "description": "Named entity: cirfnint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a10"
+ ]
+ ]
+ },
+ {
+ "input": "&cirmid",
+ "description": "Bad named entity: cirmid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cirmid"
+ ]
+ ]
+ },
+ {
+ "input": "&cirmid;",
+ "description": "Named entity: cirmid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aef"
+ ]
+ ]
+ },
+ {
+ "input": "&cirscir",
+ "description": "Bad named entity: cirscir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cirscir"
+ ]
+ ]
+ },
+ {
+ "input": "&cirscir;",
+ "description": "Named entity: cirscir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29c2"
+ ]
+ ]
+ },
+ {
+ "input": "&clubs",
+ "description": "Bad named entity: clubs without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&clubs"
+ ]
+ ]
+ },
+ {
+ "input": "&clubs;",
+ "description": "Named entity: clubs; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2663"
+ ]
+ ]
+ },
+ {
+ "input": "&clubsuit",
+ "description": "Bad named entity: clubsuit without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&clubsuit"
+ ]
+ ]
+ },
+ {
+ "input": "&clubsuit;",
+ "description": "Named entity: clubsuit; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2663"
+ ]
+ ]
+ },
+ {
+ "input": "&colon",
+ "description": "Bad named entity: colon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&colon"
+ ]
+ ]
+ },
+ {
+ "input": "&colon;",
+ "description": "Named entity: colon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ ":"
+ ]
+ ]
+ },
+ {
+ "input": "&colone",
+ "description": "Bad named entity: colone without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&colone"
+ ]
+ ]
+ },
+ {
+ "input": "&colone;",
+ "description": "Named entity: colone; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2254"
+ ]
+ ]
+ },
+ {
+ "input": "&coloneq",
+ "description": "Bad named entity: coloneq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&coloneq"
+ ]
+ ]
+ },
+ {
+ "input": "&coloneq;",
+ "description": "Named entity: coloneq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2254"
+ ]
+ ]
+ },
+ {
+ "input": "&comma",
+ "description": "Bad named entity: comma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&comma"
+ ]
+ ]
+ },
+ {
+ "input": "&comma;",
+ "description": "Named entity: comma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ ","
+ ]
+ ]
+ },
+ {
+ "input": "&commat",
+ "description": "Bad named entity: commat without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&commat"
+ ]
+ ]
+ },
+ {
+ "input": "&commat;",
+ "description": "Named entity: commat; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "@"
+ ]
+ ]
+ },
+ {
+ "input": "&comp",
+ "description": "Bad named entity: comp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&comp"
+ ]
+ ]
+ },
+ {
+ "input": "&comp;",
+ "description": "Named entity: comp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2201"
+ ]
+ ]
+ },
+ {
+ "input": "&compfn",
+ "description": "Bad named entity: compfn without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&compfn"
+ ]
+ ]
+ },
+ {
+ "input": "&compfn;",
+ "description": "Named entity: compfn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2218"
+ ]
+ ]
+ },
+ {
+ "input": "&complement",
+ "description": "Bad named entity: complement without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&complement"
+ ]
+ ]
+ },
+ {
+ "input": "&complement;",
+ "description": "Named entity: complement; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2201"
+ ]
+ ]
+ },
+ {
+ "input": "&complexes",
+ "description": "Bad named entity: complexes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&complexes"
+ ]
+ ]
+ },
+ {
+ "input": "&complexes;",
+ "description": "Named entity: complexes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2102"
+ ]
+ ]
+ },
+ {
+ "input": "&cong",
+ "description": "Bad named entity: cong without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cong"
+ ]
+ ]
+ },
+ {
+ "input": "&cong;",
+ "description": "Named entity: cong; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2245"
+ ]
+ ]
+ },
+ {
+ "input": "&congdot",
+ "description": "Bad named entity: congdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&congdot"
+ ]
+ ]
+ },
+ {
+ "input": "&congdot;",
+ "description": "Named entity: congdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a6d"
+ ]
+ ]
+ },
+ {
+ "input": "&conint",
+ "description": "Bad named entity: conint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&conint"
+ ]
+ ]
+ },
+ {
+ "input": "&conint;",
+ "description": "Named entity: conint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222e"
+ ]
+ ]
+ },
+ {
+ "input": "&copf",
+ "description": "Bad named entity: copf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&copf"
+ ]
+ ]
+ },
+ {
+ "input": "&copf;",
+ "description": "Named entity: copf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd54"
+ ]
+ ]
+ },
+ {
+ "input": "&coprod",
+ "description": "Bad named entity: coprod without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&coprod"
+ ]
+ ]
+ },
+ {
+ "input": "&coprod;",
+ "description": "Named entity: coprod; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2210"
+ ]
+ ]
+ },
+ {
+ "input": "&copy",
+ "description": "Named entity: copy without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a9"
+ ]
+ ]
+ },
+ {
+ "input": "&copy;",
+ "description": "Named entity: copy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a9"
+ ]
+ ]
+ },
+ {
+ "input": "&copysr;",
+ "description": "Named entity: copysr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2117"
+ ]
+ ]
+ },
+ {
+ "input": "&crarr",
+ "description": "Bad named entity: crarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&crarr"
+ ]
+ ]
+ },
+ {
+ "input": "&crarr;",
+ "description": "Named entity: crarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b5"
+ ]
+ ]
+ },
+ {
+ "input": "&cross",
+ "description": "Bad named entity: cross without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cross"
+ ]
+ ]
+ },
+ {
+ "input": "&cross;",
+ "description": "Named entity: cross; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2717"
+ ]
+ ]
+ },
+ {
+ "input": "&cscr",
+ "description": "Bad named entity: cscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cscr"
+ ]
+ ]
+ },
+ {
+ "input": "&cscr;",
+ "description": "Named entity: cscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb8"
+ ]
+ ]
+ },
+ {
+ "input": "&csub",
+ "description": "Bad named entity: csub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&csub"
+ ]
+ ]
+ },
+ {
+ "input": "&csub;",
+ "description": "Named entity: csub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acf"
+ ]
+ ]
+ },
+ {
+ "input": "&csube",
+ "description": "Bad named entity: csube without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&csube"
+ ]
+ ]
+ },
+ {
+ "input": "&csube;",
+ "description": "Named entity: csube; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad1"
+ ]
+ ]
+ },
+ {
+ "input": "&csup",
+ "description": "Bad named entity: csup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&csup"
+ ]
+ ]
+ },
+ {
+ "input": "&csup;",
+ "description": "Named entity: csup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad0"
+ ]
+ ]
+ },
+ {
+ "input": "&csupe",
+ "description": "Bad named entity: csupe without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&csupe"
+ ]
+ ]
+ },
+ {
+ "input": "&csupe;",
+ "description": "Named entity: csupe; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad2"
+ ]
+ ]
+ },
+ {
+ "input": "&ctdot",
+ "description": "Bad named entity: ctdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ctdot"
+ ]
+ ]
+ },
+ {
+ "input": "&ctdot;",
+ "description": "Named entity: ctdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ef"
+ ]
+ ]
+ },
+ {
+ "input": "&cudarrl",
+ "description": "Bad named entity: cudarrl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cudarrl"
+ ]
+ ]
+ },
+ {
+ "input": "&cudarrl;",
+ "description": "Named entity: cudarrl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2938"
+ ]
+ ]
+ },
+ {
+ "input": "&cudarrr",
+ "description": "Bad named entity: cudarrr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cudarrr"
+ ]
+ ]
+ },
+ {
+ "input": "&cudarrr;",
+ "description": "Named entity: cudarrr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2935"
+ ]
+ ]
+ },
+ {
+ "input": "&cuepr",
+ "description": "Bad named entity: cuepr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cuepr"
+ ]
+ ]
+ },
+ {
+ "input": "&cuepr;",
+ "description": "Named entity: cuepr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22de"
+ ]
+ ]
+ },
+ {
+ "input": "&cuesc",
+ "description": "Bad named entity: cuesc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cuesc"
+ ]
+ ]
+ },
+ {
+ "input": "&cuesc;",
+ "description": "Named entity: cuesc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22df"
+ ]
+ ]
+ },
+ {
+ "input": "&cularr",
+ "description": "Bad named entity: cularr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cularr"
+ ]
+ ]
+ },
+ {
+ "input": "&cularr;",
+ "description": "Named entity: cularr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b6"
+ ]
+ ]
+ },
+ {
+ "input": "&cularrp",
+ "description": "Bad named entity: cularrp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cularrp"
+ ]
+ ]
+ },
+ {
+ "input": "&cularrp;",
+ "description": "Named entity: cularrp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u293d"
+ ]
+ ]
+ },
+ {
+ "input": "&cup",
+ "description": "Bad named entity: cup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cup"
+ ]
+ ]
+ },
+ {
+ "input": "&cup;",
+ "description": "Named entity: cup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222a"
+ ]
+ ]
+ },
+ {
+ "input": "&cupbrcap",
+ "description": "Bad named entity: cupbrcap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cupbrcap"
+ ]
+ ]
+ },
+ {
+ "input": "&cupbrcap;",
+ "description": "Named entity: cupbrcap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a48"
+ ]
+ ]
+ },
+ {
+ "input": "&cupcap",
+ "description": "Bad named entity: cupcap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cupcap"
+ ]
+ ]
+ },
+ {
+ "input": "&cupcap;",
+ "description": "Named entity: cupcap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a46"
+ ]
+ ]
+ },
+ {
+ "input": "&cupcup",
+ "description": "Bad named entity: cupcup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cupcup"
+ ]
+ ]
+ },
+ {
+ "input": "&cupcup;",
+ "description": "Named entity: cupcup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a4a"
+ ]
+ ]
+ },
+ {
+ "input": "&cupdot",
+ "description": "Bad named entity: cupdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cupdot"
+ ]
+ ]
+ },
+ {
+ "input": "&cupdot;",
+ "description": "Named entity: cupdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228d"
+ ]
+ ]
+ },
+ {
+ "input": "&cupor",
+ "description": "Bad named entity: cupor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cupor"
+ ]
+ ]
+ },
+ {
+ "input": "&cupor;",
+ "description": "Named entity: cupor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a45"
+ ]
+ ]
+ },
+ {
+ "input": "&cups",
+ "description": "Bad named entity: cups without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cups"
+ ]
+ ]
+ },
+ {
+ "input": "&cups;",
+ "description": "Named entity: cups; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222a\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&curarr",
+ "description": "Bad named entity: curarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&curarr"
+ ]
+ ]
+ },
+ {
+ "input": "&curarr;",
+ "description": "Named entity: curarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b7"
+ ]
+ ]
+ },
+ {
+ "input": "&curarrm",
+ "description": "Bad named entity: curarrm without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&curarrm"
+ ]
+ ]
+ },
+ {
+ "input": "&curarrm;",
+ "description": "Named entity: curarrm; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u293c"
+ ]
+ ]
+ },
+ {
+ "input": "&curlyeqprec",
+ "description": "Bad named entity: curlyeqprec without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&curlyeqprec"
+ ]
+ ]
+ },
+ {
+ "input": "&curlyeqprec;",
+ "description": "Named entity: curlyeqprec; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22de"
+ ]
+ ]
+ },
+ {
+ "input": "&curlyeqsucc",
+ "description": "Bad named entity: curlyeqsucc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&curlyeqsucc"
+ ]
+ ]
+ },
+ {
+ "input": "&curlyeqsucc;",
+ "description": "Named entity: curlyeqsucc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22df"
+ ]
+ ]
+ },
+ {
+ "input": "&curlyvee",
+ "description": "Bad named entity: curlyvee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&curlyvee"
+ ]
+ ]
+ },
+ {
+ "input": "&curlyvee;",
+ "description": "Named entity: curlyvee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ce"
+ ]
+ ]
+ },
+ {
+ "input": "&curlywedge",
+ "description": "Bad named entity: curlywedge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&curlywedge"
+ ]
+ ]
+ },
+ {
+ "input": "&curlywedge;",
+ "description": "Named entity: curlywedge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22cf"
+ ]
+ ]
+ },
+ {
+ "input": "&curren",
+ "description": "Named entity: curren without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a4"
+ ]
+ ]
+ },
+ {
+ "input": "&curren;",
+ "description": "Named entity: curren; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a4"
+ ]
+ ]
+ },
+ {
+ "input": "&curvearrowleft",
+ "description": "Bad named entity: curvearrowleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&curvearrowleft"
+ ]
+ ]
+ },
+ {
+ "input": "&curvearrowleft;",
+ "description": "Named entity: curvearrowleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b6"
+ ]
+ ]
+ },
+ {
+ "input": "&curvearrowright",
+ "description": "Bad named entity: curvearrowright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&curvearrowright"
+ ]
+ ]
+ },
+ {
+ "input": "&curvearrowright;",
+ "description": "Named entity: curvearrowright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b7"
+ ]
+ ]
+ },
+ {
+ "input": "&cuvee",
+ "description": "Bad named entity: cuvee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cuvee"
+ ]
+ ]
+ },
+ {
+ "input": "&cuvee;",
+ "description": "Named entity: cuvee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ce"
+ ]
+ ]
+ },
+ {
+ "input": "&cuwed",
+ "description": "Bad named entity: cuwed without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cuwed"
+ ]
+ ]
+ },
+ {
+ "input": "&cuwed;",
+ "description": "Named entity: cuwed; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22cf"
+ ]
+ ]
+ },
+ {
+ "input": "&cwconint",
+ "description": "Bad named entity: cwconint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cwconint"
+ ]
+ ]
+ },
+ {
+ "input": "&cwconint;",
+ "description": "Named entity: cwconint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2232"
+ ]
+ ]
+ },
+ {
+ "input": "&cwint",
+ "description": "Bad named entity: cwint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cwint"
+ ]
+ ]
+ },
+ {
+ "input": "&cwint;",
+ "description": "Named entity: cwint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2231"
+ ]
+ ]
+ },
+ {
+ "input": "&cylcty",
+ "description": "Bad named entity: cylcty without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&cylcty"
+ ]
+ ]
+ },
+ {
+ "input": "&cylcty;",
+ "description": "Named entity: cylcty; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u232d"
+ ]
+ ]
+ },
+ {
+ "input": "&dArr",
+ "description": "Bad named entity: dArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dArr"
+ ]
+ ]
+ },
+ {
+ "input": "&dArr;",
+ "description": "Named entity: dArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d3"
+ ]
+ ]
+ },
+ {
+ "input": "&dHar",
+ "description": "Bad named entity: dHar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dHar"
+ ]
+ ]
+ },
+ {
+ "input": "&dHar;",
+ "description": "Named entity: dHar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2965"
+ ]
+ ]
+ },
+ {
+ "input": "&dagger",
+ "description": "Bad named entity: dagger without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dagger"
+ ]
+ ]
+ },
+ {
+ "input": "&dagger;",
+ "description": "Named entity: dagger; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2020"
+ ]
+ ]
+ },
+ {
+ "input": "&daleth",
+ "description": "Bad named entity: daleth without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&daleth"
+ ]
+ ]
+ },
+ {
+ "input": "&daleth;",
+ "description": "Named entity: daleth; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2138"
+ ]
+ ]
+ },
+ {
+ "input": "&darr",
+ "description": "Bad named entity: darr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&darr"
+ ]
+ ]
+ },
+ {
+ "input": "&darr;",
+ "description": "Named entity: darr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2193"
+ ]
+ ]
+ },
+ {
+ "input": "&dash",
+ "description": "Bad named entity: dash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dash"
+ ]
+ ]
+ },
+ {
+ "input": "&dash;",
+ "description": "Named entity: dash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2010"
+ ]
+ ]
+ },
+ {
+ "input": "&dashv",
+ "description": "Bad named entity: dashv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dashv"
+ ]
+ ]
+ },
+ {
+ "input": "&dashv;",
+ "description": "Named entity: dashv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a3"
+ ]
+ ]
+ },
+ {
+ "input": "&dbkarow",
+ "description": "Bad named entity: dbkarow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dbkarow"
+ ]
+ ]
+ },
+ {
+ "input": "&dbkarow;",
+ "description": "Named entity: dbkarow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u290f"
+ ]
+ ]
+ },
+ {
+ "input": "&dblac",
+ "description": "Bad named entity: dblac without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dblac"
+ ]
+ ]
+ },
+ {
+ "input": "&dblac;",
+ "description": "Named entity: dblac; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02dd"
+ ]
+ ]
+ },
+ {
+ "input": "&dcaron",
+ "description": "Bad named entity: dcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&dcaron;",
+ "description": "Named entity: dcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u010f"
+ ]
+ ]
+ },
+ {
+ "input": "&dcy",
+ "description": "Bad named entity: dcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dcy"
+ ]
+ ]
+ },
+ {
+ "input": "&dcy;",
+ "description": "Named entity: dcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0434"
+ ]
+ ]
+ },
+ {
+ "input": "&dd",
+ "description": "Bad named entity: dd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dd"
+ ]
+ ]
+ },
+ {
+ "input": "&dd;",
+ "description": "Named entity: dd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2146"
+ ]
+ ]
+ },
+ {
+ "input": "&ddagger",
+ "description": "Bad named entity: ddagger without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ddagger"
+ ]
+ ]
+ },
+ {
+ "input": "&ddagger;",
+ "description": "Named entity: ddagger; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2021"
+ ]
+ ]
+ },
+ {
+ "input": "&ddarr",
+ "description": "Bad named entity: ddarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ddarr"
+ ]
+ ]
+ },
+ {
+ "input": "&ddarr;",
+ "description": "Named entity: ddarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ca"
+ ]
+ ]
+ },
+ {
+ "input": "&ddotseq",
+ "description": "Bad named entity: ddotseq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ddotseq"
+ ]
+ ]
+ },
+ {
+ "input": "&ddotseq;",
+ "description": "Named entity: ddotseq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a77"
+ ]
+ ]
+ },
+ {
+ "input": "&deg",
+ "description": "Named entity: deg without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b0"
+ ]
+ ]
+ },
+ {
+ "input": "&deg;",
+ "description": "Named entity: deg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b0"
+ ]
+ ]
+ },
+ {
+ "input": "&delta",
+ "description": "Bad named entity: delta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&delta"
+ ]
+ ]
+ },
+ {
+ "input": "&delta;",
+ "description": "Named entity: delta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b4"
+ ]
+ ]
+ },
+ {
+ "input": "&demptyv",
+ "description": "Bad named entity: demptyv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&demptyv"
+ ]
+ ]
+ },
+ {
+ "input": "&demptyv;",
+ "description": "Named entity: demptyv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b1"
+ ]
+ ]
+ },
+ {
+ "input": "&dfisht",
+ "description": "Bad named entity: dfisht without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dfisht"
+ ]
+ ]
+ },
+ {
+ "input": "&dfisht;",
+ "description": "Named entity: dfisht; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u297f"
+ ]
+ ]
+ },
+ {
+ "input": "&dfr",
+ "description": "Bad named entity: dfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dfr"
+ ]
+ ]
+ },
+ {
+ "input": "&dfr;",
+ "description": "Named entity: dfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd21"
+ ]
+ ]
+ },
+ {
+ "input": "&dharl",
+ "description": "Bad named entity: dharl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dharl"
+ ]
+ ]
+ },
+ {
+ "input": "&dharl;",
+ "description": "Named entity: dharl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c3"
+ ]
+ ]
+ },
+ {
+ "input": "&dharr",
+ "description": "Bad named entity: dharr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dharr"
+ ]
+ ]
+ },
+ {
+ "input": "&dharr;",
+ "description": "Named entity: dharr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c2"
+ ]
+ ]
+ },
+ {
+ "input": "&diam",
+ "description": "Bad named entity: diam without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&diam"
+ ]
+ ]
+ },
+ {
+ "input": "&diam;",
+ "description": "Named entity: diam; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c4"
+ ]
+ ]
+ },
+ {
+ "input": "&diamond",
+ "description": "Bad named entity: diamond without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&diamond"
+ ]
+ ]
+ },
+ {
+ "input": "&diamond;",
+ "description": "Named entity: diamond; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c4"
+ ]
+ ]
+ },
+ {
+ "input": "&diamondsuit",
+ "description": "Bad named entity: diamondsuit without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&diamondsuit"
+ ]
+ ]
+ },
+ {
+ "input": "&diamondsuit;",
+ "description": "Named entity: diamondsuit; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2666"
+ ]
+ ]
+ },
+ {
+ "input": "&diams",
+ "description": "Bad named entity: diams without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&diams"
+ ]
+ ]
+ },
+ {
+ "input": "&diams;",
+ "description": "Named entity: diams; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2666"
+ ]
+ ]
+ },
+ {
+ "input": "&die",
+ "description": "Bad named entity: die without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&die"
+ ]
+ ]
+ },
+ {
+ "input": "&die;",
+ "description": "Named entity: die; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a8"
+ ]
+ ]
+ },
+ {
+ "input": "&digamma",
+ "description": "Bad named entity: digamma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&digamma"
+ ]
+ ]
+ },
+ {
+ "input": "&digamma;",
+ "description": "Named entity: digamma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03dd"
+ ]
+ ]
+ },
+ {
+ "input": "&disin",
+ "description": "Bad named entity: disin without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&disin"
+ ]
+ ]
+ },
+ {
+ "input": "&disin;",
+ "description": "Named entity: disin; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f2"
+ ]
+ ]
+ },
+ {
+ "input": "&div",
+ "description": "Bad named entity: div without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&div"
+ ]
+ ]
+ },
+ {
+ "input": "&div;",
+ "description": "Named entity: div; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f7"
+ ]
+ ]
+ },
+ {
+ "input": "&divide",
+ "description": "Named entity: divide without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f7"
+ ]
+ ]
+ },
+ {
+ "input": "&divide;",
+ "description": "Named entity: divide; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f7"
+ ]
+ ]
+ },
+ {
+ "input": "&divideontimes;",
+ "description": "Named entity: divideontimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c7"
+ ]
+ ]
+ },
+ {
+ "input": "&divonx",
+ "description": "Bad named entity: divonx without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&divonx"
+ ]
+ ]
+ },
+ {
+ "input": "&divonx;",
+ "description": "Named entity: divonx; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c7"
+ ]
+ ]
+ },
+ {
+ "input": "&djcy",
+ "description": "Bad named entity: djcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&djcy"
+ ]
+ ]
+ },
+ {
+ "input": "&djcy;",
+ "description": "Named entity: djcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0452"
+ ]
+ ]
+ },
+ {
+ "input": "&dlcorn",
+ "description": "Bad named entity: dlcorn without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dlcorn"
+ ]
+ ]
+ },
+ {
+ "input": "&dlcorn;",
+ "description": "Named entity: dlcorn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u231e"
+ ]
+ ]
+ },
+ {
+ "input": "&dlcrop",
+ "description": "Bad named entity: dlcrop without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dlcrop"
+ ]
+ ]
+ },
+ {
+ "input": "&dlcrop;",
+ "description": "Named entity: dlcrop; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u230d"
+ ]
+ ]
+ },
+ {
+ "input": "&dollar",
+ "description": "Bad named entity: dollar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dollar"
+ ]
+ ]
+ },
+ {
+ "input": "&dollar;",
+ "description": "Named entity: dollar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "$"
+ ]
+ ]
+ },
+ {
+ "input": "&dopf",
+ "description": "Bad named entity: dopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dopf"
+ ]
+ ]
+ },
+ {
+ "input": "&dopf;",
+ "description": "Named entity: dopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd55"
+ ]
+ ]
+ },
+ {
+ "input": "&dot",
+ "description": "Bad named entity: dot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dot"
+ ]
+ ]
+ },
+ {
+ "input": "&dot;",
+ "description": "Named entity: dot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02d9"
+ ]
+ ]
+ },
+ {
+ "input": "&doteq",
+ "description": "Bad named entity: doteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&doteq"
+ ]
+ ]
+ },
+ {
+ "input": "&doteq;",
+ "description": "Named entity: doteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2250"
+ ]
+ ]
+ },
+ {
+ "input": "&doteqdot",
+ "description": "Bad named entity: doteqdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&doteqdot"
+ ]
+ ]
+ },
+ {
+ "input": "&doteqdot;",
+ "description": "Named entity: doteqdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2251"
+ ]
+ ]
+ },
+ {
+ "input": "&dotminus",
+ "description": "Bad named entity: dotminus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dotminus"
+ ]
+ ]
+ },
+ {
+ "input": "&dotminus;",
+ "description": "Named entity: dotminus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2238"
+ ]
+ ]
+ },
+ {
+ "input": "&dotplus",
+ "description": "Bad named entity: dotplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dotplus"
+ ]
+ ]
+ },
+ {
+ "input": "&dotplus;",
+ "description": "Named entity: dotplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2214"
+ ]
+ ]
+ },
+ {
+ "input": "&dotsquare",
+ "description": "Bad named entity: dotsquare without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dotsquare"
+ ]
+ ]
+ },
+ {
+ "input": "&dotsquare;",
+ "description": "Named entity: dotsquare; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a1"
+ ]
+ ]
+ },
+ {
+ "input": "&doublebarwedge",
+ "description": "Bad named entity: doublebarwedge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&doublebarwedge"
+ ]
+ ]
+ },
+ {
+ "input": "&doublebarwedge;",
+ "description": "Named entity: doublebarwedge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2306"
+ ]
+ ]
+ },
+ {
+ "input": "&downarrow",
+ "description": "Bad named entity: downarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&downarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&downarrow;",
+ "description": "Named entity: downarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2193"
+ ]
+ ]
+ },
+ {
+ "input": "&downdownarrows",
+ "description": "Bad named entity: downdownarrows without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&downdownarrows"
+ ]
+ ]
+ },
+ {
+ "input": "&downdownarrows;",
+ "description": "Named entity: downdownarrows; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ca"
+ ]
+ ]
+ },
+ {
+ "input": "&downharpoonleft",
+ "description": "Bad named entity: downharpoonleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&downharpoonleft"
+ ]
+ ]
+ },
+ {
+ "input": "&downharpoonleft;",
+ "description": "Named entity: downharpoonleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c3"
+ ]
+ ]
+ },
+ {
+ "input": "&downharpoonright",
+ "description": "Bad named entity: downharpoonright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&downharpoonright"
+ ]
+ ]
+ },
+ {
+ "input": "&downharpoonright;",
+ "description": "Named entity: downharpoonright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c2"
+ ]
+ ]
+ },
+ {
+ "input": "&drbkarow",
+ "description": "Bad named entity: drbkarow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&drbkarow"
+ ]
+ ]
+ },
+ {
+ "input": "&drbkarow;",
+ "description": "Named entity: drbkarow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2910"
+ ]
+ ]
+ },
+ {
+ "input": "&drcorn",
+ "description": "Bad named entity: drcorn without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&drcorn"
+ ]
+ ]
+ },
+ {
+ "input": "&drcorn;",
+ "description": "Named entity: drcorn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u231f"
+ ]
+ ]
+ },
+ {
+ "input": "&drcrop",
+ "description": "Bad named entity: drcrop without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&drcrop"
+ ]
+ ]
+ },
+ {
+ "input": "&drcrop;",
+ "description": "Named entity: drcrop; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u230c"
+ ]
+ ]
+ },
+ {
+ "input": "&dscr",
+ "description": "Bad named entity: dscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dscr"
+ ]
+ ]
+ },
+ {
+ "input": "&dscr;",
+ "description": "Named entity: dscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcb9"
+ ]
+ ]
+ },
+ {
+ "input": "&dscy",
+ "description": "Bad named entity: dscy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dscy"
+ ]
+ ]
+ },
+ {
+ "input": "&dscy;",
+ "description": "Named entity: dscy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0455"
+ ]
+ ]
+ },
+ {
+ "input": "&dsol",
+ "description": "Bad named entity: dsol without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dsol"
+ ]
+ ]
+ },
+ {
+ "input": "&dsol;",
+ "description": "Named entity: dsol; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29f6"
+ ]
+ ]
+ },
+ {
+ "input": "&dstrok",
+ "description": "Bad named entity: dstrok without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dstrok"
+ ]
+ ]
+ },
+ {
+ "input": "&dstrok;",
+ "description": "Named entity: dstrok; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0111"
+ ]
+ ]
+ },
+ {
+ "input": "&dtdot",
+ "description": "Bad named entity: dtdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dtdot"
+ ]
+ ]
+ },
+ {
+ "input": "&dtdot;",
+ "description": "Named entity: dtdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f1"
+ ]
+ ]
+ },
+ {
+ "input": "&dtri",
+ "description": "Bad named entity: dtri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dtri"
+ ]
+ ]
+ },
+ {
+ "input": "&dtri;",
+ "description": "Named entity: dtri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25bf"
+ ]
+ ]
+ },
+ {
+ "input": "&dtrif",
+ "description": "Bad named entity: dtrif without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dtrif"
+ ]
+ ]
+ },
+ {
+ "input": "&dtrif;",
+ "description": "Named entity: dtrif; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25be"
+ ]
+ ]
+ },
+ {
+ "input": "&duarr",
+ "description": "Bad named entity: duarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&duarr"
+ ]
+ ]
+ },
+ {
+ "input": "&duarr;",
+ "description": "Named entity: duarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21f5"
+ ]
+ ]
+ },
+ {
+ "input": "&duhar",
+ "description": "Bad named entity: duhar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&duhar"
+ ]
+ ]
+ },
+ {
+ "input": "&duhar;",
+ "description": "Named entity: duhar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u296f"
+ ]
+ ]
+ },
+ {
+ "input": "&dwangle",
+ "description": "Bad named entity: dwangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dwangle"
+ ]
+ ]
+ },
+ {
+ "input": "&dwangle;",
+ "description": "Named entity: dwangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29a6"
+ ]
+ ]
+ },
+ {
+ "input": "&dzcy",
+ "description": "Bad named entity: dzcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dzcy"
+ ]
+ ]
+ },
+ {
+ "input": "&dzcy;",
+ "description": "Named entity: dzcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u045f"
+ ]
+ ]
+ },
+ {
+ "input": "&dzigrarr",
+ "description": "Bad named entity: dzigrarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&dzigrarr"
+ ]
+ ]
+ },
+ {
+ "input": "&dzigrarr;",
+ "description": "Named entity: dzigrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27ff"
+ ]
+ ]
+ },
+ {
+ "input": "&eDDot",
+ "description": "Bad named entity: eDDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eDDot"
+ ]
+ ]
+ },
+ {
+ "input": "&eDDot;",
+ "description": "Named entity: eDDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a77"
+ ]
+ ]
+ },
+ {
+ "input": "&eDot",
+ "description": "Bad named entity: eDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eDot"
+ ]
+ ]
+ },
+ {
+ "input": "&eDot;",
+ "description": "Named entity: eDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2251"
+ ]
+ ]
+ },
+ {
+ "input": "&eacute",
+ "description": "Named entity: eacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e9"
+ ]
+ ]
+ },
+ {
+ "input": "&eacute;",
+ "description": "Named entity: eacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e9"
+ ]
+ ]
+ },
+ {
+ "input": "&easter",
+ "description": "Bad named entity: easter without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&easter"
+ ]
+ ]
+ },
+ {
+ "input": "&easter;",
+ "description": "Named entity: easter; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a6e"
+ ]
+ ]
+ },
+ {
+ "input": "&ecaron",
+ "description": "Bad named entity: ecaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ecaron"
+ ]
+ ]
+ },
+ {
+ "input": "&ecaron;",
+ "description": "Named entity: ecaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u011b"
+ ]
+ ]
+ },
+ {
+ "input": "&ecir",
+ "description": "Bad named entity: ecir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ecir"
+ ]
+ ]
+ },
+ {
+ "input": "&ecir;",
+ "description": "Named entity: ecir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2256"
+ ]
+ ]
+ },
+ {
+ "input": "&ecirc",
+ "description": "Named entity: ecirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ea"
+ ]
+ ]
+ },
+ {
+ "input": "&ecirc;",
+ "description": "Named entity: ecirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ea"
+ ]
+ ]
+ },
+ {
+ "input": "&ecolon",
+ "description": "Bad named entity: ecolon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ecolon"
+ ]
+ ]
+ },
+ {
+ "input": "&ecolon;",
+ "description": "Named entity: ecolon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2255"
+ ]
+ ]
+ },
+ {
+ "input": "&ecy",
+ "description": "Bad named entity: ecy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ecy"
+ ]
+ ]
+ },
+ {
+ "input": "&ecy;",
+ "description": "Named entity: ecy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u044d"
+ ]
+ ]
+ },
+ {
+ "input": "&edot",
+ "description": "Bad named entity: edot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&edot"
+ ]
+ ]
+ },
+ {
+ "input": "&edot;",
+ "description": "Named entity: edot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0117"
+ ]
+ ]
+ },
+ {
+ "input": "&ee",
+ "description": "Bad named entity: ee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ee"
+ ]
+ ]
+ },
+ {
+ "input": "&ee;",
+ "description": "Named entity: ee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2147"
+ ]
+ ]
+ },
+ {
+ "input": "&efDot",
+ "description": "Bad named entity: efDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&efDot"
+ ]
+ ]
+ },
+ {
+ "input": "&efDot;",
+ "description": "Named entity: efDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2252"
+ ]
+ ]
+ },
+ {
+ "input": "&efr",
+ "description": "Bad named entity: efr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&efr"
+ ]
+ ]
+ },
+ {
+ "input": "&efr;",
+ "description": "Named entity: efr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd22"
+ ]
+ ]
+ },
+ {
+ "input": "&eg",
+ "description": "Bad named entity: eg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eg"
+ ]
+ ]
+ },
+ {
+ "input": "&eg;",
+ "description": "Named entity: eg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a9a"
+ ]
+ ]
+ },
+ {
+ "input": "&egrave",
+ "description": "Named entity: egrave without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00e8"
+ ]
+ ]
+ },
+ {
+ "input": "&egrave;",
+ "description": "Named entity: egrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00e8"
+ ]
+ ]
+ },
+ {
+ "input": "&egs",
+ "description": "Bad named entity: egs without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&egs"
+ ]
+ ]
+ },
+ {
+ "input": "&egs;",
+ "description": "Named entity: egs; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a96"
+ ]
+ ]
+ },
+ {
+ "input": "&egsdot",
+ "description": "Bad named entity: egsdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&egsdot"
+ ]
+ ]
+ },
+ {
+ "input": "&egsdot;",
+ "description": "Named entity: egsdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a98"
+ ]
+ ]
+ },
+ {
+ "input": "&el",
+ "description": "Bad named entity: el without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&el"
+ ]
+ ]
+ },
+ {
+ "input": "&el;",
+ "description": "Named entity: el; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a99"
+ ]
+ ]
+ },
+ {
+ "input": "&elinters",
+ "description": "Bad named entity: elinters without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&elinters"
+ ]
+ ]
+ },
+ {
+ "input": "&elinters;",
+ "description": "Named entity: elinters; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23e7"
+ ]
+ ]
+ },
+ {
+ "input": "&ell",
+ "description": "Bad named entity: ell without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ell"
+ ]
+ ]
+ },
+ {
+ "input": "&ell;",
+ "description": "Named entity: ell; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2113"
+ ]
+ ]
+ },
+ {
+ "input": "&els",
+ "description": "Bad named entity: els without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&els"
+ ]
+ ]
+ },
+ {
+ "input": "&els;",
+ "description": "Named entity: els; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a95"
+ ]
+ ]
+ },
+ {
+ "input": "&elsdot",
+ "description": "Bad named entity: elsdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&elsdot"
+ ]
+ ]
+ },
+ {
+ "input": "&elsdot;",
+ "description": "Named entity: elsdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a97"
+ ]
+ ]
+ },
+ {
+ "input": "&emacr",
+ "description": "Bad named entity: emacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&emacr"
+ ]
+ ]
+ },
+ {
+ "input": "&emacr;",
+ "description": "Named entity: emacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0113"
+ ]
+ ]
+ },
+ {
+ "input": "&empty",
+ "description": "Bad named entity: empty without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&empty"
+ ]
+ ]
+ },
+ {
+ "input": "&empty;",
+ "description": "Named entity: empty; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2205"
+ ]
+ ]
+ },
+ {
+ "input": "&emptyset",
+ "description": "Bad named entity: emptyset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&emptyset"
+ ]
+ ]
+ },
+ {
+ "input": "&emptyset;",
+ "description": "Named entity: emptyset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2205"
+ ]
+ ]
+ },
+ {
+ "input": "&emptyv",
+ "description": "Bad named entity: emptyv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&emptyv"
+ ]
+ ]
+ },
+ {
+ "input": "&emptyv;",
+ "description": "Named entity: emptyv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2205"
+ ]
+ ]
+ },
+ {
+ "input": "&emsp",
+ "description": "Bad named entity: emsp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&emsp"
+ ]
+ ]
+ },
+ {
+ "input": "&emsp13",
+ "description": "Bad named entity: emsp13 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&emsp13"
+ ]
+ ]
+ },
+ {
+ "input": "&emsp13;",
+ "description": "Named entity: emsp13; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2004"
+ ]
+ ]
+ },
+ {
+ "input": "&emsp14",
+ "description": "Bad named entity: emsp14 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&emsp14"
+ ]
+ ]
+ },
+ {
+ "input": "&emsp14;",
+ "description": "Named entity: emsp14; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2005"
+ ]
+ ]
+ },
+ {
+ "input": "&emsp;",
+ "description": "Named entity: emsp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2003"
+ ]
+ ]
+ },
+ {
+ "input": "&eng",
+ "description": "Bad named entity: eng without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eng"
+ ]
+ ]
+ },
+ {
+ "input": "&eng;",
+ "description": "Named entity: eng; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u014b"
+ ]
+ ]
+ },
+ {
+ "input": "&ensp",
+ "description": "Bad named entity: ensp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ensp"
+ ]
+ ]
+ },
+ {
+ "input": "&ensp;",
+ "description": "Named entity: ensp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2002"
+ ]
+ ]
+ },
+ {
+ "input": "&eogon",
+ "description": "Bad named entity: eogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eogon"
+ ]
+ ]
+ },
+ {
+ "input": "&eogon;",
+ "description": "Named entity: eogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0119"
+ ]
+ ]
+ },
+ {
+ "input": "&eopf",
+ "description": "Bad named entity: eopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eopf"
+ ]
+ ]
+ },
+ {
+ "input": "&eopf;",
+ "description": "Named entity: eopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd56"
+ ]
+ ]
+ },
+ {
+ "input": "&epar",
+ "description": "Bad named entity: epar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&epar"
+ ]
+ ]
+ },
+ {
+ "input": "&epar;",
+ "description": "Named entity: epar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d5"
+ ]
+ ]
+ },
+ {
+ "input": "&eparsl",
+ "description": "Bad named entity: eparsl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eparsl"
+ ]
+ ]
+ },
+ {
+ "input": "&eparsl;",
+ "description": "Named entity: eparsl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29e3"
+ ]
+ ]
+ },
+ {
+ "input": "&eplus",
+ "description": "Bad named entity: eplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eplus"
+ ]
+ ]
+ },
+ {
+ "input": "&eplus;",
+ "description": "Named entity: eplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a71"
+ ]
+ ]
+ },
+ {
+ "input": "&epsi",
+ "description": "Bad named entity: epsi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&epsi"
+ ]
+ ]
+ },
+ {
+ "input": "&epsi;",
+ "description": "Named entity: epsi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b5"
+ ]
+ ]
+ },
+ {
+ "input": "&epsilon",
+ "description": "Bad named entity: epsilon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&epsilon"
+ ]
+ ]
+ },
+ {
+ "input": "&epsilon;",
+ "description": "Named entity: epsilon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b5"
+ ]
+ ]
+ },
+ {
+ "input": "&epsiv",
+ "description": "Bad named entity: epsiv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&epsiv"
+ ]
+ ]
+ },
+ {
+ "input": "&epsiv;",
+ "description": "Named entity: epsiv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f5"
+ ]
+ ]
+ },
+ {
+ "input": "&eqcirc",
+ "description": "Bad named entity: eqcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eqcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&eqcirc;",
+ "description": "Named entity: eqcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2256"
+ ]
+ ]
+ },
+ {
+ "input": "&eqcolon",
+ "description": "Bad named entity: eqcolon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eqcolon"
+ ]
+ ]
+ },
+ {
+ "input": "&eqcolon;",
+ "description": "Named entity: eqcolon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2255"
+ ]
+ ]
+ },
+ {
+ "input": "&eqsim",
+ "description": "Bad named entity: eqsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eqsim"
+ ]
+ ]
+ },
+ {
+ "input": "&eqsim;",
+ "description": "Named entity: eqsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2242"
+ ]
+ ]
+ },
+ {
+ "input": "&eqslantgtr",
+ "description": "Bad named entity: eqslantgtr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eqslantgtr"
+ ]
+ ]
+ },
+ {
+ "input": "&eqslantgtr;",
+ "description": "Named entity: eqslantgtr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a96"
+ ]
+ ]
+ },
+ {
+ "input": "&eqslantless",
+ "description": "Bad named entity: eqslantless without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eqslantless"
+ ]
+ ]
+ },
+ {
+ "input": "&eqslantless;",
+ "description": "Named entity: eqslantless; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a95"
+ ]
+ ]
+ },
+ {
+ "input": "&equals",
+ "description": "Bad named entity: equals without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&equals"
+ ]
+ ]
+ },
+ {
+ "input": "&equals;",
+ "description": "Named entity: equals; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "="
+ ]
+ ]
+ },
+ {
+ "input": "&equest",
+ "description": "Bad named entity: equest without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&equest"
+ ]
+ ]
+ },
+ {
+ "input": "&equest;",
+ "description": "Named entity: equest; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u225f"
+ ]
+ ]
+ },
+ {
+ "input": "&equiv",
+ "description": "Bad named entity: equiv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&equiv"
+ ]
+ ]
+ },
+ {
+ "input": "&equiv;",
+ "description": "Named entity: equiv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2261"
+ ]
+ ]
+ },
+ {
+ "input": "&equivDD",
+ "description": "Bad named entity: equivDD without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&equivDD"
+ ]
+ ]
+ },
+ {
+ "input": "&equivDD;",
+ "description": "Named entity: equivDD; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a78"
+ ]
+ ]
+ },
+ {
+ "input": "&eqvparsl",
+ "description": "Bad named entity: eqvparsl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eqvparsl"
+ ]
+ ]
+ },
+ {
+ "input": "&eqvparsl;",
+ "description": "Named entity: eqvparsl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29e5"
+ ]
+ ]
+ },
+ {
+ "input": "&erDot",
+ "description": "Bad named entity: erDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&erDot"
+ ]
+ ]
+ },
+ {
+ "input": "&erDot;",
+ "description": "Named entity: erDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2253"
+ ]
+ ]
+ },
+ {
+ "input": "&erarr",
+ "description": "Bad named entity: erarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&erarr"
+ ]
+ ]
+ },
+ {
+ "input": "&erarr;",
+ "description": "Named entity: erarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2971"
+ ]
+ ]
+ },
+ {
+ "input": "&escr",
+ "description": "Bad named entity: escr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&escr"
+ ]
+ ]
+ },
+ {
+ "input": "&escr;",
+ "description": "Named entity: escr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u212f"
+ ]
+ ]
+ },
+ {
+ "input": "&esdot",
+ "description": "Bad named entity: esdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&esdot"
+ ]
+ ]
+ },
+ {
+ "input": "&esdot;",
+ "description": "Named entity: esdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2250"
+ ]
+ ]
+ },
+ {
+ "input": "&esim",
+ "description": "Bad named entity: esim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&esim"
+ ]
+ ]
+ },
+ {
+ "input": "&esim;",
+ "description": "Named entity: esim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2242"
+ ]
+ ]
+ },
+ {
+ "input": "&eta",
+ "description": "Bad named entity: eta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&eta"
+ ]
+ ]
+ },
+ {
+ "input": "&eta;",
+ "description": "Named entity: eta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b7"
+ ]
+ ]
+ },
+ {
+ "input": "&eth",
+ "description": "Named entity: eth without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f0"
+ ]
+ ]
+ },
+ {
+ "input": "&eth;",
+ "description": "Named entity: eth; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f0"
+ ]
+ ]
+ },
+ {
+ "input": "&euml",
+ "description": "Named entity: euml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00eb"
+ ]
+ ]
+ },
+ {
+ "input": "&euml;",
+ "description": "Named entity: euml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00eb"
+ ]
+ ]
+ },
+ {
+ "input": "&euro",
+ "description": "Bad named entity: euro without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&euro"
+ ]
+ ]
+ },
+ {
+ "input": "&euro;",
+ "description": "Named entity: euro; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u20ac"
+ ]
+ ]
+ },
+ {
+ "input": "&excl",
+ "description": "Bad named entity: excl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&excl"
+ ]
+ ]
+ },
+ {
+ "input": "&excl;",
+ "description": "Named entity: excl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "!"
+ ]
+ ]
+ },
+ {
+ "input": "&exist",
+ "description": "Bad named entity: exist without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&exist"
+ ]
+ ]
+ },
+ {
+ "input": "&exist;",
+ "description": "Named entity: exist; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2203"
+ ]
+ ]
+ },
+ {
+ "input": "&expectation",
+ "description": "Bad named entity: expectation without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&expectation"
+ ]
+ ]
+ },
+ {
+ "input": "&expectation;",
+ "description": "Named entity: expectation; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2130"
+ ]
+ ]
+ },
+ {
+ "input": "&exponentiale",
+ "description": "Bad named entity: exponentiale without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&exponentiale"
+ ]
+ ]
+ },
+ {
+ "input": "&exponentiale;",
+ "description": "Named entity: exponentiale; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2147"
+ ]
+ ]
+ },
+ {
+ "input": "&fallingdotseq",
+ "description": "Bad named entity: fallingdotseq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fallingdotseq"
+ ]
+ ]
+ },
+ {
+ "input": "&fallingdotseq;",
+ "description": "Named entity: fallingdotseq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2252"
+ ]
+ ]
+ },
+ {
+ "input": "&fcy",
+ "description": "Bad named entity: fcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fcy"
+ ]
+ ]
+ },
+ {
+ "input": "&fcy;",
+ "description": "Named entity: fcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0444"
+ ]
+ ]
+ },
+ {
+ "input": "&female",
+ "description": "Bad named entity: female without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&female"
+ ]
+ ]
+ },
+ {
+ "input": "&female;",
+ "description": "Named entity: female; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2640"
+ ]
+ ]
+ },
+ {
+ "input": "&ffilig",
+ "description": "Bad named entity: ffilig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ffilig"
+ ]
+ ]
+ },
+ {
+ "input": "&ffilig;",
+ "description": "Named entity: ffilig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ufb03"
+ ]
+ ]
+ },
+ {
+ "input": "&fflig",
+ "description": "Bad named entity: fflig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fflig"
+ ]
+ ]
+ },
+ {
+ "input": "&fflig;",
+ "description": "Named entity: fflig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ufb00"
+ ]
+ ]
+ },
+ {
+ "input": "&ffllig",
+ "description": "Bad named entity: ffllig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ffllig"
+ ]
+ ]
+ },
+ {
+ "input": "&ffllig;",
+ "description": "Named entity: ffllig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ufb04"
+ ]
+ ]
+ },
+ {
+ "input": "&ffr",
+ "description": "Bad named entity: ffr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ffr"
+ ]
+ ]
+ },
+ {
+ "input": "&ffr;",
+ "description": "Named entity: ffr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd23"
+ ]
+ ]
+ },
+ {
+ "input": "&filig",
+ "description": "Bad named entity: filig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&filig"
+ ]
+ ]
+ },
+ {
+ "input": "&filig;",
+ "description": "Named entity: filig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ufb01"
+ ]
+ ]
+ },
+ {
+ "input": "&fjlig",
+ "description": "Bad named entity: fjlig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fjlig"
+ ]
+ ]
+ },
+ {
+ "input": "&fjlig;",
+ "description": "Named entity: fjlig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "fj"
+ ]
+ ]
+ },
+ {
+ "input": "&flat",
+ "description": "Bad named entity: flat without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&flat"
+ ]
+ ]
+ },
+ {
+ "input": "&flat;",
+ "description": "Named entity: flat; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u266d"
+ ]
+ ]
+ },
+ {
+ "input": "&fllig",
+ "description": "Bad named entity: fllig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fllig"
+ ]
+ ]
+ },
+ {
+ "input": "&fllig;",
+ "description": "Named entity: fllig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ufb02"
+ ]
+ ]
+ },
+ {
+ "input": "&fltns",
+ "description": "Bad named entity: fltns without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fltns"
+ ]
+ ]
+ },
+ {
+ "input": "&fltns;",
+ "description": "Named entity: fltns; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b1"
+ ]
+ ]
+ },
+ {
+ "input": "&fnof",
+ "description": "Bad named entity: fnof without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fnof"
+ ]
+ ]
+ },
+ {
+ "input": "&fnof;",
+ "description": "Named entity: fnof; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0192"
+ ]
+ ]
+ },
+ {
+ "input": "&fopf",
+ "description": "Bad named entity: fopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fopf"
+ ]
+ ]
+ },
+ {
+ "input": "&fopf;",
+ "description": "Named entity: fopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd57"
+ ]
+ ]
+ },
+ {
+ "input": "&forall",
+ "description": "Bad named entity: forall without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&forall"
+ ]
+ ]
+ },
+ {
+ "input": "&forall;",
+ "description": "Named entity: forall; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2200"
+ ]
+ ]
+ },
+ {
+ "input": "&fork",
+ "description": "Bad named entity: fork without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fork"
+ ]
+ ]
+ },
+ {
+ "input": "&fork;",
+ "description": "Named entity: fork; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d4"
+ ]
+ ]
+ },
+ {
+ "input": "&forkv",
+ "description": "Bad named entity: forkv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&forkv"
+ ]
+ ]
+ },
+ {
+ "input": "&forkv;",
+ "description": "Named entity: forkv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad9"
+ ]
+ ]
+ },
+ {
+ "input": "&fpartint",
+ "description": "Bad named entity: fpartint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fpartint"
+ ]
+ ]
+ },
+ {
+ "input": "&fpartint;",
+ "description": "Named entity: fpartint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a0d"
+ ]
+ ]
+ },
+ {
+ "input": "&frac12",
+ "description": "Named entity: frac12 without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00bd"
+ ]
+ ]
+ },
+ {
+ "input": "&frac12;",
+ "description": "Named entity: frac12; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00bd"
+ ]
+ ]
+ },
+ {
+ "input": "&frac13",
+ "description": "Bad named entity: frac13 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac13"
+ ]
+ ]
+ },
+ {
+ "input": "&frac13;",
+ "description": "Named entity: frac13; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2153"
+ ]
+ ]
+ },
+ {
+ "input": "&frac14",
+ "description": "Named entity: frac14 without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00bc"
+ ]
+ ]
+ },
+ {
+ "input": "&frac14;",
+ "description": "Named entity: frac14; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00bc"
+ ]
+ ]
+ },
+ {
+ "input": "&frac15",
+ "description": "Bad named entity: frac15 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac15"
+ ]
+ ]
+ },
+ {
+ "input": "&frac15;",
+ "description": "Named entity: frac15; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2155"
+ ]
+ ]
+ },
+ {
+ "input": "&frac16",
+ "description": "Bad named entity: frac16 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac16"
+ ]
+ ]
+ },
+ {
+ "input": "&frac16;",
+ "description": "Named entity: frac16; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2159"
+ ]
+ ]
+ },
+ {
+ "input": "&frac18",
+ "description": "Bad named entity: frac18 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac18"
+ ]
+ ]
+ },
+ {
+ "input": "&frac18;",
+ "description": "Named entity: frac18; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u215b"
+ ]
+ ]
+ },
+ {
+ "input": "&frac23",
+ "description": "Bad named entity: frac23 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac23"
+ ]
+ ]
+ },
+ {
+ "input": "&frac23;",
+ "description": "Named entity: frac23; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2154"
+ ]
+ ]
+ },
+ {
+ "input": "&frac25",
+ "description": "Bad named entity: frac25 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac25"
+ ]
+ ]
+ },
+ {
+ "input": "&frac25;",
+ "description": "Named entity: frac25; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2156"
+ ]
+ ]
+ },
+ {
+ "input": "&frac34",
+ "description": "Named entity: frac34 without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00be"
+ ]
+ ]
+ },
+ {
+ "input": "&frac34;",
+ "description": "Named entity: frac34; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00be"
+ ]
+ ]
+ },
+ {
+ "input": "&frac35",
+ "description": "Bad named entity: frac35 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac35"
+ ]
+ ]
+ },
+ {
+ "input": "&frac35;",
+ "description": "Named entity: frac35; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2157"
+ ]
+ ]
+ },
+ {
+ "input": "&frac38",
+ "description": "Bad named entity: frac38 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac38"
+ ]
+ ]
+ },
+ {
+ "input": "&frac38;",
+ "description": "Named entity: frac38; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u215c"
+ ]
+ ]
+ },
+ {
+ "input": "&frac45",
+ "description": "Bad named entity: frac45 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac45"
+ ]
+ ]
+ },
+ {
+ "input": "&frac45;",
+ "description": "Named entity: frac45; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2158"
+ ]
+ ]
+ },
+ {
+ "input": "&frac56",
+ "description": "Bad named entity: frac56 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac56"
+ ]
+ ]
+ },
+ {
+ "input": "&frac56;",
+ "description": "Named entity: frac56; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u215a"
+ ]
+ ]
+ },
+ {
+ "input": "&frac58",
+ "description": "Bad named entity: frac58 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac58"
+ ]
+ ]
+ },
+ {
+ "input": "&frac58;",
+ "description": "Named entity: frac58; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u215d"
+ ]
+ ]
+ },
+ {
+ "input": "&frac78",
+ "description": "Bad named entity: frac78 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frac78"
+ ]
+ ]
+ },
+ {
+ "input": "&frac78;",
+ "description": "Named entity: frac78; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u215e"
+ ]
+ ]
+ },
+ {
+ "input": "&frasl",
+ "description": "Bad named entity: frasl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frasl"
+ ]
+ ]
+ },
+ {
+ "input": "&frasl;",
+ "description": "Named entity: frasl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2044"
+ ]
+ ]
+ },
+ {
+ "input": "&frown",
+ "description": "Bad named entity: frown without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&frown"
+ ]
+ ]
+ },
+ {
+ "input": "&frown;",
+ "description": "Named entity: frown; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2322"
+ ]
+ ]
+ },
+ {
+ "input": "&fscr",
+ "description": "Bad named entity: fscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&fscr"
+ ]
+ ]
+ },
+ {
+ "input": "&fscr;",
+ "description": "Named entity: fscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcbb"
+ ]
+ ]
+ },
+ {
+ "input": "&gE",
+ "description": "Bad named entity: gE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gE"
+ ]
+ ]
+ },
+ {
+ "input": "&gE;",
+ "description": "Named entity: gE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2267"
+ ]
+ ]
+ },
+ {
+ "input": "&gEl",
+ "description": "Bad named entity: gEl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gEl"
+ ]
+ ]
+ },
+ {
+ "input": "&gEl;",
+ "description": "Named entity: gEl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8c"
+ ]
+ ]
+ },
+ {
+ "input": "&gacute",
+ "description": "Bad named entity: gacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gacute"
+ ]
+ ]
+ },
+ {
+ "input": "&gacute;",
+ "description": "Named entity: gacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u01f5"
+ ]
+ ]
+ },
+ {
+ "input": "&gamma",
+ "description": "Bad named entity: gamma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gamma"
+ ]
+ ]
+ },
+ {
+ "input": "&gamma;",
+ "description": "Named entity: gamma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b3"
+ ]
+ ]
+ },
+ {
+ "input": "&gammad",
+ "description": "Bad named entity: gammad without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gammad"
+ ]
+ ]
+ },
+ {
+ "input": "&gammad;",
+ "description": "Named entity: gammad; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03dd"
+ ]
+ ]
+ },
+ {
+ "input": "&gap",
+ "description": "Bad named entity: gap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gap"
+ ]
+ ]
+ },
+ {
+ "input": "&gap;",
+ "description": "Named entity: gap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a86"
+ ]
+ ]
+ },
+ {
+ "input": "&gbreve",
+ "description": "Bad named entity: gbreve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gbreve"
+ ]
+ ]
+ },
+ {
+ "input": "&gbreve;",
+ "description": "Named entity: gbreve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u011f"
+ ]
+ ]
+ },
+ {
+ "input": "&gcirc",
+ "description": "Bad named entity: gcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&gcirc;",
+ "description": "Named entity: gcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u011d"
+ ]
+ ]
+ },
+ {
+ "input": "&gcy",
+ "description": "Bad named entity: gcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gcy"
+ ]
+ ]
+ },
+ {
+ "input": "&gcy;",
+ "description": "Named entity: gcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0433"
+ ]
+ ]
+ },
+ {
+ "input": "&gdot",
+ "description": "Bad named entity: gdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gdot"
+ ]
+ ]
+ },
+ {
+ "input": "&gdot;",
+ "description": "Named entity: gdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0121"
+ ]
+ ]
+ },
+ {
+ "input": "&ge",
+ "description": "Bad named entity: ge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ge"
+ ]
+ ]
+ },
+ {
+ "input": "&ge;",
+ "description": "Named entity: ge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2265"
+ ]
+ ]
+ },
+ {
+ "input": "&gel",
+ "description": "Bad named entity: gel without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gel"
+ ]
+ ]
+ },
+ {
+ "input": "&gel;",
+ "description": "Named entity: gel; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22db"
+ ]
+ ]
+ },
+ {
+ "input": "&geq",
+ "description": "Bad named entity: geq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&geq"
+ ]
+ ]
+ },
+ {
+ "input": "&geq;",
+ "description": "Named entity: geq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2265"
+ ]
+ ]
+ },
+ {
+ "input": "&geqq",
+ "description": "Bad named entity: geqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&geqq"
+ ]
+ ]
+ },
+ {
+ "input": "&geqq;",
+ "description": "Named entity: geqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2267"
+ ]
+ ]
+ },
+ {
+ "input": "&geqslant",
+ "description": "Bad named entity: geqslant without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&geqslant"
+ ]
+ ]
+ },
+ {
+ "input": "&geqslant;",
+ "description": "Named entity: geqslant; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7e"
+ ]
+ ]
+ },
+ {
+ "input": "&ges",
+ "description": "Bad named entity: ges without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ges"
+ ]
+ ]
+ },
+ {
+ "input": "&ges;",
+ "description": "Named entity: ges; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7e"
+ ]
+ ]
+ },
+ {
+ "input": "&gescc",
+ "description": "Bad named entity: gescc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gescc"
+ ]
+ ]
+ },
+ {
+ "input": "&gescc;",
+ "description": "Named entity: gescc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa9"
+ ]
+ ]
+ },
+ {
+ "input": "&gesdot",
+ "description": "Bad named entity: gesdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gesdot"
+ ]
+ ]
+ },
+ {
+ "input": "&gesdot;",
+ "description": "Named entity: gesdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a80"
+ ]
+ ]
+ },
+ {
+ "input": "&gesdoto",
+ "description": "Bad named entity: gesdoto without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gesdoto"
+ ]
+ ]
+ },
+ {
+ "input": "&gesdoto;",
+ "description": "Named entity: gesdoto; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a82"
+ ]
+ ]
+ },
+ {
+ "input": "&gesdotol",
+ "description": "Bad named entity: gesdotol without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gesdotol"
+ ]
+ ]
+ },
+ {
+ "input": "&gesdotol;",
+ "description": "Named entity: gesdotol; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a84"
+ ]
+ ]
+ },
+ {
+ "input": "&gesl",
+ "description": "Bad named entity: gesl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gesl"
+ ]
+ ]
+ },
+ {
+ "input": "&gesl;",
+ "description": "Named entity: gesl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22db\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&gesles",
+ "description": "Bad named entity: gesles without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gesles"
+ ]
+ ]
+ },
+ {
+ "input": "&gesles;",
+ "description": "Named entity: gesles; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a94"
+ ]
+ ]
+ },
+ {
+ "input": "&gfr",
+ "description": "Bad named entity: gfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gfr"
+ ]
+ ]
+ },
+ {
+ "input": "&gfr;",
+ "description": "Named entity: gfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd24"
+ ]
+ ]
+ },
+ {
+ "input": "&gg",
+ "description": "Bad named entity: gg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gg"
+ ]
+ ]
+ },
+ {
+ "input": "&gg;",
+ "description": "Named entity: gg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226b"
+ ]
+ ]
+ },
+ {
+ "input": "&ggg",
+ "description": "Bad named entity: ggg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ggg"
+ ]
+ ]
+ },
+ {
+ "input": "&ggg;",
+ "description": "Named entity: ggg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d9"
+ ]
+ ]
+ },
+ {
+ "input": "&gimel",
+ "description": "Bad named entity: gimel without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gimel"
+ ]
+ ]
+ },
+ {
+ "input": "&gimel;",
+ "description": "Named entity: gimel; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2137"
+ ]
+ ]
+ },
+ {
+ "input": "&gjcy",
+ "description": "Bad named entity: gjcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gjcy"
+ ]
+ ]
+ },
+ {
+ "input": "&gjcy;",
+ "description": "Named entity: gjcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0453"
+ ]
+ ]
+ },
+ {
+ "input": "&gl",
+ "description": "Bad named entity: gl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gl"
+ ]
+ ]
+ },
+ {
+ "input": "&gl;",
+ "description": "Named entity: gl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2277"
+ ]
+ ]
+ },
+ {
+ "input": "&glE",
+ "description": "Bad named entity: glE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&glE"
+ ]
+ ]
+ },
+ {
+ "input": "&glE;",
+ "description": "Named entity: glE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a92"
+ ]
+ ]
+ },
+ {
+ "input": "&gla",
+ "description": "Bad named entity: gla without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gla"
+ ]
+ ]
+ },
+ {
+ "input": "&gla;",
+ "description": "Named entity: gla; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa5"
+ ]
+ ]
+ },
+ {
+ "input": "&glj",
+ "description": "Bad named entity: glj without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&glj"
+ ]
+ ]
+ },
+ {
+ "input": "&glj;",
+ "description": "Named entity: glj; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa4"
+ ]
+ ]
+ },
+ {
+ "input": "&gnE",
+ "description": "Bad named entity: gnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gnE"
+ ]
+ ]
+ },
+ {
+ "input": "&gnE;",
+ "description": "Named entity: gnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2269"
+ ]
+ ]
+ },
+ {
+ "input": "&gnap",
+ "description": "Bad named entity: gnap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gnap"
+ ]
+ ]
+ },
+ {
+ "input": "&gnap;",
+ "description": "Named entity: gnap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8a"
+ ]
+ ]
+ },
+ {
+ "input": "&gnapprox",
+ "description": "Bad named entity: gnapprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gnapprox"
+ ]
+ ]
+ },
+ {
+ "input": "&gnapprox;",
+ "description": "Named entity: gnapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8a"
+ ]
+ ]
+ },
+ {
+ "input": "&gne",
+ "description": "Bad named entity: gne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gne"
+ ]
+ ]
+ },
+ {
+ "input": "&gne;",
+ "description": "Named entity: gne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a88"
+ ]
+ ]
+ },
+ {
+ "input": "&gneq",
+ "description": "Bad named entity: gneq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gneq"
+ ]
+ ]
+ },
+ {
+ "input": "&gneq;",
+ "description": "Named entity: gneq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a88"
+ ]
+ ]
+ },
+ {
+ "input": "&gneqq",
+ "description": "Bad named entity: gneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&gneqq;",
+ "description": "Named entity: gneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2269"
+ ]
+ ]
+ },
+ {
+ "input": "&gnsim",
+ "description": "Bad named entity: gnsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gnsim"
+ ]
+ ]
+ },
+ {
+ "input": "&gnsim;",
+ "description": "Named entity: gnsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e7"
+ ]
+ ]
+ },
+ {
+ "input": "&gopf",
+ "description": "Bad named entity: gopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gopf"
+ ]
+ ]
+ },
+ {
+ "input": "&gopf;",
+ "description": "Named entity: gopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd58"
+ ]
+ ]
+ },
+ {
+ "input": "&grave",
+ "description": "Bad named entity: grave without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&grave"
+ ]
+ ]
+ },
+ {
+ "input": "&grave;",
+ "description": "Named entity: grave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "`"
+ ]
+ ]
+ },
+ {
+ "input": "&gscr",
+ "description": "Bad named entity: gscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gscr"
+ ]
+ ]
+ },
+ {
+ "input": "&gscr;",
+ "description": "Named entity: gscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210a"
+ ]
+ ]
+ },
+ {
+ "input": "&gsim",
+ "description": "Bad named entity: gsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gsim"
+ ]
+ ]
+ },
+ {
+ "input": "&gsim;",
+ "description": "Named entity: gsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2273"
+ ]
+ ]
+ },
+ {
+ "input": "&gsime",
+ "description": "Bad named entity: gsime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gsime"
+ ]
+ ]
+ },
+ {
+ "input": "&gsime;",
+ "description": "Named entity: gsime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8e"
+ ]
+ ]
+ },
+ {
+ "input": "&gsiml",
+ "description": "Bad named entity: gsiml without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gsiml"
+ ]
+ ]
+ },
+ {
+ "input": "&gsiml;",
+ "description": "Named entity: gsiml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a90"
+ ]
+ ]
+ },
+ {
+ "input": "&gt",
+ "description": "Named entity: gt without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ ">"
+ ]
+ ]
+ },
+ {
+ "input": "&gt;",
+ "description": "Named entity: gt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ ">"
+ ]
+ ]
+ },
+ {
+ "input": "&gtcc;",
+ "description": "Named entity: gtcc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa7"
+ ]
+ ]
+ },
+ {
+ "input": "&gtcir;",
+ "description": "Named entity: gtcir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7a"
+ ]
+ ]
+ },
+ {
+ "input": "&gtdot;",
+ "description": "Named entity: gtdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d7"
+ ]
+ ]
+ },
+ {
+ "input": "&gtlPar;",
+ "description": "Named entity: gtlPar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2995"
+ ]
+ ]
+ },
+ {
+ "input": "&gtquest;",
+ "description": "Named entity: gtquest; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7c"
+ ]
+ ]
+ },
+ {
+ "input": "&gtrapprox;",
+ "description": "Named entity: gtrapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a86"
+ ]
+ ]
+ },
+ {
+ "input": "&gtrarr;",
+ "description": "Named entity: gtrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2978"
+ ]
+ ]
+ },
+ {
+ "input": "&gtrdot;",
+ "description": "Named entity: gtrdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d7"
+ ]
+ ]
+ },
+ {
+ "input": "&gtreqless;",
+ "description": "Named entity: gtreqless; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22db"
+ ]
+ ]
+ },
+ {
+ "input": "&gtreqqless;",
+ "description": "Named entity: gtreqqless; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8c"
+ ]
+ ]
+ },
+ {
+ "input": "&gtrless;",
+ "description": "Named entity: gtrless; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2277"
+ ]
+ ]
+ },
+ {
+ "input": "&gtrsim;",
+ "description": "Named entity: gtrsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2273"
+ ]
+ ]
+ },
+ {
+ "input": "&gvertneqq",
+ "description": "Bad named entity: gvertneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gvertneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&gvertneqq;",
+ "description": "Named entity: gvertneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2269\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&gvnE",
+ "description": "Bad named entity: gvnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&gvnE"
+ ]
+ ]
+ },
+ {
+ "input": "&gvnE;",
+ "description": "Named entity: gvnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2269\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&hArr",
+ "description": "Bad named entity: hArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hArr"
+ ]
+ ]
+ },
+ {
+ "input": "&hArr;",
+ "description": "Named entity: hArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d4"
+ ]
+ ]
+ },
+ {
+ "input": "&hairsp",
+ "description": "Bad named entity: hairsp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hairsp"
+ ]
+ ]
+ },
+ {
+ "input": "&hairsp;",
+ "description": "Named entity: hairsp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200a"
+ ]
+ ]
+ },
+ {
+ "input": "&half",
+ "description": "Bad named entity: half without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&half"
+ ]
+ ]
+ },
+ {
+ "input": "&half;",
+ "description": "Named entity: half; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00bd"
+ ]
+ ]
+ },
+ {
+ "input": "&hamilt",
+ "description": "Bad named entity: hamilt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hamilt"
+ ]
+ ]
+ },
+ {
+ "input": "&hamilt;",
+ "description": "Named entity: hamilt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210b"
+ ]
+ ]
+ },
+ {
+ "input": "&hardcy",
+ "description": "Bad named entity: hardcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hardcy"
+ ]
+ ]
+ },
+ {
+ "input": "&hardcy;",
+ "description": "Named entity: hardcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u044a"
+ ]
+ ]
+ },
+ {
+ "input": "&harr",
+ "description": "Bad named entity: harr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&harr"
+ ]
+ ]
+ },
+ {
+ "input": "&harr;",
+ "description": "Named entity: harr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2194"
+ ]
+ ]
+ },
+ {
+ "input": "&harrcir",
+ "description": "Bad named entity: harrcir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&harrcir"
+ ]
+ ]
+ },
+ {
+ "input": "&harrcir;",
+ "description": "Named entity: harrcir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2948"
+ ]
+ ]
+ },
+ {
+ "input": "&harrw",
+ "description": "Bad named entity: harrw without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&harrw"
+ ]
+ ]
+ },
+ {
+ "input": "&harrw;",
+ "description": "Named entity: harrw; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ad"
+ ]
+ ]
+ },
+ {
+ "input": "&hbar",
+ "description": "Bad named entity: hbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hbar"
+ ]
+ ]
+ },
+ {
+ "input": "&hbar;",
+ "description": "Named entity: hbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210f"
+ ]
+ ]
+ },
+ {
+ "input": "&hcirc",
+ "description": "Bad named entity: hcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&hcirc;",
+ "description": "Named entity: hcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0125"
+ ]
+ ]
+ },
+ {
+ "input": "&hearts",
+ "description": "Bad named entity: hearts without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hearts"
+ ]
+ ]
+ },
+ {
+ "input": "&hearts;",
+ "description": "Named entity: hearts; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2665"
+ ]
+ ]
+ },
+ {
+ "input": "&heartsuit",
+ "description": "Bad named entity: heartsuit without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&heartsuit"
+ ]
+ ]
+ },
+ {
+ "input": "&heartsuit;",
+ "description": "Named entity: heartsuit; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2665"
+ ]
+ ]
+ },
+ {
+ "input": "&hellip",
+ "description": "Bad named entity: hellip without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hellip"
+ ]
+ ]
+ },
+ {
+ "input": "&hellip;",
+ "description": "Named entity: hellip; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2026"
+ ]
+ ]
+ },
+ {
+ "input": "&hercon",
+ "description": "Bad named entity: hercon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hercon"
+ ]
+ ]
+ },
+ {
+ "input": "&hercon;",
+ "description": "Named entity: hercon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b9"
+ ]
+ ]
+ },
+ {
+ "input": "&hfr",
+ "description": "Bad named entity: hfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hfr"
+ ]
+ ]
+ },
+ {
+ "input": "&hfr;",
+ "description": "Named entity: hfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd25"
+ ]
+ ]
+ },
+ {
+ "input": "&hksearow",
+ "description": "Bad named entity: hksearow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hksearow"
+ ]
+ ]
+ },
+ {
+ "input": "&hksearow;",
+ "description": "Named entity: hksearow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2925"
+ ]
+ ]
+ },
+ {
+ "input": "&hkswarow",
+ "description": "Bad named entity: hkswarow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hkswarow"
+ ]
+ ]
+ },
+ {
+ "input": "&hkswarow;",
+ "description": "Named entity: hkswarow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2926"
+ ]
+ ]
+ },
+ {
+ "input": "&hoarr",
+ "description": "Bad named entity: hoarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hoarr"
+ ]
+ ]
+ },
+ {
+ "input": "&hoarr;",
+ "description": "Named entity: hoarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ff"
+ ]
+ ]
+ },
+ {
+ "input": "&homtht",
+ "description": "Bad named entity: homtht without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&homtht"
+ ]
+ ]
+ },
+ {
+ "input": "&homtht;",
+ "description": "Named entity: homtht; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223b"
+ ]
+ ]
+ },
+ {
+ "input": "&hookleftarrow",
+ "description": "Bad named entity: hookleftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hookleftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&hookleftarrow;",
+ "description": "Named entity: hookleftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a9"
+ ]
+ ]
+ },
+ {
+ "input": "&hookrightarrow",
+ "description": "Bad named entity: hookrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hookrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&hookrightarrow;",
+ "description": "Named entity: hookrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21aa"
+ ]
+ ]
+ },
+ {
+ "input": "&hopf",
+ "description": "Bad named entity: hopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hopf"
+ ]
+ ]
+ },
+ {
+ "input": "&hopf;",
+ "description": "Named entity: hopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd59"
+ ]
+ ]
+ },
+ {
+ "input": "&horbar",
+ "description": "Bad named entity: horbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&horbar"
+ ]
+ ]
+ },
+ {
+ "input": "&horbar;",
+ "description": "Named entity: horbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2015"
+ ]
+ ]
+ },
+ {
+ "input": "&hscr",
+ "description": "Bad named entity: hscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hscr"
+ ]
+ ]
+ },
+ {
+ "input": "&hscr;",
+ "description": "Named entity: hscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcbd"
+ ]
+ ]
+ },
+ {
+ "input": "&hslash",
+ "description": "Bad named entity: hslash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hslash"
+ ]
+ ]
+ },
+ {
+ "input": "&hslash;",
+ "description": "Named entity: hslash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210f"
+ ]
+ ]
+ },
+ {
+ "input": "&hstrok",
+ "description": "Bad named entity: hstrok without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hstrok"
+ ]
+ ]
+ },
+ {
+ "input": "&hstrok;",
+ "description": "Named entity: hstrok; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0127"
+ ]
+ ]
+ },
+ {
+ "input": "&hybull",
+ "description": "Bad named entity: hybull without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hybull"
+ ]
+ ]
+ },
+ {
+ "input": "&hybull;",
+ "description": "Named entity: hybull; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2043"
+ ]
+ ]
+ },
+ {
+ "input": "&hyphen",
+ "description": "Bad named entity: hyphen without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&hyphen"
+ ]
+ ]
+ },
+ {
+ "input": "&hyphen;",
+ "description": "Named entity: hyphen; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2010"
+ ]
+ ]
+ },
+ {
+ "input": "&iacute",
+ "description": "Named entity: iacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ed"
+ ]
+ ]
+ },
+ {
+ "input": "&iacute;",
+ "description": "Named entity: iacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ed"
+ ]
+ ]
+ },
+ {
+ "input": "&ic",
+ "description": "Bad named entity: ic without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ic"
+ ]
+ ]
+ },
+ {
+ "input": "&ic;",
+ "description": "Named entity: ic; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2063"
+ ]
+ ]
+ },
+ {
+ "input": "&icirc",
+ "description": "Named entity: icirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ee"
+ ]
+ ]
+ },
+ {
+ "input": "&icirc;",
+ "description": "Named entity: icirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ee"
+ ]
+ ]
+ },
+ {
+ "input": "&icy",
+ "description": "Bad named entity: icy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&icy"
+ ]
+ ]
+ },
+ {
+ "input": "&icy;",
+ "description": "Named entity: icy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0438"
+ ]
+ ]
+ },
+ {
+ "input": "&iecy",
+ "description": "Bad named entity: iecy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iecy"
+ ]
+ ]
+ },
+ {
+ "input": "&iecy;",
+ "description": "Named entity: iecy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0435"
+ ]
+ ]
+ },
+ {
+ "input": "&iexcl",
+ "description": "Named entity: iexcl without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a1"
+ ]
+ ]
+ },
+ {
+ "input": "&iexcl;",
+ "description": "Named entity: iexcl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a1"
+ ]
+ ]
+ },
+ {
+ "input": "&iff",
+ "description": "Bad named entity: iff without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iff"
+ ]
+ ]
+ },
+ {
+ "input": "&iff;",
+ "description": "Named entity: iff; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d4"
+ ]
+ ]
+ },
+ {
+ "input": "&ifr",
+ "description": "Bad named entity: ifr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ifr"
+ ]
+ ]
+ },
+ {
+ "input": "&ifr;",
+ "description": "Named entity: ifr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd26"
+ ]
+ ]
+ },
+ {
+ "input": "&igrave",
+ "description": "Named entity: igrave without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ec"
+ ]
+ ]
+ },
+ {
+ "input": "&igrave;",
+ "description": "Named entity: igrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ec"
+ ]
+ ]
+ },
+ {
+ "input": "&ii",
+ "description": "Bad named entity: ii without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ii"
+ ]
+ ]
+ },
+ {
+ "input": "&ii;",
+ "description": "Named entity: ii; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2148"
+ ]
+ ]
+ },
+ {
+ "input": "&iiiint",
+ "description": "Bad named entity: iiiint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iiiint"
+ ]
+ ]
+ },
+ {
+ "input": "&iiiint;",
+ "description": "Named entity: iiiint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a0c"
+ ]
+ ]
+ },
+ {
+ "input": "&iiint",
+ "description": "Bad named entity: iiint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iiint"
+ ]
+ ]
+ },
+ {
+ "input": "&iiint;",
+ "description": "Named entity: iiint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222d"
+ ]
+ ]
+ },
+ {
+ "input": "&iinfin",
+ "description": "Bad named entity: iinfin without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iinfin"
+ ]
+ ]
+ },
+ {
+ "input": "&iinfin;",
+ "description": "Named entity: iinfin; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29dc"
+ ]
+ ]
+ },
+ {
+ "input": "&iiota",
+ "description": "Bad named entity: iiota without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iiota"
+ ]
+ ]
+ },
+ {
+ "input": "&iiota;",
+ "description": "Named entity: iiota; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2129"
+ ]
+ ]
+ },
+ {
+ "input": "&ijlig",
+ "description": "Bad named entity: ijlig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ijlig"
+ ]
+ ]
+ },
+ {
+ "input": "&ijlig;",
+ "description": "Named entity: ijlig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0133"
+ ]
+ ]
+ },
+ {
+ "input": "&imacr",
+ "description": "Bad named entity: imacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&imacr"
+ ]
+ ]
+ },
+ {
+ "input": "&imacr;",
+ "description": "Named entity: imacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u012b"
+ ]
+ ]
+ },
+ {
+ "input": "&image",
+ "description": "Bad named entity: image without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&image"
+ ]
+ ]
+ },
+ {
+ "input": "&image;",
+ "description": "Named entity: image; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2111"
+ ]
+ ]
+ },
+ {
+ "input": "&imagline",
+ "description": "Bad named entity: imagline without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&imagline"
+ ]
+ ]
+ },
+ {
+ "input": "&imagline;",
+ "description": "Named entity: imagline; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2110"
+ ]
+ ]
+ },
+ {
+ "input": "&imagpart",
+ "description": "Bad named entity: imagpart without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&imagpart"
+ ]
+ ]
+ },
+ {
+ "input": "&imagpart;",
+ "description": "Named entity: imagpart; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2111"
+ ]
+ ]
+ },
+ {
+ "input": "&imath",
+ "description": "Bad named entity: imath without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&imath"
+ ]
+ ]
+ },
+ {
+ "input": "&imath;",
+ "description": "Named entity: imath; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0131"
+ ]
+ ]
+ },
+ {
+ "input": "&imof",
+ "description": "Bad named entity: imof without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&imof"
+ ]
+ ]
+ },
+ {
+ "input": "&imof;",
+ "description": "Named entity: imof; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b7"
+ ]
+ ]
+ },
+ {
+ "input": "&imped",
+ "description": "Bad named entity: imped without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&imped"
+ ]
+ ]
+ },
+ {
+ "input": "&imped;",
+ "description": "Named entity: imped; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u01b5"
+ ]
+ ]
+ },
+ {
+ "input": "&in",
+ "description": "Bad named entity: in without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&in"
+ ]
+ ]
+ },
+ {
+ "input": "&in;",
+ "description": "Named entity: in; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2208"
+ ]
+ ]
+ },
+ {
+ "input": "&incare",
+ "description": "Bad named entity: incare without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&incare"
+ ]
+ ]
+ },
+ {
+ "input": "&incare;",
+ "description": "Named entity: incare; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2105"
+ ]
+ ]
+ },
+ {
+ "input": "&infin",
+ "description": "Bad named entity: infin without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&infin"
+ ]
+ ]
+ },
+ {
+ "input": "&infin;",
+ "description": "Named entity: infin; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221e"
+ ]
+ ]
+ },
+ {
+ "input": "&infintie",
+ "description": "Bad named entity: infintie without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&infintie"
+ ]
+ ]
+ },
+ {
+ "input": "&infintie;",
+ "description": "Named entity: infintie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29dd"
+ ]
+ ]
+ },
+ {
+ "input": "&inodot",
+ "description": "Bad named entity: inodot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&inodot"
+ ]
+ ]
+ },
+ {
+ "input": "&inodot;",
+ "description": "Named entity: inodot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0131"
+ ]
+ ]
+ },
+ {
+ "input": "&int",
+ "description": "Bad named entity: int without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&int"
+ ]
+ ]
+ },
+ {
+ "input": "&int;",
+ "description": "Named entity: int; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222b"
+ ]
+ ]
+ },
+ {
+ "input": "&intcal",
+ "description": "Bad named entity: intcal without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&intcal"
+ ]
+ ]
+ },
+ {
+ "input": "&intcal;",
+ "description": "Named entity: intcal; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ba"
+ ]
+ ]
+ },
+ {
+ "input": "&integers",
+ "description": "Bad named entity: integers without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&integers"
+ ]
+ ]
+ },
+ {
+ "input": "&integers;",
+ "description": "Named entity: integers; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2124"
+ ]
+ ]
+ },
+ {
+ "input": "&intercal",
+ "description": "Bad named entity: intercal without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&intercal"
+ ]
+ ]
+ },
+ {
+ "input": "&intercal;",
+ "description": "Named entity: intercal; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ba"
+ ]
+ ]
+ },
+ {
+ "input": "&intlarhk",
+ "description": "Bad named entity: intlarhk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&intlarhk"
+ ]
+ ]
+ },
+ {
+ "input": "&intlarhk;",
+ "description": "Named entity: intlarhk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a17"
+ ]
+ ]
+ },
+ {
+ "input": "&intprod",
+ "description": "Bad named entity: intprod without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&intprod"
+ ]
+ ]
+ },
+ {
+ "input": "&intprod;",
+ "description": "Named entity: intprod; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a3c"
+ ]
+ ]
+ },
+ {
+ "input": "&iocy",
+ "description": "Bad named entity: iocy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iocy"
+ ]
+ ]
+ },
+ {
+ "input": "&iocy;",
+ "description": "Named entity: iocy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0451"
+ ]
+ ]
+ },
+ {
+ "input": "&iogon",
+ "description": "Bad named entity: iogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iogon"
+ ]
+ ]
+ },
+ {
+ "input": "&iogon;",
+ "description": "Named entity: iogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u012f"
+ ]
+ ]
+ },
+ {
+ "input": "&iopf",
+ "description": "Bad named entity: iopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iopf"
+ ]
+ ]
+ },
+ {
+ "input": "&iopf;",
+ "description": "Named entity: iopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd5a"
+ ]
+ ]
+ },
+ {
+ "input": "&iota",
+ "description": "Bad named entity: iota without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iota"
+ ]
+ ]
+ },
+ {
+ "input": "&iota;",
+ "description": "Named entity: iota; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b9"
+ ]
+ ]
+ },
+ {
+ "input": "&iprod",
+ "description": "Bad named entity: iprod without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iprod"
+ ]
+ ]
+ },
+ {
+ "input": "&iprod;",
+ "description": "Named entity: iprod; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a3c"
+ ]
+ ]
+ },
+ {
+ "input": "&iquest",
+ "description": "Named entity: iquest without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00bf"
+ ]
+ ]
+ },
+ {
+ "input": "&iquest;",
+ "description": "Named entity: iquest; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00bf"
+ ]
+ ]
+ },
+ {
+ "input": "&iscr",
+ "description": "Bad named entity: iscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iscr"
+ ]
+ ]
+ },
+ {
+ "input": "&iscr;",
+ "description": "Named entity: iscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcbe"
+ ]
+ ]
+ },
+ {
+ "input": "&isin",
+ "description": "Bad named entity: isin without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&isin"
+ ]
+ ]
+ },
+ {
+ "input": "&isin;",
+ "description": "Named entity: isin; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2208"
+ ]
+ ]
+ },
+ {
+ "input": "&isinE",
+ "description": "Bad named entity: isinE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&isinE"
+ ]
+ ]
+ },
+ {
+ "input": "&isinE;",
+ "description": "Named entity: isinE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f9"
+ ]
+ ]
+ },
+ {
+ "input": "&isindot",
+ "description": "Bad named entity: isindot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&isindot"
+ ]
+ ]
+ },
+ {
+ "input": "&isindot;",
+ "description": "Named entity: isindot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f5"
+ ]
+ ]
+ },
+ {
+ "input": "&isins",
+ "description": "Bad named entity: isins without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&isins"
+ ]
+ ]
+ },
+ {
+ "input": "&isins;",
+ "description": "Named entity: isins; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f4"
+ ]
+ ]
+ },
+ {
+ "input": "&isinsv",
+ "description": "Bad named entity: isinsv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&isinsv"
+ ]
+ ]
+ },
+ {
+ "input": "&isinsv;",
+ "description": "Named entity: isinsv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f3"
+ ]
+ ]
+ },
+ {
+ "input": "&isinv",
+ "description": "Bad named entity: isinv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&isinv"
+ ]
+ ]
+ },
+ {
+ "input": "&isinv;",
+ "description": "Named entity: isinv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2208"
+ ]
+ ]
+ },
+ {
+ "input": "&it",
+ "description": "Bad named entity: it without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&it"
+ ]
+ ]
+ },
+ {
+ "input": "&it;",
+ "description": "Named entity: it; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2062"
+ ]
+ ]
+ },
+ {
+ "input": "&itilde",
+ "description": "Bad named entity: itilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&itilde"
+ ]
+ ]
+ },
+ {
+ "input": "&itilde;",
+ "description": "Named entity: itilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0129"
+ ]
+ ]
+ },
+ {
+ "input": "&iukcy",
+ "description": "Bad named entity: iukcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&iukcy"
+ ]
+ ]
+ },
+ {
+ "input": "&iukcy;",
+ "description": "Named entity: iukcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0456"
+ ]
+ ]
+ },
+ {
+ "input": "&iuml",
+ "description": "Named entity: iuml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ef"
+ ]
+ ]
+ },
+ {
+ "input": "&iuml;",
+ "description": "Named entity: iuml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ef"
+ ]
+ ]
+ },
+ {
+ "input": "&jcirc",
+ "description": "Bad named entity: jcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&jcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&jcirc;",
+ "description": "Named entity: jcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0135"
+ ]
+ ]
+ },
+ {
+ "input": "&jcy",
+ "description": "Bad named entity: jcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&jcy"
+ ]
+ ]
+ },
+ {
+ "input": "&jcy;",
+ "description": "Named entity: jcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0439"
+ ]
+ ]
+ },
+ {
+ "input": "&jfr",
+ "description": "Bad named entity: jfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&jfr"
+ ]
+ ]
+ },
+ {
+ "input": "&jfr;",
+ "description": "Named entity: jfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd27"
+ ]
+ ]
+ },
+ {
+ "input": "&jmath",
+ "description": "Bad named entity: jmath without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&jmath"
+ ]
+ ]
+ },
+ {
+ "input": "&jmath;",
+ "description": "Named entity: jmath; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0237"
+ ]
+ ]
+ },
+ {
+ "input": "&jopf",
+ "description": "Bad named entity: jopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&jopf"
+ ]
+ ]
+ },
+ {
+ "input": "&jopf;",
+ "description": "Named entity: jopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd5b"
+ ]
+ ]
+ },
+ {
+ "input": "&jscr",
+ "description": "Bad named entity: jscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&jscr"
+ ]
+ ]
+ },
+ {
+ "input": "&jscr;",
+ "description": "Named entity: jscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcbf"
+ ]
+ ]
+ },
+ {
+ "input": "&jsercy",
+ "description": "Bad named entity: jsercy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&jsercy"
+ ]
+ ]
+ },
+ {
+ "input": "&jsercy;",
+ "description": "Named entity: jsercy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0458"
+ ]
+ ]
+ },
+ {
+ "input": "&jukcy",
+ "description": "Bad named entity: jukcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&jukcy"
+ ]
+ ]
+ },
+ {
+ "input": "&jukcy;",
+ "description": "Named entity: jukcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0454"
+ ]
+ ]
+ },
+ {
+ "input": "&kappa",
+ "description": "Bad named entity: kappa without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kappa"
+ ]
+ ]
+ },
+ {
+ "input": "&kappa;",
+ "description": "Named entity: kappa; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03ba"
+ ]
+ ]
+ },
+ {
+ "input": "&kappav",
+ "description": "Bad named entity: kappav without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kappav"
+ ]
+ ]
+ },
+ {
+ "input": "&kappav;",
+ "description": "Named entity: kappav; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f0"
+ ]
+ ]
+ },
+ {
+ "input": "&kcedil",
+ "description": "Bad named entity: kcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&kcedil;",
+ "description": "Named entity: kcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0137"
+ ]
+ ]
+ },
+ {
+ "input": "&kcy",
+ "description": "Bad named entity: kcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kcy"
+ ]
+ ]
+ },
+ {
+ "input": "&kcy;",
+ "description": "Named entity: kcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u043a"
+ ]
+ ]
+ },
+ {
+ "input": "&kfr",
+ "description": "Bad named entity: kfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kfr"
+ ]
+ ]
+ },
+ {
+ "input": "&kfr;",
+ "description": "Named entity: kfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd28"
+ ]
+ ]
+ },
+ {
+ "input": "&kgreen",
+ "description": "Bad named entity: kgreen without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kgreen"
+ ]
+ ]
+ },
+ {
+ "input": "&kgreen;",
+ "description": "Named entity: kgreen; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0138"
+ ]
+ ]
+ },
+ {
+ "input": "&khcy",
+ "description": "Bad named entity: khcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&khcy"
+ ]
+ ]
+ },
+ {
+ "input": "&khcy;",
+ "description": "Named entity: khcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0445"
+ ]
+ ]
+ },
+ {
+ "input": "&kjcy",
+ "description": "Bad named entity: kjcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kjcy"
+ ]
+ ]
+ },
+ {
+ "input": "&kjcy;",
+ "description": "Named entity: kjcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u045c"
+ ]
+ ]
+ },
+ {
+ "input": "&kopf",
+ "description": "Bad named entity: kopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kopf"
+ ]
+ ]
+ },
+ {
+ "input": "&kopf;",
+ "description": "Named entity: kopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd5c"
+ ]
+ ]
+ },
+ {
+ "input": "&kscr",
+ "description": "Bad named entity: kscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&kscr"
+ ]
+ ]
+ },
+ {
+ "input": "&kscr;",
+ "description": "Named entity: kscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc0"
+ ]
+ ]
+ },
+ {
+ "input": "&lAarr",
+ "description": "Bad named entity: lAarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lAarr"
+ ]
+ ]
+ },
+ {
+ "input": "&lAarr;",
+ "description": "Named entity: lAarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21da"
+ ]
+ ]
+ },
+ {
+ "input": "&lArr",
+ "description": "Bad named entity: lArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lArr"
+ ]
+ ]
+ },
+ {
+ "input": "&lArr;",
+ "description": "Named entity: lArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d0"
+ ]
+ ]
+ },
+ {
+ "input": "&lAtail",
+ "description": "Bad named entity: lAtail without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lAtail"
+ ]
+ ]
+ },
+ {
+ "input": "&lAtail;",
+ "description": "Named entity: lAtail; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u291b"
+ ]
+ ]
+ },
+ {
+ "input": "&lBarr",
+ "description": "Bad named entity: lBarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lBarr"
+ ]
+ ]
+ },
+ {
+ "input": "&lBarr;",
+ "description": "Named entity: lBarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u290e"
+ ]
+ ]
+ },
+ {
+ "input": "&lE",
+ "description": "Bad named entity: lE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lE"
+ ]
+ ]
+ },
+ {
+ "input": "&lE;",
+ "description": "Named entity: lE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2266"
+ ]
+ ]
+ },
+ {
+ "input": "&lEg",
+ "description": "Bad named entity: lEg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lEg"
+ ]
+ ]
+ },
+ {
+ "input": "&lEg;",
+ "description": "Named entity: lEg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8b"
+ ]
+ ]
+ },
+ {
+ "input": "&lHar",
+ "description": "Bad named entity: lHar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lHar"
+ ]
+ ]
+ },
+ {
+ "input": "&lHar;",
+ "description": "Named entity: lHar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2962"
+ ]
+ ]
+ },
+ {
+ "input": "&lacute",
+ "description": "Bad named entity: lacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lacute"
+ ]
+ ]
+ },
+ {
+ "input": "&lacute;",
+ "description": "Named entity: lacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u013a"
+ ]
+ ]
+ },
+ {
+ "input": "&laemptyv",
+ "description": "Bad named entity: laemptyv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&laemptyv"
+ ]
+ ]
+ },
+ {
+ "input": "&laemptyv;",
+ "description": "Named entity: laemptyv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b4"
+ ]
+ ]
+ },
+ {
+ "input": "&lagran",
+ "description": "Bad named entity: lagran without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lagran"
+ ]
+ ]
+ },
+ {
+ "input": "&lagran;",
+ "description": "Named entity: lagran; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2112"
+ ]
+ ]
+ },
+ {
+ "input": "&lambda",
+ "description": "Bad named entity: lambda without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lambda"
+ ]
+ ]
+ },
+ {
+ "input": "&lambda;",
+ "description": "Named entity: lambda; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03bb"
+ ]
+ ]
+ },
+ {
+ "input": "&lang",
+ "description": "Bad named entity: lang without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lang"
+ ]
+ ]
+ },
+ {
+ "input": "&lang;",
+ "description": "Named entity: lang; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e8"
+ ]
+ ]
+ },
+ {
+ "input": "&langd",
+ "description": "Bad named entity: langd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&langd"
+ ]
+ ]
+ },
+ {
+ "input": "&langd;",
+ "description": "Named entity: langd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2991"
+ ]
+ ]
+ },
+ {
+ "input": "&langle",
+ "description": "Bad named entity: langle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&langle"
+ ]
+ ]
+ },
+ {
+ "input": "&langle;",
+ "description": "Named entity: langle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e8"
+ ]
+ ]
+ },
+ {
+ "input": "&lap",
+ "description": "Bad named entity: lap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lap"
+ ]
+ ]
+ },
+ {
+ "input": "&lap;",
+ "description": "Named entity: lap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a85"
+ ]
+ ]
+ },
+ {
+ "input": "&laquo",
+ "description": "Named entity: laquo without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ab"
+ ]
+ ]
+ },
+ {
+ "input": "&laquo;",
+ "description": "Named entity: laquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ab"
+ ]
+ ]
+ },
+ {
+ "input": "&larr",
+ "description": "Bad named entity: larr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larr"
+ ]
+ ]
+ },
+ {
+ "input": "&larr;",
+ "description": "Named entity: larr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2190"
+ ]
+ ]
+ },
+ {
+ "input": "&larrb",
+ "description": "Bad named entity: larrb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larrb"
+ ]
+ ]
+ },
+ {
+ "input": "&larrb;",
+ "description": "Named entity: larrb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21e4"
+ ]
+ ]
+ },
+ {
+ "input": "&larrbfs",
+ "description": "Bad named entity: larrbfs without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larrbfs"
+ ]
+ ]
+ },
+ {
+ "input": "&larrbfs;",
+ "description": "Named entity: larrbfs; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u291f"
+ ]
+ ]
+ },
+ {
+ "input": "&larrfs",
+ "description": "Bad named entity: larrfs without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larrfs"
+ ]
+ ]
+ },
+ {
+ "input": "&larrfs;",
+ "description": "Named entity: larrfs; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u291d"
+ ]
+ ]
+ },
+ {
+ "input": "&larrhk",
+ "description": "Bad named entity: larrhk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larrhk"
+ ]
+ ]
+ },
+ {
+ "input": "&larrhk;",
+ "description": "Named entity: larrhk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a9"
+ ]
+ ]
+ },
+ {
+ "input": "&larrlp",
+ "description": "Bad named entity: larrlp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larrlp"
+ ]
+ ]
+ },
+ {
+ "input": "&larrlp;",
+ "description": "Named entity: larrlp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ab"
+ ]
+ ]
+ },
+ {
+ "input": "&larrpl",
+ "description": "Bad named entity: larrpl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larrpl"
+ ]
+ ]
+ },
+ {
+ "input": "&larrpl;",
+ "description": "Named entity: larrpl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2939"
+ ]
+ ]
+ },
+ {
+ "input": "&larrsim",
+ "description": "Bad named entity: larrsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larrsim"
+ ]
+ ]
+ },
+ {
+ "input": "&larrsim;",
+ "description": "Named entity: larrsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2973"
+ ]
+ ]
+ },
+ {
+ "input": "&larrtl",
+ "description": "Bad named entity: larrtl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&larrtl"
+ ]
+ ]
+ },
+ {
+ "input": "&larrtl;",
+ "description": "Named entity: larrtl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a2"
+ ]
+ ]
+ },
+ {
+ "input": "&lat",
+ "description": "Bad named entity: lat without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lat"
+ ]
+ ]
+ },
+ {
+ "input": "&lat;",
+ "description": "Named entity: lat; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aab"
+ ]
+ ]
+ },
+ {
+ "input": "&latail",
+ "description": "Bad named entity: latail without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&latail"
+ ]
+ ]
+ },
+ {
+ "input": "&latail;",
+ "description": "Named entity: latail; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2919"
+ ]
+ ]
+ },
+ {
+ "input": "&late",
+ "description": "Bad named entity: late without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&late"
+ ]
+ ]
+ },
+ {
+ "input": "&late;",
+ "description": "Named entity: late; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aad"
+ ]
+ ]
+ },
+ {
+ "input": "&lates",
+ "description": "Bad named entity: lates without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lates"
+ ]
+ ]
+ },
+ {
+ "input": "&lates;",
+ "description": "Named entity: lates; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aad\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&lbarr",
+ "description": "Bad named entity: lbarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lbarr"
+ ]
+ ]
+ },
+ {
+ "input": "&lbarr;",
+ "description": "Named entity: lbarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u290c"
+ ]
+ ]
+ },
+ {
+ "input": "&lbbrk",
+ "description": "Bad named entity: lbbrk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lbbrk"
+ ]
+ ]
+ },
+ {
+ "input": "&lbbrk;",
+ "description": "Named entity: lbbrk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2772"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrace",
+ "description": "Bad named entity: lbrace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lbrace"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrace;",
+ "description": "Named entity: lbrace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "{"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrack",
+ "description": "Bad named entity: lbrack without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lbrack"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrack;",
+ "description": "Named entity: lbrack; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "["
+ ]
+ ]
+ },
+ {
+ "input": "&lbrke",
+ "description": "Bad named entity: lbrke without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lbrke"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrke;",
+ "description": "Named entity: lbrke; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u298b"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrksld",
+ "description": "Bad named entity: lbrksld without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lbrksld"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrksld;",
+ "description": "Named entity: lbrksld; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u298f"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrkslu",
+ "description": "Bad named entity: lbrkslu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lbrkslu"
+ ]
+ ]
+ },
+ {
+ "input": "&lbrkslu;",
+ "description": "Named entity: lbrkslu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u298d"
+ ]
+ ]
+ },
+ {
+ "input": "&lcaron",
+ "description": "Bad named entity: lcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&lcaron;",
+ "description": "Named entity: lcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u013e"
+ ]
+ ]
+ },
+ {
+ "input": "&lcedil",
+ "description": "Bad named entity: lcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&lcedil;",
+ "description": "Named entity: lcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u013c"
+ ]
+ ]
+ },
+ {
+ "input": "&lceil",
+ "description": "Bad named entity: lceil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lceil"
+ ]
+ ]
+ },
+ {
+ "input": "&lceil;",
+ "description": "Named entity: lceil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2308"
+ ]
+ ]
+ },
+ {
+ "input": "&lcub",
+ "description": "Bad named entity: lcub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lcub"
+ ]
+ ]
+ },
+ {
+ "input": "&lcub;",
+ "description": "Named entity: lcub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "{"
+ ]
+ ]
+ },
+ {
+ "input": "&lcy",
+ "description": "Bad named entity: lcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lcy"
+ ]
+ ]
+ },
+ {
+ "input": "&lcy;",
+ "description": "Named entity: lcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u043b"
+ ]
+ ]
+ },
+ {
+ "input": "&ldca",
+ "description": "Bad named entity: ldca without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ldca"
+ ]
+ ]
+ },
+ {
+ "input": "&ldca;",
+ "description": "Named entity: ldca; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2936"
+ ]
+ ]
+ },
+ {
+ "input": "&ldquo",
+ "description": "Bad named entity: ldquo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ldquo"
+ ]
+ ]
+ },
+ {
+ "input": "&ldquo;",
+ "description": "Named entity: ldquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201c"
+ ]
+ ]
+ },
+ {
+ "input": "&ldquor",
+ "description": "Bad named entity: ldquor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ldquor"
+ ]
+ ]
+ },
+ {
+ "input": "&ldquor;",
+ "description": "Named entity: ldquor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201e"
+ ]
+ ]
+ },
+ {
+ "input": "&ldrdhar",
+ "description": "Bad named entity: ldrdhar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ldrdhar"
+ ]
+ ]
+ },
+ {
+ "input": "&ldrdhar;",
+ "description": "Named entity: ldrdhar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2967"
+ ]
+ ]
+ },
+ {
+ "input": "&ldrushar",
+ "description": "Bad named entity: ldrushar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ldrushar"
+ ]
+ ]
+ },
+ {
+ "input": "&ldrushar;",
+ "description": "Named entity: ldrushar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u294b"
+ ]
+ ]
+ },
+ {
+ "input": "&ldsh",
+ "description": "Bad named entity: ldsh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ldsh"
+ ]
+ ]
+ },
+ {
+ "input": "&ldsh;",
+ "description": "Named entity: ldsh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b2"
+ ]
+ ]
+ },
+ {
+ "input": "&le",
+ "description": "Bad named entity: le without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&le"
+ ]
+ ]
+ },
+ {
+ "input": "&le;",
+ "description": "Named entity: le; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2264"
+ ]
+ ]
+ },
+ {
+ "input": "&leftarrow",
+ "description": "Bad named entity: leftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&leftarrow;",
+ "description": "Named entity: leftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2190"
+ ]
+ ]
+ },
+ {
+ "input": "&leftarrowtail",
+ "description": "Bad named entity: leftarrowtail without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftarrowtail"
+ ]
+ ]
+ },
+ {
+ "input": "&leftarrowtail;",
+ "description": "Named entity: leftarrowtail; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a2"
+ ]
+ ]
+ },
+ {
+ "input": "&leftharpoondown",
+ "description": "Bad named entity: leftharpoondown without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftharpoondown"
+ ]
+ ]
+ },
+ {
+ "input": "&leftharpoondown;",
+ "description": "Named entity: leftharpoondown; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bd"
+ ]
+ ]
+ },
+ {
+ "input": "&leftharpoonup",
+ "description": "Bad named entity: leftharpoonup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftharpoonup"
+ ]
+ ]
+ },
+ {
+ "input": "&leftharpoonup;",
+ "description": "Named entity: leftharpoonup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bc"
+ ]
+ ]
+ },
+ {
+ "input": "&leftleftarrows",
+ "description": "Bad named entity: leftleftarrows without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftleftarrows"
+ ]
+ ]
+ },
+ {
+ "input": "&leftleftarrows;",
+ "description": "Named entity: leftleftarrows; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c7"
+ ]
+ ]
+ },
+ {
+ "input": "&leftrightarrow",
+ "description": "Bad named entity: leftrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&leftrightarrow;",
+ "description": "Named entity: leftrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2194"
+ ]
+ ]
+ },
+ {
+ "input": "&leftrightarrows",
+ "description": "Bad named entity: leftrightarrows without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftrightarrows"
+ ]
+ ]
+ },
+ {
+ "input": "&leftrightarrows;",
+ "description": "Named entity: leftrightarrows; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c6"
+ ]
+ ]
+ },
+ {
+ "input": "&leftrightharpoons",
+ "description": "Bad named entity: leftrightharpoons without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftrightharpoons"
+ ]
+ ]
+ },
+ {
+ "input": "&leftrightharpoons;",
+ "description": "Named entity: leftrightharpoons; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cb"
+ ]
+ ]
+ },
+ {
+ "input": "&leftrightsquigarrow",
+ "description": "Bad named entity: leftrightsquigarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftrightsquigarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&leftrightsquigarrow;",
+ "description": "Named entity: leftrightsquigarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ad"
+ ]
+ ]
+ },
+ {
+ "input": "&leftthreetimes",
+ "description": "Bad named entity: leftthreetimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leftthreetimes"
+ ]
+ ]
+ },
+ {
+ "input": "&leftthreetimes;",
+ "description": "Named entity: leftthreetimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22cb"
+ ]
+ ]
+ },
+ {
+ "input": "&leg",
+ "description": "Bad named entity: leg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leg"
+ ]
+ ]
+ },
+ {
+ "input": "&leg;",
+ "description": "Named entity: leg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22da"
+ ]
+ ]
+ },
+ {
+ "input": "&leq",
+ "description": "Bad named entity: leq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leq"
+ ]
+ ]
+ },
+ {
+ "input": "&leq;",
+ "description": "Named entity: leq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2264"
+ ]
+ ]
+ },
+ {
+ "input": "&leqq",
+ "description": "Bad named entity: leqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leqq"
+ ]
+ ]
+ },
+ {
+ "input": "&leqq;",
+ "description": "Named entity: leqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2266"
+ ]
+ ]
+ },
+ {
+ "input": "&leqslant",
+ "description": "Bad named entity: leqslant without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&leqslant"
+ ]
+ ]
+ },
+ {
+ "input": "&leqslant;",
+ "description": "Named entity: leqslant; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7d"
+ ]
+ ]
+ },
+ {
+ "input": "&les",
+ "description": "Bad named entity: les without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&les"
+ ]
+ ]
+ },
+ {
+ "input": "&les;",
+ "description": "Named entity: les; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7d"
+ ]
+ ]
+ },
+ {
+ "input": "&lescc",
+ "description": "Bad named entity: lescc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lescc"
+ ]
+ ]
+ },
+ {
+ "input": "&lescc;",
+ "description": "Named entity: lescc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa8"
+ ]
+ ]
+ },
+ {
+ "input": "&lesdot",
+ "description": "Bad named entity: lesdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lesdot"
+ ]
+ ]
+ },
+ {
+ "input": "&lesdot;",
+ "description": "Named entity: lesdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7f"
+ ]
+ ]
+ },
+ {
+ "input": "&lesdoto",
+ "description": "Bad named entity: lesdoto without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lesdoto"
+ ]
+ ]
+ },
+ {
+ "input": "&lesdoto;",
+ "description": "Named entity: lesdoto; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a81"
+ ]
+ ]
+ },
+ {
+ "input": "&lesdotor",
+ "description": "Bad named entity: lesdotor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lesdotor"
+ ]
+ ]
+ },
+ {
+ "input": "&lesdotor;",
+ "description": "Named entity: lesdotor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a83"
+ ]
+ ]
+ },
+ {
+ "input": "&lesg",
+ "description": "Bad named entity: lesg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lesg"
+ ]
+ ]
+ },
+ {
+ "input": "&lesg;",
+ "description": "Named entity: lesg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22da\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&lesges",
+ "description": "Bad named entity: lesges without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lesges"
+ ]
+ ]
+ },
+ {
+ "input": "&lesges;",
+ "description": "Named entity: lesges; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a93"
+ ]
+ ]
+ },
+ {
+ "input": "&lessapprox",
+ "description": "Bad named entity: lessapprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lessapprox"
+ ]
+ ]
+ },
+ {
+ "input": "&lessapprox;",
+ "description": "Named entity: lessapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a85"
+ ]
+ ]
+ },
+ {
+ "input": "&lessdot",
+ "description": "Bad named entity: lessdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lessdot"
+ ]
+ ]
+ },
+ {
+ "input": "&lessdot;",
+ "description": "Named entity: lessdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d6"
+ ]
+ ]
+ },
+ {
+ "input": "&lesseqgtr",
+ "description": "Bad named entity: lesseqgtr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lesseqgtr"
+ ]
+ ]
+ },
+ {
+ "input": "&lesseqgtr;",
+ "description": "Named entity: lesseqgtr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22da"
+ ]
+ ]
+ },
+ {
+ "input": "&lesseqqgtr",
+ "description": "Bad named entity: lesseqqgtr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lesseqqgtr"
+ ]
+ ]
+ },
+ {
+ "input": "&lesseqqgtr;",
+ "description": "Named entity: lesseqqgtr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8b"
+ ]
+ ]
+ },
+ {
+ "input": "&lessgtr",
+ "description": "Bad named entity: lessgtr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lessgtr"
+ ]
+ ]
+ },
+ {
+ "input": "&lessgtr;",
+ "description": "Named entity: lessgtr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2276"
+ ]
+ ]
+ },
+ {
+ "input": "&lesssim",
+ "description": "Bad named entity: lesssim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lesssim"
+ ]
+ ]
+ },
+ {
+ "input": "&lesssim;",
+ "description": "Named entity: lesssim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2272"
+ ]
+ ]
+ },
+ {
+ "input": "&lfisht",
+ "description": "Bad named entity: lfisht without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lfisht"
+ ]
+ ]
+ },
+ {
+ "input": "&lfisht;",
+ "description": "Named entity: lfisht; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u297c"
+ ]
+ ]
+ },
+ {
+ "input": "&lfloor",
+ "description": "Bad named entity: lfloor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lfloor"
+ ]
+ ]
+ },
+ {
+ "input": "&lfloor;",
+ "description": "Named entity: lfloor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u230a"
+ ]
+ ]
+ },
+ {
+ "input": "&lfr",
+ "description": "Bad named entity: lfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lfr"
+ ]
+ ]
+ },
+ {
+ "input": "&lfr;",
+ "description": "Named entity: lfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd29"
+ ]
+ ]
+ },
+ {
+ "input": "&lg",
+ "description": "Bad named entity: lg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lg"
+ ]
+ ]
+ },
+ {
+ "input": "&lg;",
+ "description": "Named entity: lg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2276"
+ ]
+ ]
+ },
+ {
+ "input": "&lgE",
+ "description": "Bad named entity: lgE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lgE"
+ ]
+ ]
+ },
+ {
+ "input": "&lgE;",
+ "description": "Named entity: lgE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a91"
+ ]
+ ]
+ },
+ {
+ "input": "&lhard",
+ "description": "Bad named entity: lhard without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lhard"
+ ]
+ ]
+ },
+ {
+ "input": "&lhard;",
+ "description": "Named entity: lhard; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bd"
+ ]
+ ]
+ },
+ {
+ "input": "&lharu",
+ "description": "Bad named entity: lharu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lharu"
+ ]
+ ]
+ },
+ {
+ "input": "&lharu;",
+ "description": "Named entity: lharu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bc"
+ ]
+ ]
+ },
+ {
+ "input": "&lharul",
+ "description": "Bad named entity: lharul without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lharul"
+ ]
+ ]
+ },
+ {
+ "input": "&lharul;",
+ "description": "Named entity: lharul; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u296a"
+ ]
+ ]
+ },
+ {
+ "input": "&lhblk",
+ "description": "Bad named entity: lhblk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lhblk"
+ ]
+ ]
+ },
+ {
+ "input": "&lhblk;",
+ "description": "Named entity: lhblk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2584"
+ ]
+ ]
+ },
+ {
+ "input": "&ljcy",
+ "description": "Bad named entity: ljcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ljcy"
+ ]
+ ]
+ },
+ {
+ "input": "&ljcy;",
+ "description": "Named entity: ljcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0459"
+ ]
+ ]
+ },
+ {
+ "input": "&ll",
+ "description": "Bad named entity: ll without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ll"
+ ]
+ ]
+ },
+ {
+ "input": "&ll;",
+ "description": "Named entity: ll; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226a"
+ ]
+ ]
+ },
+ {
+ "input": "&llarr",
+ "description": "Bad named entity: llarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&llarr"
+ ]
+ ]
+ },
+ {
+ "input": "&llarr;",
+ "description": "Named entity: llarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c7"
+ ]
+ ]
+ },
+ {
+ "input": "&llcorner",
+ "description": "Bad named entity: llcorner without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&llcorner"
+ ]
+ ]
+ },
+ {
+ "input": "&llcorner;",
+ "description": "Named entity: llcorner; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u231e"
+ ]
+ ]
+ },
+ {
+ "input": "&llhard",
+ "description": "Bad named entity: llhard without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&llhard"
+ ]
+ ]
+ },
+ {
+ "input": "&llhard;",
+ "description": "Named entity: llhard; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u296b"
+ ]
+ ]
+ },
+ {
+ "input": "&lltri",
+ "description": "Bad named entity: lltri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lltri"
+ ]
+ ]
+ },
+ {
+ "input": "&lltri;",
+ "description": "Named entity: lltri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25fa"
+ ]
+ ]
+ },
+ {
+ "input": "&lmidot",
+ "description": "Bad named entity: lmidot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lmidot"
+ ]
+ ]
+ },
+ {
+ "input": "&lmidot;",
+ "description": "Named entity: lmidot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0140"
+ ]
+ ]
+ },
+ {
+ "input": "&lmoust",
+ "description": "Bad named entity: lmoust without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lmoust"
+ ]
+ ]
+ },
+ {
+ "input": "&lmoust;",
+ "description": "Named entity: lmoust; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b0"
+ ]
+ ]
+ },
+ {
+ "input": "&lmoustache",
+ "description": "Bad named entity: lmoustache without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lmoustache"
+ ]
+ ]
+ },
+ {
+ "input": "&lmoustache;",
+ "description": "Named entity: lmoustache; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b0"
+ ]
+ ]
+ },
+ {
+ "input": "&lnE",
+ "description": "Bad named entity: lnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lnE"
+ ]
+ ]
+ },
+ {
+ "input": "&lnE;",
+ "description": "Named entity: lnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2268"
+ ]
+ ]
+ },
+ {
+ "input": "&lnap",
+ "description": "Bad named entity: lnap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lnap"
+ ]
+ ]
+ },
+ {
+ "input": "&lnap;",
+ "description": "Named entity: lnap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a89"
+ ]
+ ]
+ },
+ {
+ "input": "&lnapprox",
+ "description": "Bad named entity: lnapprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lnapprox"
+ ]
+ ]
+ },
+ {
+ "input": "&lnapprox;",
+ "description": "Named entity: lnapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a89"
+ ]
+ ]
+ },
+ {
+ "input": "&lne",
+ "description": "Bad named entity: lne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lne"
+ ]
+ ]
+ },
+ {
+ "input": "&lne;",
+ "description": "Named entity: lne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a87"
+ ]
+ ]
+ },
+ {
+ "input": "&lneq",
+ "description": "Bad named entity: lneq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lneq"
+ ]
+ ]
+ },
+ {
+ "input": "&lneq;",
+ "description": "Named entity: lneq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a87"
+ ]
+ ]
+ },
+ {
+ "input": "&lneqq",
+ "description": "Bad named entity: lneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&lneqq;",
+ "description": "Named entity: lneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2268"
+ ]
+ ]
+ },
+ {
+ "input": "&lnsim",
+ "description": "Bad named entity: lnsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lnsim"
+ ]
+ ]
+ },
+ {
+ "input": "&lnsim;",
+ "description": "Named entity: lnsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e6"
+ ]
+ ]
+ },
+ {
+ "input": "&loang",
+ "description": "Bad named entity: loang without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&loang"
+ ]
+ ]
+ },
+ {
+ "input": "&loang;",
+ "description": "Named entity: loang; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27ec"
+ ]
+ ]
+ },
+ {
+ "input": "&loarr",
+ "description": "Bad named entity: loarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&loarr"
+ ]
+ ]
+ },
+ {
+ "input": "&loarr;",
+ "description": "Named entity: loarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21fd"
+ ]
+ ]
+ },
+ {
+ "input": "&lobrk",
+ "description": "Bad named entity: lobrk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lobrk"
+ ]
+ ]
+ },
+ {
+ "input": "&lobrk;",
+ "description": "Named entity: lobrk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e6"
+ ]
+ ]
+ },
+ {
+ "input": "&longleftarrow",
+ "description": "Bad named entity: longleftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&longleftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&longleftarrow;",
+ "description": "Named entity: longleftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f5"
+ ]
+ ]
+ },
+ {
+ "input": "&longleftrightarrow",
+ "description": "Bad named entity: longleftrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&longleftrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&longleftrightarrow;",
+ "description": "Named entity: longleftrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f7"
+ ]
+ ]
+ },
+ {
+ "input": "&longmapsto",
+ "description": "Bad named entity: longmapsto without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&longmapsto"
+ ]
+ ]
+ },
+ {
+ "input": "&longmapsto;",
+ "description": "Named entity: longmapsto; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27fc"
+ ]
+ ]
+ },
+ {
+ "input": "&longrightarrow",
+ "description": "Bad named entity: longrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&longrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&longrightarrow;",
+ "description": "Named entity: longrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f6"
+ ]
+ ]
+ },
+ {
+ "input": "&looparrowleft",
+ "description": "Bad named entity: looparrowleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&looparrowleft"
+ ]
+ ]
+ },
+ {
+ "input": "&looparrowleft;",
+ "description": "Named entity: looparrowleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ab"
+ ]
+ ]
+ },
+ {
+ "input": "&looparrowright",
+ "description": "Bad named entity: looparrowright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&looparrowright"
+ ]
+ ]
+ },
+ {
+ "input": "&looparrowright;",
+ "description": "Named entity: looparrowright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ac"
+ ]
+ ]
+ },
+ {
+ "input": "&lopar",
+ "description": "Bad named entity: lopar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lopar"
+ ]
+ ]
+ },
+ {
+ "input": "&lopar;",
+ "description": "Named entity: lopar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2985"
+ ]
+ ]
+ },
+ {
+ "input": "&lopf",
+ "description": "Bad named entity: lopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lopf"
+ ]
+ ]
+ },
+ {
+ "input": "&lopf;",
+ "description": "Named entity: lopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd5d"
+ ]
+ ]
+ },
+ {
+ "input": "&loplus",
+ "description": "Bad named entity: loplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&loplus"
+ ]
+ ]
+ },
+ {
+ "input": "&loplus;",
+ "description": "Named entity: loplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a2d"
+ ]
+ ]
+ },
+ {
+ "input": "&lotimes",
+ "description": "Bad named entity: lotimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lotimes"
+ ]
+ ]
+ },
+ {
+ "input": "&lotimes;",
+ "description": "Named entity: lotimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a34"
+ ]
+ ]
+ },
+ {
+ "input": "&lowast",
+ "description": "Bad named entity: lowast without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lowast"
+ ]
+ ]
+ },
+ {
+ "input": "&lowast;",
+ "description": "Named entity: lowast; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2217"
+ ]
+ ]
+ },
+ {
+ "input": "&lowbar",
+ "description": "Bad named entity: lowbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lowbar"
+ ]
+ ]
+ },
+ {
+ "input": "&lowbar;",
+ "description": "Named entity: lowbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "_"
+ ]
+ ]
+ },
+ {
+ "input": "&loz",
+ "description": "Bad named entity: loz without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&loz"
+ ]
+ ]
+ },
+ {
+ "input": "&loz;",
+ "description": "Named entity: loz; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25ca"
+ ]
+ ]
+ },
+ {
+ "input": "&lozenge",
+ "description": "Bad named entity: lozenge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lozenge"
+ ]
+ ]
+ },
+ {
+ "input": "&lozenge;",
+ "description": "Named entity: lozenge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25ca"
+ ]
+ ]
+ },
+ {
+ "input": "&lozf",
+ "description": "Bad named entity: lozf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lozf"
+ ]
+ ]
+ },
+ {
+ "input": "&lozf;",
+ "description": "Named entity: lozf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29eb"
+ ]
+ ]
+ },
+ {
+ "input": "&lpar",
+ "description": "Bad named entity: lpar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lpar"
+ ]
+ ]
+ },
+ {
+ "input": "&lpar;",
+ "description": "Named entity: lpar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "("
+ ]
+ ]
+ },
+ {
+ "input": "&lparlt",
+ "description": "Bad named entity: lparlt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lparlt"
+ ]
+ ]
+ },
+ {
+ "input": "&lparlt;",
+ "description": "Named entity: lparlt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2993"
+ ]
+ ]
+ },
+ {
+ "input": "&lrarr",
+ "description": "Bad named entity: lrarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lrarr"
+ ]
+ ]
+ },
+ {
+ "input": "&lrarr;",
+ "description": "Named entity: lrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c6"
+ ]
+ ]
+ },
+ {
+ "input": "&lrcorner",
+ "description": "Bad named entity: lrcorner without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lrcorner"
+ ]
+ ]
+ },
+ {
+ "input": "&lrcorner;",
+ "description": "Named entity: lrcorner; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u231f"
+ ]
+ ]
+ },
+ {
+ "input": "&lrhar",
+ "description": "Bad named entity: lrhar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lrhar"
+ ]
+ ]
+ },
+ {
+ "input": "&lrhar;",
+ "description": "Named entity: lrhar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cb"
+ ]
+ ]
+ },
+ {
+ "input": "&lrhard",
+ "description": "Bad named entity: lrhard without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lrhard"
+ ]
+ ]
+ },
+ {
+ "input": "&lrhard;",
+ "description": "Named entity: lrhard; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u296d"
+ ]
+ ]
+ },
+ {
+ "input": "&lrm",
+ "description": "Bad named entity: lrm without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lrm"
+ ]
+ ]
+ },
+ {
+ "input": "&lrm;",
+ "description": "Named entity: lrm; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200e"
+ ]
+ ]
+ },
+ {
+ "input": "&lrtri",
+ "description": "Bad named entity: lrtri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lrtri"
+ ]
+ ]
+ },
+ {
+ "input": "&lrtri;",
+ "description": "Named entity: lrtri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22bf"
+ ]
+ ]
+ },
+ {
+ "input": "&lsaquo",
+ "description": "Bad named entity: lsaquo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lsaquo"
+ ]
+ ]
+ },
+ {
+ "input": "&lsaquo;",
+ "description": "Named entity: lsaquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2039"
+ ]
+ ]
+ },
+ {
+ "input": "&lscr",
+ "description": "Bad named entity: lscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lscr"
+ ]
+ ]
+ },
+ {
+ "input": "&lscr;",
+ "description": "Named entity: lscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc1"
+ ]
+ ]
+ },
+ {
+ "input": "&lsh",
+ "description": "Bad named entity: lsh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lsh"
+ ]
+ ]
+ },
+ {
+ "input": "&lsh;",
+ "description": "Named entity: lsh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b0"
+ ]
+ ]
+ },
+ {
+ "input": "&lsim",
+ "description": "Bad named entity: lsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lsim"
+ ]
+ ]
+ },
+ {
+ "input": "&lsim;",
+ "description": "Named entity: lsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2272"
+ ]
+ ]
+ },
+ {
+ "input": "&lsime",
+ "description": "Bad named entity: lsime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lsime"
+ ]
+ ]
+ },
+ {
+ "input": "&lsime;",
+ "description": "Named entity: lsime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8d"
+ ]
+ ]
+ },
+ {
+ "input": "&lsimg",
+ "description": "Bad named entity: lsimg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lsimg"
+ ]
+ ]
+ },
+ {
+ "input": "&lsimg;",
+ "description": "Named entity: lsimg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a8f"
+ ]
+ ]
+ },
+ {
+ "input": "&lsqb",
+ "description": "Bad named entity: lsqb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lsqb"
+ ]
+ ]
+ },
+ {
+ "input": "&lsqb;",
+ "description": "Named entity: lsqb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "["
+ ]
+ ]
+ },
+ {
+ "input": "&lsquo",
+ "description": "Bad named entity: lsquo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lsquo"
+ ]
+ ]
+ },
+ {
+ "input": "&lsquo;",
+ "description": "Named entity: lsquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2018"
+ ]
+ ]
+ },
+ {
+ "input": "&lsquor",
+ "description": "Bad named entity: lsquor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lsquor"
+ ]
+ ]
+ },
+ {
+ "input": "&lsquor;",
+ "description": "Named entity: lsquor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201a"
+ ]
+ ]
+ },
+ {
+ "input": "&lstrok",
+ "description": "Bad named entity: lstrok without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lstrok"
+ ]
+ ]
+ },
+ {
+ "input": "&lstrok;",
+ "description": "Named entity: lstrok; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0142"
+ ]
+ ]
+ },
+ {
+ "input": "&lt",
+ "description": "Named entity: lt without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "<"
+ ]
+ ]
+ },
+ {
+ "input": "&lt;",
+ "description": "Named entity: lt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "<"
+ ]
+ ]
+ },
+ {
+ "input": "&ltcc;",
+ "description": "Named entity: ltcc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa6"
+ ]
+ ]
+ },
+ {
+ "input": "&ltcir;",
+ "description": "Named entity: ltcir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a79"
+ ]
+ ]
+ },
+ {
+ "input": "&ltdot;",
+ "description": "Named entity: ltdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d6"
+ ]
+ ]
+ },
+ {
+ "input": "&lthree;",
+ "description": "Named entity: lthree; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22cb"
+ ]
+ ]
+ },
+ {
+ "input": "&ltimes;",
+ "description": "Named entity: ltimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c9"
+ ]
+ ]
+ },
+ {
+ "input": "&ltlarr;",
+ "description": "Named entity: ltlarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2976"
+ ]
+ ]
+ },
+ {
+ "input": "&ltquest;",
+ "description": "Named entity: ltquest; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7b"
+ ]
+ ]
+ },
+ {
+ "input": "&ltrPar;",
+ "description": "Named entity: ltrPar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2996"
+ ]
+ ]
+ },
+ {
+ "input": "&ltri;",
+ "description": "Named entity: ltri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25c3"
+ ]
+ ]
+ },
+ {
+ "input": "&ltrie;",
+ "description": "Named entity: ltrie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b4"
+ ]
+ ]
+ },
+ {
+ "input": "&ltrif;",
+ "description": "Named entity: ltrif; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25c2"
+ ]
+ ]
+ },
+ {
+ "input": "&lurdshar",
+ "description": "Bad named entity: lurdshar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lurdshar"
+ ]
+ ]
+ },
+ {
+ "input": "&lurdshar;",
+ "description": "Named entity: lurdshar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u294a"
+ ]
+ ]
+ },
+ {
+ "input": "&luruhar",
+ "description": "Bad named entity: luruhar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&luruhar"
+ ]
+ ]
+ },
+ {
+ "input": "&luruhar;",
+ "description": "Named entity: luruhar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2966"
+ ]
+ ]
+ },
+ {
+ "input": "&lvertneqq",
+ "description": "Bad named entity: lvertneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lvertneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&lvertneqq;",
+ "description": "Named entity: lvertneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2268\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&lvnE",
+ "description": "Bad named entity: lvnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&lvnE"
+ ]
+ ]
+ },
+ {
+ "input": "&lvnE;",
+ "description": "Named entity: lvnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2268\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&mDDot",
+ "description": "Bad named entity: mDDot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mDDot"
+ ]
+ ]
+ },
+ {
+ "input": "&mDDot;",
+ "description": "Named entity: mDDot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223a"
+ ]
+ ]
+ },
+ {
+ "input": "&macr",
+ "description": "Named entity: macr without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00af"
+ ]
+ ]
+ },
+ {
+ "input": "&macr;",
+ "description": "Named entity: macr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00af"
+ ]
+ ]
+ },
+ {
+ "input": "&male",
+ "description": "Bad named entity: male without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&male"
+ ]
+ ]
+ },
+ {
+ "input": "&male;",
+ "description": "Named entity: male; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2642"
+ ]
+ ]
+ },
+ {
+ "input": "&malt",
+ "description": "Bad named entity: malt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&malt"
+ ]
+ ]
+ },
+ {
+ "input": "&malt;",
+ "description": "Named entity: malt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2720"
+ ]
+ ]
+ },
+ {
+ "input": "&maltese",
+ "description": "Bad named entity: maltese without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&maltese"
+ ]
+ ]
+ },
+ {
+ "input": "&maltese;",
+ "description": "Named entity: maltese; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2720"
+ ]
+ ]
+ },
+ {
+ "input": "&map",
+ "description": "Bad named entity: map without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&map"
+ ]
+ ]
+ },
+ {
+ "input": "&map;",
+ "description": "Named entity: map; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a6"
+ ]
+ ]
+ },
+ {
+ "input": "&mapsto",
+ "description": "Bad named entity: mapsto without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mapsto"
+ ]
+ ]
+ },
+ {
+ "input": "&mapsto;",
+ "description": "Named entity: mapsto; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a6"
+ ]
+ ]
+ },
+ {
+ "input": "&mapstodown",
+ "description": "Bad named entity: mapstodown without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mapstodown"
+ ]
+ ]
+ },
+ {
+ "input": "&mapstodown;",
+ "description": "Named entity: mapstodown; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a7"
+ ]
+ ]
+ },
+ {
+ "input": "&mapstoleft",
+ "description": "Bad named entity: mapstoleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mapstoleft"
+ ]
+ ]
+ },
+ {
+ "input": "&mapstoleft;",
+ "description": "Named entity: mapstoleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a4"
+ ]
+ ]
+ },
+ {
+ "input": "&mapstoup",
+ "description": "Bad named entity: mapstoup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mapstoup"
+ ]
+ ]
+ },
+ {
+ "input": "&mapstoup;",
+ "description": "Named entity: mapstoup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a5"
+ ]
+ ]
+ },
+ {
+ "input": "&marker",
+ "description": "Bad named entity: marker without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&marker"
+ ]
+ ]
+ },
+ {
+ "input": "&marker;",
+ "description": "Named entity: marker; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25ae"
+ ]
+ ]
+ },
+ {
+ "input": "&mcomma",
+ "description": "Bad named entity: mcomma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mcomma"
+ ]
+ ]
+ },
+ {
+ "input": "&mcomma;",
+ "description": "Named entity: mcomma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a29"
+ ]
+ ]
+ },
+ {
+ "input": "&mcy",
+ "description": "Bad named entity: mcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mcy"
+ ]
+ ]
+ },
+ {
+ "input": "&mcy;",
+ "description": "Named entity: mcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u043c"
+ ]
+ ]
+ },
+ {
+ "input": "&mdash",
+ "description": "Bad named entity: mdash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mdash"
+ ]
+ ]
+ },
+ {
+ "input": "&mdash;",
+ "description": "Named entity: mdash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2014"
+ ]
+ ]
+ },
+ {
+ "input": "&measuredangle",
+ "description": "Bad named entity: measuredangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&measuredangle"
+ ]
+ ]
+ },
+ {
+ "input": "&measuredangle;",
+ "description": "Named entity: measuredangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2221"
+ ]
+ ]
+ },
+ {
+ "input": "&mfr",
+ "description": "Bad named entity: mfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mfr"
+ ]
+ ]
+ },
+ {
+ "input": "&mfr;",
+ "description": "Named entity: mfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd2a"
+ ]
+ ]
+ },
+ {
+ "input": "&mho",
+ "description": "Bad named entity: mho without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mho"
+ ]
+ ]
+ },
+ {
+ "input": "&mho;",
+ "description": "Named entity: mho; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2127"
+ ]
+ ]
+ },
+ {
+ "input": "&micro",
+ "description": "Named entity: micro without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b5"
+ ]
+ ]
+ },
+ {
+ "input": "&micro;",
+ "description": "Named entity: micro; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b5"
+ ]
+ ]
+ },
+ {
+ "input": "&mid",
+ "description": "Bad named entity: mid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mid"
+ ]
+ ]
+ },
+ {
+ "input": "&mid;",
+ "description": "Named entity: mid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2223"
+ ]
+ ]
+ },
+ {
+ "input": "&midast",
+ "description": "Bad named entity: midast without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&midast"
+ ]
+ ]
+ },
+ {
+ "input": "&midast;",
+ "description": "Named entity: midast; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "*"
+ ]
+ ]
+ },
+ {
+ "input": "&midcir",
+ "description": "Bad named entity: midcir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&midcir"
+ ]
+ ]
+ },
+ {
+ "input": "&midcir;",
+ "description": "Named entity: midcir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2af0"
+ ]
+ ]
+ },
+ {
+ "input": "&middot",
+ "description": "Named entity: middot without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b7"
+ ]
+ ]
+ },
+ {
+ "input": "&middot;",
+ "description": "Named entity: middot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b7"
+ ]
+ ]
+ },
+ {
+ "input": "&minus",
+ "description": "Bad named entity: minus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&minus"
+ ]
+ ]
+ },
+ {
+ "input": "&minus;",
+ "description": "Named entity: minus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2212"
+ ]
+ ]
+ },
+ {
+ "input": "&minusb",
+ "description": "Bad named entity: minusb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&minusb"
+ ]
+ ]
+ },
+ {
+ "input": "&minusb;",
+ "description": "Named entity: minusb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229f"
+ ]
+ ]
+ },
+ {
+ "input": "&minusd",
+ "description": "Bad named entity: minusd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&minusd"
+ ]
+ ]
+ },
+ {
+ "input": "&minusd;",
+ "description": "Named entity: minusd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2238"
+ ]
+ ]
+ },
+ {
+ "input": "&minusdu",
+ "description": "Bad named entity: minusdu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&minusdu"
+ ]
+ ]
+ },
+ {
+ "input": "&minusdu;",
+ "description": "Named entity: minusdu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a2a"
+ ]
+ ]
+ },
+ {
+ "input": "&mlcp",
+ "description": "Bad named entity: mlcp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mlcp"
+ ]
+ ]
+ },
+ {
+ "input": "&mlcp;",
+ "description": "Named entity: mlcp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2adb"
+ ]
+ ]
+ },
+ {
+ "input": "&mldr",
+ "description": "Bad named entity: mldr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mldr"
+ ]
+ ]
+ },
+ {
+ "input": "&mldr;",
+ "description": "Named entity: mldr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2026"
+ ]
+ ]
+ },
+ {
+ "input": "&mnplus",
+ "description": "Bad named entity: mnplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mnplus"
+ ]
+ ]
+ },
+ {
+ "input": "&mnplus;",
+ "description": "Named entity: mnplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2213"
+ ]
+ ]
+ },
+ {
+ "input": "&models",
+ "description": "Bad named entity: models without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&models"
+ ]
+ ]
+ },
+ {
+ "input": "&models;",
+ "description": "Named entity: models; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a7"
+ ]
+ ]
+ },
+ {
+ "input": "&mopf",
+ "description": "Bad named entity: mopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mopf"
+ ]
+ ]
+ },
+ {
+ "input": "&mopf;",
+ "description": "Named entity: mopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd5e"
+ ]
+ ]
+ },
+ {
+ "input": "&mp",
+ "description": "Bad named entity: mp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mp"
+ ]
+ ]
+ },
+ {
+ "input": "&mp;",
+ "description": "Named entity: mp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2213"
+ ]
+ ]
+ },
+ {
+ "input": "&mscr",
+ "description": "Bad named entity: mscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mscr"
+ ]
+ ]
+ },
+ {
+ "input": "&mscr;",
+ "description": "Named entity: mscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc2"
+ ]
+ ]
+ },
+ {
+ "input": "&mstpos",
+ "description": "Bad named entity: mstpos without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mstpos"
+ ]
+ ]
+ },
+ {
+ "input": "&mstpos;",
+ "description": "Named entity: mstpos; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223e"
+ ]
+ ]
+ },
+ {
+ "input": "&mu",
+ "description": "Bad named entity: mu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mu"
+ ]
+ ]
+ },
+ {
+ "input": "&mu;",
+ "description": "Named entity: mu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03bc"
+ ]
+ ]
+ },
+ {
+ "input": "&multimap",
+ "description": "Bad named entity: multimap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&multimap"
+ ]
+ ]
+ },
+ {
+ "input": "&multimap;",
+ "description": "Named entity: multimap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b8"
+ ]
+ ]
+ },
+ {
+ "input": "&mumap",
+ "description": "Bad named entity: mumap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&mumap"
+ ]
+ ]
+ },
+ {
+ "input": "&mumap;",
+ "description": "Named entity: mumap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b8"
+ ]
+ ]
+ },
+ {
+ "input": "&nGg",
+ "description": "Bad named entity: nGg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nGg"
+ ]
+ ]
+ },
+ {
+ "input": "&nGg;",
+ "description": "Named entity: nGg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d9\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nGt",
+ "description": "Bad named entity: nGt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nGt"
+ ]
+ ]
+ },
+ {
+ "input": "&nGt;",
+ "description": "Named entity: nGt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226b\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nGtv",
+ "description": "Bad named entity: nGtv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nGtv"
+ ]
+ ]
+ },
+ {
+ "input": "&nGtv;",
+ "description": "Named entity: nGtv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226b\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nLeftarrow",
+ "description": "Bad named entity: nLeftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nLeftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&nLeftarrow;",
+ "description": "Named entity: nLeftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cd"
+ ]
+ ]
+ },
+ {
+ "input": "&nLeftrightarrow",
+ "description": "Bad named entity: nLeftrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nLeftrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&nLeftrightarrow;",
+ "description": "Named entity: nLeftrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ce"
+ ]
+ ]
+ },
+ {
+ "input": "&nLl",
+ "description": "Bad named entity: nLl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nLl"
+ ]
+ ]
+ },
+ {
+ "input": "&nLl;",
+ "description": "Named entity: nLl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d8\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nLt",
+ "description": "Bad named entity: nLt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nLt"
+ ]
+ ]
+ },
+ {
+ "input": "&nLt;",
+ "description": "Named entity: nLt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226a\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nLtv",
+ "description": "Bad named entity: nLtv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nLtv"
+ ]
+ ]
+ },
+ {
+ "input": "&nLtv;",
+ "description": "Named entity: nLtv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226a\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nRightarrow",
+ "description": "Bad named entity: nRightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nRightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&nRightarrow;",
+ "description": "Named entity: nRightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cf"
+ ]
+ ]
+ },
+ {
+ "input": "&nVDash",
+ "description": "Bad named entity: nVDash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nVDash"
+ ]
+ ]
+ },
+ {
+ "input": "&nVDash;",
+ "description": "Named entity: nVDash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22af"
+ ]
+ ]
+ },
+ {
+ "input": "&nVdash",
+ "description": "Bad named entity: nVdash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nVdash"
+ ]
+ ]
+ },
+ {
+ "input": "&nVdash;",
+ "description": "Named entity: nVdash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ae"
+ ]
+ ]
+ },
+ {
+ "input": "&nabla",
+ "description": "Bad named entity: nabla without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nabla"
+ ]
+ ]
+ },
+ {
+ "input": "&nabla;",
+ "description": "Named entity: nabla; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2207"
+ ]
+ ]
+ },
+ {
+ "input": "&nacute",
+ "description": "Bad named entity: nacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nacute"
+ ]
+ ]
+ },
+ {
+ "input": "&nacute;",
+ "description": "Named entity: nacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0144"
+ ]
+ ]
+ },
+ {
+ "input": "&nang",
+ "description": "Bad named entity: nang without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nang"
+ ]
+ ]
+ },
+ {
+ "input": "&nang;",
+ "description": "Named entity: nang; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2220\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nap",
+ "description": "Bad named entity: nap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nap"
+ ]
+ ]
+ },
+ {
+ "input": "&nap;",
+ "description": "Named entity: nap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2249"
+ ]
+ ]
+ },
+ {
+ "input": "&napE",
+ "description": "Bad named entity: napE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&napE"
+ ]
+ ]
+ },
+ {
+ "input": "&napE;",
+ "description": "Named entity: napE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a70\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&napid",
+ "description": "Bad named entity: napid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&napid"
+ ]
+ ]
+ },
+ {
+ "input": "&napid;",
+ "description": "Named entity: napid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224b\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&napos",
+ "description": "Bad named entity: napos without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&napos"
+ ]
+ ]
+ },
+ {
+ "input": "&napos;",
+ "description": "Named entity: napos; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0149"
+ ]
+ ]
+ },
+ {
+ "input": "&napprox",
+ "description": "Bad named entity: napprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&napprox"
+ ]
+ ]
+ },
+ {
+ "input": "&napprox;",
+ "description": "Named entity: napprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2249"
+ ]
+ ]
+ },
+ {
+ "input": "&natur",
+ "description": "Bad named entity: natur without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&natur"
+ ]
+ ]
+ },
+ {
+ "input": "&natur;",
+ "description": "Named entity: natur; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u266e"
+ ]
+ ]
+ },
+ {
+ "input": "&natural",
+ "description": "Bad named entity: natural without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&natural"
+ ]
+ ]
+ },
+ {
+ "input": "&natural;",
+ "description": "Named entity: natural; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u266e"
+ ]
+ ]
+ },
+ {
+ "input": "&naturals",
+ "description": "Bad named entity: naturals without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&naturals"
+ ]
+ ]
+ },
+ {
+ "input": "&naturals;",
+ "description": "Named entity: naturals; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2115"
+ ]
+ ]
+ },
+ {
+ "input": "&nbsp",
+ "description": "Named entity: nbsp without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a0"
+ ]
+ ]
+ },
+ {
+ "input": "&nbsp;",
+ "description": "Named entity: nbsp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a0"
+ ]
+ ]
+ },
+ {
+ "input": "&nbump",
+ "description": "Bad named entity: nbump without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nbump"
+ ]
+ ]
+ },
+ {
+ "input": "&nbump;",
+ "description": "Named entity: nbump; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224e\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nbumpe",
+ "description": "Bad named entity: nbumpe without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nbumpe"
+ ]
+ ]
+ },
+ {
+ "input": "&nbumpe;",
+ "description": "Named entity: nbumpe; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224f\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&ncap",
+ "description": "Bad named entity: ncap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ncap"
+ ]
+ ]
+ },
+ {
+ "input": "&ncap;",
+ "description": "Named entity: ncap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a43"
+ ]
+ ]
+ },
+ {
+ "input": "&ncaron",
+ "description": "Bad named entity: ncaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ncaron"
+ ]
+ ]
+ },
+ {
+ "input": "&ncaron;",
+ "description": "Named entity: ncaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0148"
+ ]
+ ]
+ },
+ {
+ "input": "&ncedil",
+ "description": "Bad named entity: ncedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ncedil"
+ ]
+ ]
+ },
+ {
+ "input": "&ncedil;",
+ "description": "Named entity: ncedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0146"
+ ]
+ ]
+ },
+ {
+ "input": "&ncong",
+ "description": "Bad named entity: ncong without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ncong"
+ ]
+ ]
+ },
+ {
+ "input": "&ncong;",
+ "description": "Named entity: ncong; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2247"
+ ]
+ ]
+ },
+ {
+ "input": "&ncongdot",
+ "description": "Bad named entity: ncongdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ncongdot"
+ ]
+ ]
+ },
+ {
+ "input": "&ncongdot;",
+ "description": "Named entity: ncongdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a6d\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&ncup",
+ "description": "Bad named entity: ncup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ncup"
+ ]
+ ]
+ },
+ {
+ "input": "&ncup;",
+ "description": "Named entity: ncup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a42"
+ ]
+ ]
+ },
+ {
+ "input": "&ncy",
+ "description": "Bad named entity: ncy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ncy"
+ ]
+ ]
+ },
+ {
+ "input": "&ncy;",
+ "description": "Named entity: ncy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u043d"
+ ]
+ ]
+ },
+ {
+ "input": "&ndash",
+ "description": "Bad named entity: ndash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ndash"
+ ]
+ ]
+ },
+ {
+ "input": "&ndash;",
+ "description": "Named entity: ndash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2013"
+ ]
+ ]
+ },
+ {
+ "input": "&ne",
+ "description": "Bad named entity: ne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ne"
+ ]
+ ]
+ },
+ {
+ "input": "&ne;",
+ "description": "Named entity: ne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2260"
+ ]
+ ]
+ },
+ {
+ "input": "&neArr",
+ "description": "Bad named entity: neArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&neArr"
+ ]
+ ]
+ },
+ {
+ "input": "&neArr;",
+ "description": "Named entity: neArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d7"
+ ]
+ ]
+ },
+ {
+ "input": "&nearhk",
+ "description": "Bad named entity: nearhk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nearhk"
+ ]
+ ]
+ },
+ {
+ "input": "&nearhk;",
+ "description": "Named entity: nearhk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2924"
+ ]
+ ]
+ },
+ {
+ "input": "&nearr",
+ "description": "Bad named entity: nearr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nearr"
+ ]
+ ]
+ },
+ {
+ "input": "&nearr;",
+ "description": "Named entity: nearr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2197"
+ ]
+ ]
+ },
+ {
+ "input": "&nearrow",
+ "description": "Bad named entity: nearrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nearrow"
+ ]
+ ]
+ },
+ {
+ "input": "&nearrow;",
+ "description": "Named entity: nearrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2197"
+ ]
+ ]
+ },
+ {
+ "input": "&nedot",
+ "description": "Bad named entity: nedot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nedot"
+ ]
+ ]
+ },
+ {
+ "input": "&nedot;",
+ "description": "Named entity: nedot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2250\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nequiv",
+ "description": "Bad named entity: nequiv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nequiv"
+ ]
+ ]
+ },
+ {
+ "input": "&nequiv;",
+ "description": "Named entity: nequiv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2262"
+ ]
+ ]
+ },
+ {
+ "input": "&nesear",
+ "description": "Bad named entity: nesear without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nesear"
+ ]
+ ]
+ },
+ {
+ "input": "&nesear;",
+ "description": "Named entity: nesear; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2928"
+ ]
+ ]
+ },
+ {
+ "input": "&nesim",
+ "description": "Bad named entity: nesim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nesim"
+ ]
+ ]
+ },
+ {
+ "input": "&nesim;",
+ "description": "Named entity: nesim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2242\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nexist",
+ "description": "Bad named entity: nexist without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nexist"
+ ]
+ ]
+ },
+ {
+ "input": "&nexist;",
+ "description": "Named entity: nexist; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2204"
+ ]
+ ]
+ },
+ {
+ "input": "&nexists",
+ "description": "Bad named entity: nexists without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nexists"
+ ]
+ ]
+ },
+ {
+ "input": "&nexists;",
+ "description": "Named entity: nexists; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2204"
+ ]
+ ]
+ },
+ {
+ "input": "&nfr",
+ "description": "Bad named entity: nfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nfr"
+ ]
+ ]
+ },
+ {
+ "input": "&nfr;",
+ "description": "Named entity: nfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd2b"
+ ]
+ ]
+ },
+ {
+ "input": "&ngE",
+ "description": "Bad named entity: ngE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ngE"
+ ]
+ ]
+ },
+ {
+ "input": "&ngE;",
+ "description": "Named entity: ngE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2267\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nge",
+ "description": "Bad named entity: nge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nge"
+ ]
+ ]
+ },
+ {
+ "input": "&nge;",
+ "description": "Named entity: nge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2271"
+ ]
+ ]
+ },
+ {
+ "input": "&ngeq",
+ "description": "Bad named entity: ngeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ngeq"
+ ]
+ ]
+ },
+ {
+ "input": "&ngeq;",
+ "description": "Named entity: ngeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2271"
+ ]
+ ]
+ },
+ {
+ "input": "&ngeqq",
+ "description": "Bad named entity: ngeqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ngeqq"
+ ]
+ ]
+ },
+ {
+ "input": "&ngeqq;",
+ "description": "Named entity: ngeqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2267\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&ngeqslant",
+ "description": "Bad named entity: ngeqslant without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ngeqslant"
+ ]
+ ]
+ },
+ {
+ "input": "&ngeqslant;",
+ "description": "Named entity: ngeqslant; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7e\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nges",
+ "description": "Bad named entity: nges without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nges"
+ ]
+ ]
+ },
+ {
+ "input": "&nges;",
+ "description": "Named entity: nges; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7e\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&ngsim",
+ "description": "Bad named entity: ngsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ngsim"
+ ]
+ ]
+ },
+ {
+ "input": "&ngsim;",
+ "description": "Named entity: ngsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2275"
+ ]
+ ]
+ },
+ {
+ "input": "&ngt",
+ "description": "Bad named entity: ngt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ngt"
+ ]
+ ]
+ },
+ {
+ "input": "&ngt;",
+ "description": "Named entity: ngt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226f"
+ ]
+ ]
+ },
+ {
+ "input": "&ngtr",
+ "description": "Bad named entity: ngtr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ngtr"
+ ]
+ ]
+ },
+ {
+ "input": "&ngtr;",
+ "description": "Named entity: ngtr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226f"
+ ]
+ ]
+ },
+ {
+ "input": "&nhArr",
+ "description": "Bad named entity: nhArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nhArr"
+ ]
+ ]
+ },
+ {
+ "input": "&nhArr;",
+ "description": "Named entity: nhArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ce"
+ ]
+ ]
+ },
+ {
+ "input": "&nharr",
+ "description": "Bad named entity: nharr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nharr"
+ ]
+ ]
+ },
+ {
+ "input": "&nharr;",
+ "description": "Named entity: nharr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ae"
+ ]
+ ]
+ },
+ {
+ "input": "&nhpar",
+ "description": "Bad named entity: nhpar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nhpar"
+ ]
+ ]
+ },
+ {
+ "input": "&nhpar;",
+ "description": "Named entity: nhpar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2af2"
+ ]
+ ]
+ },
+ {
+ "input": "&ni",
+ "description": "Bad named entity: ni without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ni"
+ ]
+ ]
+ },
+ {
+ "input": "&ni;",
+ "description": "Named entity: ni; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220b"
+ ]
+ ]
+ },
+ {
+ "input": "&nis",
+ "description": "Bad named entity: nis without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nis"
+ ]
+ ]
+ },
+ {
+ "input": "&nis;",
+ "description": "Named entity: nis; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22fc"
+ ]
+ ]
+ },
+ {
+ "input": "&nisd",
+ "description": "Bad named entity: nisd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nisd"
+ ]
+ ]
+ },
+ {
+ "input": "&nisd;",
+ "description": "Named entity: nisd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22fa"
+ ]
+ ]
+ },
+ {
+ "input": "&niv",
+ "description": "Bad named entity: niv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&niv"
+ ]
+ ]
+ },
+ {
+ "input": "&niv;",
+ "description": "Named entity: niv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220b"
+ ]
+ ]
+ },
+ {
+ "input": "&njcy",
+ "description": "Bad named entity: njcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&njcy"
+ ]
+ ]
+ },
+ {
+ "input": "&njcy;",
+ "description": "Named entity: njcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u045a"
+ ]
+ ]
+ },
+ {
+ "input": "&nlArr",
+ "description": "Bad named entity: nlArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nlArr"
+ ]
+ ]
+ },
+ {
+ "input": "&nlArr;",
+ "description": "Named entity: nlArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cd"
+ ]
+ ]
+ },
+ {
+ "input": "&nlE",
+ "description": "Bad named entity: nlE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nlE"
+ ]
+ ]
+ },
+ {
+ "input": "&nlE;",
+ "description": "Named entity: nlE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2266\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nlarr",
+ "description": "Bad named entity: nlarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nlarr"
+ ]
+ ]
+ },
+ {
+ "input": "&nlarr;",
+ "description": "Named entity: nlarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219a"
+ ]
+ ]
+ },
+ {
+ "input": "&nldr",
+ "description": "Bad named entity: nldr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nldr"
+ ]
+ ]
+ },
+ {
+ "input": "&nldr;",
+ "description": "Named entity: nldr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2025"
+ ]
+ ]
+ },
+ {
+ "input": "&nle",
+ "description": "Bad named entity: nle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nle"
+ ]
+ ]
+ },
+ {
+ "input": "&nle;",
+ "description": "Named entity: nle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2270"
+ ]
+ ]
+ },
+ {
+ "input": "&nleftarrow",
+ "description": "Bad named entity: nleftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nleftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&nleftarrow;",
+ "description": "Named entity: nleftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219a"
+ ]
+ ]
+ },
+ {
+ "input": "&nleftrightarrow",
+ "description": "Bad named entity: nleftrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nleftrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&nleftrightarrow;",
+ "description": "Named entity: nleftrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ae"
+ ]
+ ]
+ },
+ {
+ "input": "&nleq",
+ "description": "Bad named entity: nleq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nleq"
+ ]
+ ]
+ },
+ {
+ "input": "&nleq;",
+ "description": "Named entity: nleq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2270"
+ ]
+ ]
+ },
+ {
+ "input": "&nleqq",
+ "description": "Bad named entity: nleqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nleqq"
+ ]
+ ]
+ },
+ {
+ "input": "&nleqq;",
+ "description": "Named entity: nleqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2266\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nleqslant",
+ "description": "Bad named entity: nleqslant without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nleqslant"
+ ]
+ ]
+ },
+ {
+ "input": "&nleqslant;",
+ "description": "Named entity: nleqslant; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7d\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nles",
+ "description": "Bad named entity: nles without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nles"
+ ]
+ ]
+ },
+ {
+ "input": "&nles;",
+ "description": "Named entity: nles; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a7d\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nless",
+ "description": "Bad named entity: nless without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nless"
+ ]
+ ]
+ },
+ {
+ "input": "&nless;",
+ "description": "Named entity: nless; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226e"
+ ]
+ ]
+ },
+ {
+ "input": "&nlsim",
+ "description": "Bad named entity: nlsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nlsim"
+ ]
+ ]
+ },
+ {
+ "input": "&nlsim;",
+ "description": "Named entity: nlsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2274"
+ ]
+ ]
+ },
+ {
+ "input": "&nlt",
+ "description": "Bad named entity: nlt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nlt"
+ ]
+ ]
+ },
+ {
+ "input": "&nlt;",
+ "description": "Named entity: nlt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226e"
+ ]
+ ]
+ },
+ {
+ "input": "&nltri",
+ "description": "Bad named entity: nltri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nltri"
+ ]
+ ]
+ },
+ {
+ "input": "&nltri;",
+ "description": "Named entity: nltri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ea"
+ ]
+ ]
+ },
+ {
+ "input": "&nltrie",
+ "description": "Bad named entity: nltrie without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nltrie"
+ ]
+ ]
+ },
+ {
+ "input": "&nltrie;",
+ "description": "Named entity: nltrie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ec"
+ ]
+ ]
+ },
+ {
+ "input": "&nmid",
+ "description": "Bad named entity: nmid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nmid"
+ ]
+ ]
+ },
+ {
+ "input": "&nmid;",
+ "description": "Named entity: nmid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2224"
+ ]
+ ]
+ },
+ {
+ "input": "&nopf",
+ "description": "Bad named entity: nopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nopf"
+ ]
+ ]
+ },
+ {
+ "input": "&nopf;",
+ "description": "Named entity: nopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd5f"
+ ]
+ ]
+ },
+ {
+ "input": "&not",
+ "description": "Named entity: not without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ac"
+ ]
+ ]
+ },
+ {
+ "input": "&not;",
+ "description": "Named entity: not; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ac"
+ ]
+ ]
+ },
+ {
+ "input": "&notin;",
+ "description": "Named entity: notin; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2209"
+ ]
+ ]
+ },
+ {
+ "input": "&notinE;",
+ "description": "Named entity: notinE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f9\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&notindot;",
+ "description": "Named entity: notindot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f5\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&notinva;",
+ "description": "Named entity: notinva; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2209"
+ ]
+ ]
+ },
+ {
+ "input": "&notinvb;",
+ "description": "Named entity: notinvb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f7"
+ ]
+ ]
+ },
+ {
+ "input": "&notinvc;",
+ "description": "Named entity: notinvc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f6"
+ ]
+ ]
+ },
+ {
+ "input": "&notni;",
+ "description": "Named entity: notni; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220c"
+ ]
+ ]
+ },
+ {
+ "input": "&notniva;",
+ "description": "Named entity: notniva; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220c"
+ ]
+ ]
+ },
+ {
+ "input": "&notnivb;",
+ "description": "Named entity: notnivb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22fe"
+ ]
+ ]
+ },
+ {
+ "input": "&notnivc;",
+ "description": "Named entity: notnivc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22fd"
+ ]
+ ]
+ },
+ {
+ "input": "&npar",
+ "description": "Bad named entity: npar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&npar"
+ ]
+ ]
+ },
+ {
+ "input": "&npar;",
+ "description": "Named entity: npar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2226"
+ ]
+ ]
+ },
+ {
+ "input": "&nparallel",
+ "description": "Bad named entity: nparallel without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nparallel"
+ ]
+ ]
+ },
+ {
+ "input": "&nparallel;",
+ "description": "Named entity: nparallel; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2226"
+ ]
+ ]
+ },
+ {
+ "input": "&nparsl",
+ "description": "Bad named entity: nparsl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nparsl"
+ ]
+ ]
+ },
+ {
+ "input": "&nparsl;",
+ "description": "Named entity: nparsl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2afd\u20e5"
+ ]
+ ]
+ },
+ {
+ "input": "&npart",
+ "description": "Bad named entity: npart without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&npart"
+ ]
+ ]
+ },
+ {
+ "input": "&npart;",
+ "description": "Named entity: npart; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2202\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&npolint",
+ "description": "Bad named entity: npolint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&npolint"
+ ]
+ ]
+ },
+ {
+ "input": "&npolint;",
+ "description": "Named entity: npolint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a14"
+ ]
+ ]
+ },
+ {
+ "input": "&npr",
+ "description": "Bad named entity: npr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&npr"
+ ]
+ ]
+ },
+ {
+ "input": "&npr;",
+ "description": "Named entity: npr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2280"
+ ]
+ ]
+ },
+ {
+ "input": "&nprcue",
+ "description": "Bad named entity: nprcue without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nprcue"
+ ]
+ ]
+ },
+ {
+ "input": "&nprcue;",
+ "description": "Named entity: nprcue; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e0"
+ ]
+ ]
+ },
+ {
+ "input": "&npre",
+ "description": "Bad named entity: npre without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&npre"
+ ]
+ ]
+ },
+ {
+ "input": "&npre;",
+ "description": "Named entity: npre; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aaf\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nprec",
+ "description": "Bad named entity: nprec without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nprec"
+ ]
+ ]
+ },
+ {
+ "input": "&nprec;",
+ "description": "Named entity: nprec; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2280"
+ ]
+ ]
+ },
+ {
+ "input": "&npreceq",
+ "description": "Bad named entity: npreceq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&npreceq"
+ ]
+ ]
+ },
+ {
+ "input": "&npreceq;",
+ "description": "Named entity: npreceq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aaf\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nrArr",
+ "description": "Bad named entity: nrArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nrArr"
+ ]
+ ]
+ },
+ {
+ "input": "&nrArr;",
+ "description": "Named entity: nrArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cf"
+ ]
+ ]
+ },
+ {
+ "input": "&nrarr",
+ "description": "Bad named entity: nrarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nrarr"
+ ]
+ ]
+ },
+ {
+ "input": "&nrarr;",
+ "description": "Named entity: nrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219b"
+ ]
+ ]
+ },
+ {
+ "input": "&nrarrc",
+ "description": "Bad named entity: nrarrc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nrarrc"
+ ]
+ ]
+ },
+ {
+ "input": "&nrarrc;",
+ "description": "Named entity: nrarrc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2933\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nrarrw",
+ "description": "Bad named entity: nrarrw without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nrarrw"
+ ]
+ ]
+ },
+ {
+ "input": "&nrarrw;",
+ "description": "Named entity: nrarrw; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219d\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nrightarrow",
+ "description": "Bad named entity: nrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&nrightarrow;",
+ "description": "Named entity: nrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219b"
+ ]
+ ]
+ },
+ {
+ "input": "&nrtri",
+ "description": "Bad named entity: nrtri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nrtri"
+ ]
+ ]
+ },
+ {
+ "input": "&nrtri;",
+ "description": "Named entity: nrtri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22eb"
+ ]
+ ]
+ },
+ {
+ "input": "&nrtrie",
+ "description": "Bad named entity: nrtrie without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nrtrie"
+ ]
+ ]
+ },
+ {
+ "input": "&nrtrie;",
+ "description": "Named entity: nrtrie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ed"
+ ]
+ ]
+ },
+ {
+ "input": "&nsc",
+ "description": "Bad named entity: nsc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsc"
+ ]
+ ]
+ },
+ {
+ "input": "&nsc;",
+ "description": "Named entity: nsc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2281"
+ ]
+ ]
+ },
+ {
+ "input": "&nsccue",
+ "description": "Bad named entity: nsccue without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsccue"
+ ]
+ ]
+ },
+ {
+ "input": "&nsccue;",
+ "description": "Named entity: nsccue; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e1"
+ ]
+ ]
+ },
+ {
+ "input": "&nsce",
+ "description": "Bad named entity: nsce without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsce"
+ ]
+ ]
+ },
+ {
+ "input": "&nsce;",
+ "description": "Named entity: nsce; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab0\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nscr",
+ "description": "Bad named entity: nscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nscr"
+ ]
+ ]
+ },
+ {
+ "input": "&nscr;",
+ "description": "Named entity: nscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc3"
+ ]
+ ]
+ },
+ {
+ "input": "&nshortmid",
+ "description": "Bad named entity: nshortmid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nshortmid"
+ ]
+ ]
+ },
+ {
+ "input": "&nshortmid;",
+ "description": "Named entity: nshortmid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2224"
+ ]
+ ]
+ },
+ {
+ "input": "&nshortparallel",
+ "description": "Bad named entity: nshortparallel without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nshortparallel"
+ ]
+ ]
+ },
+ {
+ "input": "&nshortparallel;",
+ "description": "Named entity: nshortparallel; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2226"
+ ]
+ ]
+ },
+ {
+ "input": "&nsim",
+ "description": "Bad named entity: nsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsim"
+ ]
+ ]
+ },
+ {
+ "input": "&nsim;",
+ "description": "Named entity: nsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2241"
+ ]
+ ]
+ },
+ {
+ "input": "&nsime",
+ "description": "Bad named entity: nsime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsime"
+ ]
+ ]
+ },
+ {
+ "input": "&nsime;",
+ "description": "Named entity: nsime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2244"
+ ]
+ ]
+ },
+ {
+ "input": "&nsimeq",
+ "description": "Bad named entity: nsimeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsimeq"
+ ]
+ ]
+ },
+ {
+ "input": "&nsimeq;",
+ "description": "Named entity: nsimeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2244"
+ ]
+ ]
+ },
+ {
+ "input": "&nsmid",
+ "description": "Bad named entity: nsmid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsmid"
+ ]
+ ]
+ },
+ {
+ "input": "&nsmid;",
+ "description": "Named entity: nsmid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2224"
+ ]
+ ]
+ },
+ {
+ "input": "&nspar",
+ "description": "Bad named entity: nspar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nspar"
+ ]
+ ]
+ },
+ {
+ "input": "&nspar;",
+ "description": "Named entity: nspar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2226"
+ ]
+ ]
+ },
+ {
+ "input": "&nsqsube",
+ "description": "Bad named entity: nsqsube without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsqsube"
+ ]
+ ]
+ },
+ {
+ "input": "&nsqsube;",
+ "description": "Named entity: nsqsube; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e2"
+ ]
+ ]
+ },
+ {
+ "input": "&nsqsupe",
+ "description": "Bad named entity: nsqsupe without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsqsupe"
+ ]
+ ]
+ },
+ {
+ "input": "&nsqsupe;",
+ "description": "Named entity: nsqsupe; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e3"
+ ]
+ ]
+ },
+ {
+ "input": "&nsub",
+ "description": "Bad named entity: nsub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsub"
+ ]
+ ]
+ },
+ {
+ "input": "&nsub;",
+ "description": "Named entity: nsub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2284"
+ ]
+ ]
+ },
+ {
+ "input": "&nsubE",
+ "description": "Bad named entity: nsubE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsubE"
+ ]
+ ]
+ },
+ {
+ "input": "&nsubE;",
+ "description": "Named entity: nsubE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac5\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nsube",
+ "description": "Bad named entity: nsube without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsube"
+ ]
+ ]
+ },
+ {
+ "input": "&nsube;",
+ "description": "Named entity: nsube; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2288"
+ ]
+ ]
+ },
+ {
+ "input": "&nsubset",
+ "description": "Bad named entity: nsubset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsubset"
+ ]
+ ]
+ },
+ {
+ "input": "&nsubset;",
+ "description": "Named entity: nsubset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2282\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nsubseteq",
+ "description": "Bad named entity: nsubseteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsubseteq"
+ ]
+ ]
+ },
+ {
+ "input": "&nsubseteq;",
+ "description": "Named entity: nsubseteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2288"
+ ]
+ ]
+ },
+ {
+ "input": "&nsubseteqq",
+ "description": "Bad named entity: nsubseteqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsubseteqq"
+ ]
+ ]
+ },
+ {
+ "input": "&nsubseteqq;",
+ "description": "Named entity: nsubseteqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac5\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nsucc",
+ "description": "Bad named entity: nsucc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsucc"
+ ]
+ ]
+ },
+ {
+ "input": "&nsucc;",
+ "description": "Named entity: nsucc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2281"
+ ]
+ ]
+ },
+ {
+ "input": "&nsucceq",
+ "description": "Bad named entity: nsucceq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsucceq"
+ ]
+ ]
+ },
+ {
+ "input": "&nsucceq;",
+ "description": "Named entity: nsucceq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab0\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nsup",
+ "description": "Bad named entity: nsup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsup"
+ ]
+ ]
+ },
+ {
+ "input": "&nsup;",
+ "description": "Named entity: nsup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2285"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupE",
+ "description": "Bad named entity: nsupE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsupE"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupE;",
+ "description": "Named entity: nsupE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac6\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupe",
+ "description": "Bad named entity: nsupe without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsupe"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupe;",
+ "description": "Named entity: nsupe; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2289"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupset",
+ "description": "Bad named entity: nsupset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsupset"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupset;",
+ "description": "Named entity: nsupset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2283\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupseteq",
+ "description": "Bad named entity: nsupseteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsupseteq"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupseteq;",
+ "description": "Named entity: nsupseteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2289"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupseteqq",
+ "description": "Bad named entity: nsupseteqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nsupseteqq"
+ ]
+ ]
+ },
+ {
+ "input": "&nsupseteqq;",
+ "description": "Named entity: nsupseteqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac6\u0338"
+ ]
+ ]
+ },
+ {
+ "input": "&ntgl",
+ "description": "Bad named entity: ntgl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ntgl"
+ ]
+ ]
+ },
+ {
+ "input": "&ntgl;",
+ "description": "Named entity: ntgl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2279"
+ ]
+ ]
+ },
+ {
+ "input": "&ntilde",
+ "description": "Named entity: ntilde without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f1"
+ ]
+ ]
+ },
+ {
+ "input": "&ntilde;",
+ "description": "Named entity: ntilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f1"
+ ]
+ ]
+ },
+ {
+ "input": "&ntlg",
+ "description": "Bad named entity: ntlg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ntlg"
+ ]
+ ]
+ },
+ {
+ "input": "&ntlg;",
+ "description": "Named entity: ntlg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2278"
+ ]
+ ]
+ },
+ {
+ "input": "&ntriangleleft",
+ "description": "Bad named entity: ntriangleleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ntriangleleft"
+ ]
+ ]
+ },
+ {
+ "input": "&ntriangleleft;",
+ "description": "Named entity: ntriangleleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ea"
+ ]
+ ]
+ },
+ {
+ "input": "&ntrianglelefteq",
+ "description": "Bad named entity: ntrianglelefteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ntrianglelefteq"
+ ]
+ ]
+ },
+ {
+ "input": "&ntrianglelefteq;",
+ "description": "Named entity: ntrianglelefteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ec"
+ ]
+ ]
+ },
+ {
+ "input": "&ntriangleright",
+ "description": "Bad named entity: ntriangleright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ntriangleright"
+ ]
+ ]
+ },
+ {
+ "input": "&ntriangleright;",
+ "description": "Named entity: ntriangleright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22eb"
+ ]
+ ]
+ },
+ {
+ "input": "&ntrianglerighteq",
+ "description": "Bad named entity: ntrianglerighteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ntrianglerighteq"
+ ]
+ ]
+ },
+ {
+ "input": "&ntrianglerighteq;",
+ "description": "Named entity: ntrianglerighteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ed"
+ ]
+ ]
+ },
+ {
+ "input": "&nu",
+ "description": "Bad named entity: nu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nu"
+ ]
+ ]
+ },
+ {
+ "input": "&nu;",
+ "description": "Named entity: nu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03bd"
+ ]
+ ]
+ },
+ {
+ "input": "&num",
+ "description": "Bad named entity: num without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&num"
+ ]
+ ]
+ },
+ {
+ "input": "&num;",
+ "description": "Named entity: num; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "#"
+ ]
+ ]
+ },
+ {
+ "input": "&numero",
+ "description": "Bad named entity: numero without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&numero"
+ ]
+ ]
+ },
+ {
+ "input": "&numero;",
+ "description": "Named entity: numero; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2116"
+ ]
+ ]
+ },
+ {
+ "input": "&numsp",
+ "description": "Bad named entity: numsp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&numsp"
+ ]
+ ]
+ },
+ {
+ "input": "&numsp;",
+ "description": "Named entity: numsp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2007"
+ ]
+ ]
+ },
+ {
+ "input": "&nvDash",
+ "description": "Bad named entity: nvDash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvDash"
+ ]
+ ]
+ },
+ {
+ "input": "&nvDash;",
+ "description": "Named entity: nvDash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ad"
+ ]
+ ]
+ },
+ {
+ "input": "&nvHarr",
+ "description": "Bad named entity: nvHarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvHarr"
+ ]
+ ]
+ },
+ {
+ "input": "&nvHarr;",
+ "description": "Named entity: nvHarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2904"
+ ]
+ ]
+ },
+ {
+ "input": "&nvap",
+ "description": "Bad named entity: nvap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvap"
+ ]
+ ]
+ },
+ {
+ "input": "&nvap;",
+ "description": "Named entity: nvap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u224d\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nvdash",
+ "description": "Bad named entity: nvdash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvdash"
+ ]
+ ]
+ },
+ {
+ "input": "&nvdash;",
+ "description": "Named entity: nvdash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ac"
+ ]
+ ]
+ },
+ {
+ "input": "&nvge",
+ "description": "Bad named entity: nvge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvge"
+ ]
+ ]
+ },
+ {
+ "input": "&nvge;",
+ "description": "Named entity: nvge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2265\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nvgt",
+ "description": "Bad named entity: nvgt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvgt"
+ ]
+ ]
+ },
+ {
+ "input": "&nvgt;",
+ "description": "Named entity: nvgt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ ">\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nvinfin",
+ "description": "Bad named entity: nvinfin without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvinfin"
+ ]
+ ]
+ },
+ {
+ "input": "&nvinfin;",
+ "description": "Named entity: nvinfin; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29de"
+ ]
+ ]
+ },
+ {
+ "input": "&nvlArr",
+ "description": "Bad named entity: nvlArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvlArr"
+ ]
+ ]
+ },
+ {
+ "input": "&nvlArr;",
+ "description": "Named entity: nvlArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2902"
+ ]
+ ]
+ },
+ {
+ "input": "&nvle",
+ "description": "Bad named entity: nvle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvle"
+ ]
+ ]
+ },
+ {
+ "input": "&nvle;",
+ "description": "Named entity: nvle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2264\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nvlt",
+ "description": "Bad named entity: nvlt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvlt"
+ ]
+ ]
+ },
+ {
+ "input": "&nvlt;",
+ "description": "Named entity: nvlt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "<\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nvltrie",
+ "description": "Bad named entity: nvltrie without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvltrie"
+ ]
+ ]
+ },
+ {
+ "input": "&nvltrie;",
+ "description": "Named entity: nvltrie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b4\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nvrArr",
+ "description": "Bad named entity: nvrArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvrArr"
+ ]
+ ]
+ },
+ {
+ "input": "&nvrArr;",
+ "description": "Named entity: nvrArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2903"
+ ]
+ ]
+ },
+ {
+ "input": "&nvrtrie",
+ "description": "Bad named entity: nvrtrie without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvrtrie"
+ ]
+ ]
+ },
+ {
+ "input": "&nvrtrie;",
+ "description": "Named entity: nvrtrie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b5\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nvsim",
+ "description": "Bad named entity: nvsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nvsim"
+ ]
+ ]
+ },
+ {
+ "input": "&nvsim;",
+ "description": "Named entity: nvsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223c\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&nwArr",
+ "description": "Bad named entity: nwArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nwArr"
+ ]
+ ]
+ },
+ {
+ "input": "&nwArr;",
+ "description": "Named entity: nwArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d6"
+ ]
+ ]
+ },
+ {
+ "input": "&nwarhk",
+ "description": "Bad named entity: nwarhk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nwarhk"
+ ]
+ ]
+ },
+ {
+ "input": "&nwarhk;",
+ "description": "Named entity: nwarhk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2923"
+ ]
+ ]
+ },
+ {
+ "input": "&nwarr",
+ "description": "Bad named entity: nwarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nwarr"
+ ]
+ ]
+ },
+ {
+ "input": "&nwarr;",
+ "description": "Named entity: nwarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2196"
+ ]
+ ]
+ },
+ {
+ "input": "&nwarrow",
+ "description": "Bad named entity: nwarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nwarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&nwarrow;",
+ "description": "Named entity: nwarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2196"
+ ]
+ ]
+ },
+ {
+ "input": "&nwnear",
+ "description": "Bad named entity: nwnear without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&nwnear"
+ ]
+ ]
+ },
+ {
+ "input": "&nwnear;",
+ "description": "Named entity: nwnear; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2927"
+ ]
+ ]
+ },
+ {
+ "input": "&oS",
+ "description": "Bad named entity: oS without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oS"
+ ]
+ ]
+ },
+ {
+ "input": "&oS;",
+ "description": "Named entity: oS; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u24c8"
+ ]
+ ]
+ },
+ {
+ "input": "&oacute",
+ "description": "Named entity: oacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f3"
+ ]
+ ]
+ },
+ {
+ "input": "&oacute;",
+ "description": "Named entity: oacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f3"
+ ]
+ ]
+ },
+ {
+ "input": "&oast",
+ "description": "Bad named entity: oast without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oast"
+ ]
+ ]
+ },
+ {
+ "input": "&oast;",
+ "description": "Named entity: oast; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229b"
+ ]
+ ]
+ },
+ {
+ "input": "&ocir",
+ "description": "Bad named entity: ocir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ocir"
+ ]
+ ]
+ },
+ {
+ "input": "&ocir;",
+ "description": "Named entity: ocir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229a"
+ ]
+ ]
+ },
+ {
+ "input": "&ocirc",
+ "description": "Named entity: ocirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f4"
+ ]
+ ]
+ },
+ {
+ "input": "&ocirc;",
+ "description": "Named entity: ocirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f4"
+ ]
+ ]
+ },
+ {
+ "input": "&ocy",
+ "description": "Bad named entity: ocy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ocy"
+ ]
+ ]
+ },
+ {
+ "input": "&ocy;",
+ "description": "Named entity: ocy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u043e"
+ ]
+ ]
+ },
+ {
+ "input": "&odash",
+ "description": "Bad named entity: odash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&odash"
+ ]
+ ]
+ },
+ {
+ "input": "&odash;",
+ "description": "Named entity: odash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229d"
+ ]
+ ]
+ },
+ {
+ "input": "&odblac",
+ "description": "Bad named entity: odblac without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&odblac"
+ ]
+ ]
+ },
+ {
+ "input": "&odblac;",
+ "description": "Named entity: odblac; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0151"
+ ]
+ ]
+ },
+ {
+ "input": "&odiv",
+ "description": "Bad named entity: odiv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&odiv"
+ ]
+ ]
+ },
+ {
+ "input": "&odiv;",
+ "description": "Named entity: odiv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a38"
+ ]
+ ]
+ },
+ {
+ "input": "&odot",
+ "description": "Bad named entity: odot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&odot"
+ ]
+ ]
+ },
+ {
+ "input": "&odot;",
+ "description": "Named entity: odot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2299"
+ ]
+ ]
+ },
+ {
+ "input": "&odsold",
+ "description": "Bad named entity: odsold without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&odsold"
+ ]
+ ]
+ },
+ {
+ "input": "&odsold;",
+ "description": "Named entity: odsold; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29bc"
+ ]
+ ]
+ },
+ {
+ "input": "&oelig",
+ "description": "Bad named entity: oelig without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oelig"
+ ]
+ ]
+ },
+ {
+ "input": "&oelig;",
+ "description": "Named entity: oelig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0153"
+ ]
+ ]
+ },
+ {
+ "input": "&ofcir",
+ "description": "Bad named entity: ofcir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ofcir"
+ ]
+ ]
+ },
+ {
+ "input": "&ofcir;",
+ "description": "Named entity: ofcir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29bf"
+ ]
+ ]
+ },
+ {
+ "input": "&ofr",
+ "description": "Bad named entity: ofr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ofr"
+ ]
+ ]
+ },
+ {
+ "input": "&ofr;",
+ "description": "Named entity: ofr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd2c"
+ ]
+ ]
+ },
+ {
+ "input": "&ogon",
+ "description": "Bad named entity: ogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ogon"
+ ]
+ ]
+ },
+ {
+ "input": "&ogon;",
+ "description": "Named entity: ogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02db"
+ ]
+ ]
+ },
+ {
+ "input": "&ograve",
+ "description": "Named entity: ograve without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f2"
+ ]
+ ]
+ },
+ {
+ "input": "&ograve;",
+ "description": "Named entity: ograve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f2"
+ ]
+ ]
+ },
+ {
+ "input": "&ogt",
+ "description": "Bad named entity: ogt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ogt"
+ ]
+ ]
+ },
+ {
+ "input": "&ogt;",
+ "description": "Named entity: ogt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29c1"
+ ]
+ ]
+ },
+ {
+ "input": "&ohbar",
+ "description": "Bad named entity: ohbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ohbar"
+ ]
+ ]
+ },
+ {
+ "input": "&ohbar;",
+ "description": "Named entity: ohbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b5"
+ ]
+ ]
+ },
+ {
+ "input": "&ohm",
+ "description": "Bad named entity: ohm without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ohm"
+ ]
+ ]
+ },
+ {
+ "input": "&ohm;",
+ "description": "Named entity: ohm; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03a9"
+ ]
+ ]
+ },
+ {
+ "input": "&oint",
+ "description": "Bad named entity: oint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oint"
+ ]
+ ]
+ },
+ {
+ "input": "&oint;",
+ "description": "Named entity: oint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222e"
+ ]
+ ]
+ },
+ {
+ "input": "&olarr",
+ "description": "Bad named entity: olarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&olarr"
+ ]
+ ]
+ },
+ {
+ "input": "&olarr;",
+ "description": "Named entity: olarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ba"
+ ]
+ ]
+ },
+ {
+ "input": "&olcir",
+ "description": "Bad named entity: olcir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&olcir"
+ ]
+ ]
+ },
+ {
+ "input": "&olcir;",
+ "description": "Named entity: olcir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29be"
+ ]
+ ]
+ },
+ {
+ "input": "&olcross",
+ "description": "Bad named entity: olcross without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&olcross"
+ ]
+ ]
+ },
+ {
+ "input": "&olcross;",
+ "description": "Named entity: olcross; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29bb"
+ ]
+ ]
+ },
+ {
+ "input": "&oline",
+ "description": "Bad named entity: oline without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oline"
+ ]
+ ]
+ },
+ {
+ "input": "&oline;",
+ "description": "Named entity: oline; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u203e"
+ ]
+ ]
+ },
+ {
+ "input": "&olt",
+ "description": "Bad named entity: olt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&olt"
+ ]
+ ]
+ },
+ {
+ "input": "&olt;",
+ "description": "Named entity: olt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29c0"
+ ]
+ ]
+ },
+ {
+ "input": "&omacr",
+ "description": "Bad named entity: omacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&omacr"
+ ]
+ ]
+ },
+ {
+ "input": "&omacr;",
+ "description": "Named entity: omacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u014d"
+ ]
+ ]
+ },
+ {
+ "input": "&omega",
+ "description": "Bad named entity: omega without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&omega"
+ ]
+ ]
+ },
+ {
+ "input": "&omega;",
+ "description": "Named entity: omega; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c9"
+ ]
+ ]
+ },
+ {
+ "input": "&omicron",
+ "description": "Bad named entity: omicron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&omicron"
+ ]
+ ]
+ },
+ {
+ "input": "&omicron;",
+ "description": "Named entity: omicron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03bf"
+ ]
+ ]
+ },
+ {
+ "input": "&omid",
+ "description": "Bad named entity: omid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&omid"
+ ]
+ ]
+ },
+ {
+ "input": "&omid;",
+ "description": "Named entity: omid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b6"
+ ]
+ ]
+ },
+ {
+ "input": "&ominus",
+ "description": "Bad named entity: ominus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ominus"
+ ]
+ ]
+ },
+ {
+ "input": "&ominus;",
+ "description": "Named entity: ominus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2296"
+ ]
+ ]
+ },
+ {
+ "input": "&oopf",
+ "description": "Bad named entity: oopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oopf"
+ ]
+ ]
+ },
+ {
+ "input": "&oopf;",
+ "description": "Named entity: oopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd60"
+ ]
+ ]
+ },
+ {
+ "input": "&opar",
+ "description": "Bad named entity: opar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&opar"
+ ]
+ ]
+ },
+ {
+ "input": "&opar;",
+ "description": "Named entity: opar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b7"
+ ]
+ ]
+ },
+ {
+ "input": "&operp",
+ "description": "Bad named entity: operp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&operp"
+ ]
+ ]
+ },
+ {
+ "input": "&operp;",
+ "description": "Named entity: operp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b9"
+ ]
+ ]
+ },
+ {
+ "input": "&oplus",
+ "description": "Bad named entity: oplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oplus"
+ ]
+ ]
+ },
+ {
+ "input": "&oplus;",
+ "description": "Named entity: oplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2295"
+ ]
+ ]
+ },
+ {
+ "input": "&or",
+ "description": "Bad named entity: or without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&or"
+ ]
+ ]
+ },
+ {
+ "input": "&or;",
+ "description": "Named entity: or; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2228"
+ ]
+ ]
+ },
+ {
+ "input": "&orarr",
+ "description": "Bad named entity: orarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&orarr"
+ ]
+ ]
+ },
+ {
+ "input": "&orarr;",
+ "description": "Named entity: orarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bb"
+ ]
+ ]
+ },
+ {
+ "input": "&ord",
+ "description": "Bad named entity: ord without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ord"
+ ]
+ ]
+ },
+ {
+ "input": "&ord;",
+ "description": "Named entity: ord; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a5d"
+ ]
+ ]
+ },
+ {
+ "input": "&order",
+ "description": "Bad named entity: order without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&order"
+ ]
+ ]
+ },
+ {
+ "input": "&order;",
+ "description": "Named entity: order; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2134"
+ ]
+ ]
+ },
+ {
+ "input": "&orderof",
+ "description": "Bad named entity: orderof without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&orderof"
+ ]
+ ]
+ },
+ {
+ "input": "&orderof;",
+ "description": "Named entity: orderof; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2134"
+ ]
+ ]
+ },
+ {
+ "input": "&ordf",
+ "description": "Named entity: ordf without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00aa"
+ ]
+ ]
+ },
+ {
+ "input": "&ordf;",
+ "description": "Named entity: ordf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00aa"
+ ]
+ ]
+ },
+ {
+ "input": "&ordm",
+ "description": "Named entity: ordm without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ba"
+ ]
+ ]
+ },
+ {
+ "input": "&ordm;",
+ "description": "Named entity: ordm; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ba"
+ ]
+ ]
+ },
+ {
+ "input": "&origof",
+ "description": "Bad named entity: origof without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&origof"
+ ]
+ ]
+ },
+ {
+ "input": "&origof;",
+ "description": "Named entity: origof; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b6"
+ ]
+ ]
+ },
+ {
+ "input": "&oror",
+ "description": "Bad named entity: oror without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oror"
+ ]
+ ]
+ },
+ {
+ "input": "&oror;",
+ "description": "Named entity: oror; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a56"
+ ]
+ ]
+ },
+ {
+ "input": "&orslope",
+ "description": "Bad named entity: orslope without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&orslope"
+ ]
+ ]
+ },
+ {
+ "input": "&orslope;",
+ "description": "Named entity: orslope; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a57"
+ ]
+ ]
+ },
+ {
+ "input": "&orv",
+ "description": "Bad named entity: orv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&orv"
+ ]
+ ]
+ },
+ {
+ "input": "&orv;",
+ "description": "Named entity: orv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a5b"
+ ]
+ ]
+ },
+ {
+ "input": "&oscr",
+ "description": "Bad named entity: oscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&oscr"
+ ]
+ ]
+ },
+ {
+ "input": "&oscr;",
+ "description": "Named entity: oscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2134"
+ ]
+ ]
+ },
+ {
+ "input": "&oslash",
+ "description": "Named entity: oslash without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f8"
+ ]
+ ]
+ },
+ {
+ "input": "&oslash;",
+ "description": "Named entity: oslash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f8"
+ ]
+ ]
+ },
+ {
+ "input": "&osol",
+ "description": "Bad named entity: osol without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&osol"
+ ]
+ ]
+ },
+ {
+ "input": "&osol;",
+ "description": "Named entity: osol; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2298"
+ ]
+ ]
+ },
+ {
+ "input": "&otilde",
+ "description": "Named entity: otilde without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f5"
+ ]
+ ]
+ },
+ {
+ "input": "&otilde;",
+ "description": "Named entity: otilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f5"
+ ]
+ ]
+ },
+ {
+ "input": "&otimes",
+ "description": "Bad named entity: otimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&otimes"
+ ]
+ ]
+ },
+ {
+ "input": "&otimes;",
+ "description": "Named entity: otimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2297"
+ ]
+ ]
+ },
+ {
+ "input": "&otimesas",
+ "description": "Bad named entity: otimesas without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&otimesas"
+ ]
+ ]
+ },
+ {
+ "input": "&otimesas;",
+ "description": "Named entity: otimesas; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a36"
+ ]
+ ]
+ },
+ {
+ "input": "&ouml",
+ "description": "Named entity: ouml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f6"
+ ]
+ ]
+ },
+ {
+ "input": "&ouml;",
+ "description": "Named entity: ouml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f6"
+ ]
+ ]
+ },
+ {
+ "input": "&ovbar",
+ "description": "Bad named entity: ovbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ovbar"
+ ]
+ ]
+ },
+ {
+ "input": "&ovbar;",
+ "description": "Named entity: ovbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u233d"
+ ]
+ ]
+ },
+ {
+ "input": "&par",
+ "description": "Bad named entity: par without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&par"
+ ]
+ ]
+ },
+ {
+ "input": "&par;",
+ "description": "Named entity: par; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2225"
+ ]
+ ]
+ },
+ {
+ "input": "&para",
+ "description": "Named entity: para without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b6"
+ ]
+ ]
+ },
+ {
+ "input": "&para;",
+ "description": "Named entity: para; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b6"
+ ]
+ ]
+ },
+ {
+ "input": "&parallel;",
+ "description": "Named entity: parallel; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2225"
+ ]
+ ]
+ },
+ {
+ "input": "&parsim",
+ "description": "Bad named entity: parsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&parsim"
+ ]
+ ]
+ },
+ {
+ "input": "&parsim;",
+ "description": "Named entity: parsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2af3"
+ ]
+ ]
+ },
+ {
+ "input": "&parsl",
+ "description": "Bad named entity: parsl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&parsl"
+ ]
+ ]
+ },
+ {
+ "input": "&parsl;",
+ "description": "Named entity: parsl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2afd"
+ ]
+ ]
+ },
+ {
+ "input": "&part",
+ "description": "Bad named entity: part without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&part"
+ ]
+ ]
+ },
+ {
+ "input": "&part;",
+ "description": "Named entity: part; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2202"
+ ]
+ ]
+ },
+ {
+ "input": "&pcy",
+ "description": "Bad named entity: pcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pcy"
+ ]
+ ]
+ },
+ {
+ "input": "&pcy;",
+ "description": "Named entity: pcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u043f"
+ ]
+ ]
+ },
+ {
+ "input": "&percnt",
+ "description": "Bad named entity: percnt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&percnt"
+ ]
+ ]
+ },
+ {
+ "input": "&percnt;",
+ "description": "Named entity: percnt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "%"
+ ]
+ ]
+ },
+ {
+ "input": "&period",
+ "description": "Bad named entity: period without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&period"
+ ]
+ ]
+ },
+ {
+ "input": "&period;",
+ "description": "Named entity: period; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "."
+ ]
+ ]
+ },
+ {
+ "input": "&permil",
+ "description": "Bad named entity: permil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&permil"
+ ]
+ ]
+ },
+ {
+ "input": "&permil;",
+ "description": "Named entity: permil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2030"
+ ]
+ ]
+ },
+ {
+ "input": "&perp",
+ "description": "Bad named entity: perp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&perp"
+ ]
+ ]
+ },
+ {
+ "input": "&perp;",
+ "description": "Named entity: perp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a5"
+ ]
+ ]
+ },
+ {
+ "input": "&pertenk",
+ "description": "Bad named entity: pertenk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pertenk"
+ ]
+ ]
+ },
+ {
+ "input": "&pertenk;",
+ "description": "Named entity: pertenk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2031"
+ ]
+ ]
+ },
+ {
+ "input": "&pfr",
+ "description": "Bad named entity: pfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pfr"
+ ]
+ ]
+ },
+ {
+ "input": "&pfr;",
+ "description": "Named entity: pfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd2d"
+ ]
+ ]
+ },
+ {
+ "input": "&phi",
+ "description": "Bad named entity: phi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&phi"
+ ]
+ ]
+ },
+ {
+ "input": "&phi;",
+ "description": "Named entity: phi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c6"
+ ]
+ ]
+ },
+ {
+ "input": "&phiv",
+ "description": "Bad named entity: phiv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&phiv"
+ ]
+ ]
+ },
+ {
+ "input": "&phiv;",
+ "description": "Named entity: phiv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d5"
+ ]
+ ]
+ },
+ {
+ "input": "&phmmat",
+ "description": "Bad named entity: phmmat without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&phmmat"
+ ]
+ ]
+ },
+ {
+ "input": "&phmmat;",
+ "description": "Named entity: phmmat; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2133"
+ ]
+ ]
+ },
+ {
+ "input": "&phone",
+ "description": "Bad named entity: phone without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&phone"
+ ]
+ ]
+ },
+ {
+ "input": "&phone;",
+ "description": "Named entity: phone; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u260e"
+ ]
+ ]
+ },
+ {
+ "input": "&pi",
+ "description": "Bad named entity: pi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pi"
+ ]
+ ]
+ },
+ {
+ "input": "&pi;",
+ "description": "Named entity: pi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c0"
+ ]
+ ]
+ },
+ {
+ "input": "&pitchfork",
+ "description": "Bad named entity: pitchfork without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pitchfork"
+ ]
+ ]
+ },
+ {
+ "input": "&pitchfork;",
+ "description": "Named entity: pitchfork; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22d4"
+ ]
+ ]
+ },
+ {
+ "input": "&piv",
+ "description": "Bad named entity: piv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&piv"
+ ]
+ ]
+ },
+ {
+ "input": "&piv;",
+ "description": "Named entity: piv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d6"
+ ]
+ ]
+ },
+ {
+ "input": "&planck",
+ "description": "Bad named entity: planck without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&planck"
+ ]
+ ]
+ },
+ {
+ "input": "&planck;",
+ "description": "Named entity: planck; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210f"
+ ]
+ ]
+ },
+ {
+ "input": "&planckh",
+ "description": "Bad named entity: planckh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&planckh"
+ ]
+ ]
+ },
+ {
+ "input": "&planckh;",
+ "description": "Named entity: planckh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210e"
+ ]
+ ]
+ },
+ {
+ "input": "&plankv",
+ "description": "Bad named entity: plankv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&plankv"
+ ]
+ ]
+ },
+ {
+ "input": "&plankv;",
+ "description": "Named entity: plankv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210f"
+ ]
+ ]
+ },
+ {
+ "input": "&plus",
+ "description": "Bad named entity: plus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&plus"
+ ]
+ ]
+ },
+ {
+ "input": "&plus;",
+ "description": "Named entity: plus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "+"
+ ]
+ ]
+ },
+ {
+ "input": "&plusacir",
+ "description": "Bad named entity: plusacir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&plusacir"
+ ]
+ ]
+ },
+ {
+ "input": "&plusacir;",
+ "description": "Named entity: plusacir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a23"
+ ]
+ ]
+ },
+ {
+ "input": "&plusb",
+ "description": "Bad named entity: plusb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&plusb"
+ ]
+ ]
+ },
+ {
+ "input": "&plusb;",
+ "description": "Named entity: plusb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u229e"
+ ]
+ ]
+ },
+ {
+ "input": "&pluscir",
+ "description": "Bad named entity: pluscir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pluscir"
+ ]
+ ]
+ },
+ {
+ "input": "&pluscir;",
+ "description": "Named entity: pluscir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a22"
+ ]
+ ]
+ },
+ {
+ "input": "&plusdo",
+ "description": "Bad named entity: plusdo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&plusdo"
+ ]
+ ]
+ },
+ {
+ "input": "&plusdo;",
+ "description": "Named entity: plusdo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2214"
+ ]
+ ]
+ },
+ {
+ "input": "&plusdu",
+ "description": "Bad named entity: plusdu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&plusdu"
+ ]
+ ]
+ },
+ {
+ "input": "&plusdu;",
+ "description": "Named entity: plusdu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a25"
+ ]
+ ]
+ },
+ {
+ "input": "&pluse",
+ "description": "Bad named entity: pluse without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pluse"
+ ]
+ ]
+ },
+ {
+ "input": "&pluse;",
+ "description": "Named entity: pluse; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a72"
+ ]
+ ]
+ },
+ {
+ "input": "&plusmn",
+ "description": "Named entity: plusmn without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b1"
+ ]
+ ]
+ },
+ {
+ "input": "&plusmn;",
+ "description": "Named entity: plusmn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b1"
+ ]
+ ]
+ },
+ {
+ "input": "&plussim",
+ "description": "Bad named entity: plussim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&plussim"
+ ]
+ ]
+ },
+ {
+ "input": "&plussim;",
+ "description": "Named entity: plussim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a26"
+ ]
+ ]
+ },
+ {
+ "input": "&plustwo",
+ "description": "Bad named entity: plustwo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&plustwo"
+ ]
+ ]
+ },
+ {
+ "input": "&plustwo;",
+ "description": "Named entity: plustwo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a27"
+ ]
+ ]
+ },
+ {
+ "input": "&pm",
+ "description": "Bad named entity: pm without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pm"
+ ]
+ ]
+ },
+ {
+ "input": "&pm;",
+ "description": "Named entity: pm; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b1"
+ ]
+ ]
+ },
+ {
+ "input": "&pointint",
+ "description": "Bad named entity: pointint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pointint"
+ ]
+ ]
+ },
+ {
+ "input": "&pointint;",
+ "description": "Named entity: pointint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a15"
+ ]
+ ]
+ },
+ {
+ "input": "&popf",
+ "description": "Bad named entity: popf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&popf"
+ ]
+ ]
+ },
+ {
+ "input": "&popf;",
+ "description": "Named entity: popf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd61"
+ ]
+ ]
+ },
+ {
+ "input": "&pound",
+ "description": "Named entity: pound without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a3"
+ ]
+ ]
+ },
+ {
+ "input": "&pound;",
+ "description": "Named entity: pound; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a3"
+ ]
+ ]
+ },
+ {
+ "input": "&pr",
+ "description": "Bad named entity: pr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pr"
+ ]
+ ]
+ },
+ {
+ "input": "&pr;",
+ "description": "Named entity: pr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227a"
+ ]
+ ]
+ },
+ {
+ "input": "&prE",
+ "description": "Bad named entity: prE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prE"
+ ]
+ ]
+ },
+ {
+ "input": "&prE;",
+ "description": "Named entity: prE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab3"
+ ]
+ ]
+ },
+ {
+ "input": "&prap",
+ "description": "Bad named entity: prap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prap"
+ ]
+ ]
+ },
+ {
+ "input": "&prap;",
+ "description": "Named entity: prap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab7"
+ ]
+ ]
+ },
+ {
+ "input": "&prcue",
+ "description": "Bad named entity: prcue without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prcue"
+ ]
+ ]
+ },
+ {
+ "input": "&prcue;",
+ "description": "Named entity: prcue; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227c"
+ ]
+ ]
+ },
+ {
+ "input": "&pre",
+ "description": "Bad named entity: pre without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pre"
+ ]
+ ]
+ },
+ {
+ "input": "&pre;",
+ "description": "Named entity: pre; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aaf"
+ ]
+ ]
+ },
+ {
+ "input": "&prec",
+ "description": "Bad named entity: prec without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prec"
+ ]
+ ]
+ },
+ {
+ "input": "&prec;",
+ "description": "Named entity: prec; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227a"
+ ]
+ ]
+ },
+ {
+ "input": "&precapprox",
+ "description": "Bad named entity: precapprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&precapprox"
+ ]
+ ]
+ },
+ {
+ "input": "&precapprox;",
+ "description": "Named entity: precapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab7"
+ ]
+ ]
+ },
+ {
+ "input": "&preccurlyeq",
+ "description": "Bad named entity: preccurlyeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&preccurlyeq"
+ ]
+ ]
+ },
+ {
+ "input": "&preccurlyeq;",
+ "description": "Named entity: preccurlyeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227c"
+ ]
+ ]
+ },
+ {
+ "input": "&preceq",
+ "description": "Bad named entity: preceq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&preceq"
+ ]
+ ]
+ },
+ {
+ "input": "&preceq;",
+ "description": "Named entity: preceq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aaf"
+ ]
+ ]
+ },
+ {
+ "input": "&precnapprox",
+ "description": "Bad named entity: precnapprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&precnapprox"
+ ]
+ ]
+ },
+ {
+ "input": "&precnapprox;",
+ "description": "Named entity: precnapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab9"
+ ]
+ ]
+ },
+ {
+ "input": "&precneqq",
+ "description": "Bad named entity: precneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&precneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&precneqq;",
+ "description": "Named entity: precneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab5"
+ ]
+ ]
+ },
+ {
+ "input": "&precnsim",
+ "description": "Bad named entity: precnsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&precnsim"
+ ]
+ ]
+ },
+ {
+ "input": "&precnsim;",
+ "description": "Named entity: precnsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e8"
+ ]
+ ]
+ },
+ {
+ "input": "&precsim",
+ "description": "Bad named entity: precsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&precsim"
+ ]
+ ]
+ },
+ {
+ "input": "&precsim;",
+ "description": "Named entity: precsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227e"
+ ]
+ ]
+ },
+ {
+ "input": "&prime",
+ "description": "Bad named entity: prime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prime"
+ ]
+ ]
+ },
+ {
+ "input": "&prime;",
+ "description": "Named entity: prime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2032"
+ ]
+ ]
+ },
+ {
+ "input": "&primes",
+ "description": "Bad named entity: primes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&primes"
+ ]
+ ]
+ },
+ {
+ "input": "&primes;",
+ "description": "Named entity: primes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2119"
+ ]
+ ]
+ },
+ {
+ "input": "&prnE",
+ "description": "Bad named entity: prnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prnE"
+ ]
+ ]
+ },
+ {
+ "input": "&prnE;",
+ "description": "Named entity: prnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab5"
+ ]
+ ]
+ },
+ {
+ "input": "&prnap",
+ "description": "Bad named entity: prnap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prnap"
+ ]
+ ]
+ },
+ {
+ "input": "&prnap;",
+ "description": "Named entity: prnap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab9"
+ ]
+ ]
+ },
+ {
+ "input": "&prnsim",
+ "description": "Bad named entity: prnsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prnsim"
+ ]
+ ]
+ },
+ {
+ "input": "&prnsim;",
+ "description": "Named entity: prnsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e8"
+ ]
+ ]
+ },
+ {
+ "input": "&prod",
+ "description": "Bad named entity: prod without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prod"
+ ]
+ ]
+ },
+ {
+ "input": "&prod;",
+ "description": "Named entity: prod; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u220f"
+ ]
+ ]
+ },
+ {
+ "input": "&profalar",
+ "description": "Bad named entity: profalar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&profalar"
+ ]
+ ]
+ },
+ {
+ "input": "&profalar;",
+ "description": "Named entity: profalar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u232e"
+ ]
+ ]
+ },
+ {
+ "input": "&profline",
+ "description": "Bad named entity: profline without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&profline"
+ ]
+ ]
+ },
+ {
+ "input": "&profline;",
+ "description": "Named entity: profline; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2312"
+ ]
+ ]
+ },
+ {
+ "input": "&profsurf",
+ "description": "Bad named entity: profsurf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&profsurf"
+ ]
+ ]
+ },
+ {
+ "input": "&profsurf;",
+ "description": "Named entity: profsurf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2313"
+ ]
+ ]
+ },
+ {
+ "input": "&prop",
+ "description": "Bad named entity: prop without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prop"
+ ]
+ ]
+ },
+ {
+ "input": "&prop;",
+ "description": "Named entity: prop; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221d"
+ ]
+ ]
+ },
+ {
+ "input": "&propto",
+ "description": "Bad named entity: propto without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&propto"
+ ]
+ ]
+ },
+ {
+ "input": "&propto;",
+ "description": "Named entity: propto; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221d"
+ ]
+ ]
+ },
+ {
+ "input": "&prsim",
+ "description": "Bad named entity: prsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prsim"
+ ]
+ ]
+ },
+ {
+ "input": "&prsim;",
+ "description": "Named entity: prsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227e"
+ ]
+ ]
+ },
+ {
+ "input": "&prurel",
+ "description": "Bad named entity: prurel without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&prurel"
+ ]
+ ]
+ },
+ {
+ "input": "&prurel;",
+ "description": "Named entity: prurel; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b0"
+ ]
+ ]
+ },
+ {
+ "input": "&pscr",
+ "description": "Bad named entity: pscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&pscr"
+ ]
+ ]
+ },
+ {
+ "input": "&pscr;",
+ "description": "Named entity: pscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc5"
+ ]
+ ]
+ },
+ {
+ "input": "&psi",
+ "description": "Bad named entity: psi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&psi"
+ ]
+ ]
+ },
+ {
+ "input": "&psi;",
+ "description": "Named entity: psi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c8"
+ ]
+ ]
+ },
+ {
+ "input": "&puncsp",
+ "description": "Bad named entity: puncsp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&puncsp"
+ ]
+ ]
+ },
+ {
+ "input": "&puncsp;",
+ "description": "Named entity: puncsp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2008"
+ ]
+ ]
+ },
+ {
+ "input": "&qfr",
+ "description": "Bad named entity: qfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&qfr"
+ ]
+ ]
+ },
+ {
+ "input": "&qfr;",
+ "description": "Named entity: qfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd2e"
+ ]
+ ]
+ },
+ {
+ "input": "&qint",
+ "description": "Bad named entity: qint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&qint"
+ ]
+ ]
+ },
+ {
+ "input": "&qint;",
+ "description": "Named entity: qint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a0c"
+ ]
+ ]
+ },
+ {
+ "input": "&qopf",
+ "description": "Bad named entity: qopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&qopf"
+ ]
+ ]
+ },
+ {
+ "input": "&qopf;",
+ "description": "Named entity: qopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd62"
+ ]
+ ]
+ },
+ {
+ "input": "&qprime",
+ "description": "Bad named entity: qprime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&qprime"
+ ]
+ ]
+ },
+ {
+ "input": "&qprime;",
+ "description": "Named entity: qprime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2057"
+ ]
+ ]
+ },
+ {
+ "input": "&qscr",
+ "description": "Bad named entity: qscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&qscr"
+ ]
+ ]
+ },
+ {
+ "input": "&qscr;",
+ "description": "Named entity: qscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc6"
+ ]
+ ]
+ },
+ {
+ "input": "&quaternions",
+ "description": "Bad named entity: quaternions without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&quaternions"
+ ]
+ ]
+ },
+ {
+ "input": "&quaternions;",
+ "description": "Named entity: quaternions; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u210d"
+ ]
+ ]
+ },
+ {
+ "input": "&quatint",
+ "description": "Bad named entity: quatint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&quatint"
+ ]
+ ]
+ },
+ {
+ "input": "&quatint;",
+ "description": "Named entity: quatint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a16"
+ ]
+ ]
+ },
+ {
+ "input": "&quest",
+ "description": "Bad named entity: quest without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&quest"
+ ]
+ ]
+ },
+ {
+ "input": "&quest;",
+ "description": "Named entity: quest; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "?"
+ ]
+ ]
+ },
+ {
+ "input": "&questeq",
+ "description": "Bad named entity: questeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&questeq"
+ ]
+ ]
+ },
+ {
+ "input": "&questeq;",
+ "description": "Named entity: questeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u225f"
+ ]
+ ]
+ },
+ {
+ "input": "&quot",
+ "description": "Named entity: quot without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\""
+ ]
+ ]
+ },
+ {
+ "input": "&quot;",
+ "description": "Named entity: quot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\""
+ ]
+ ]
+ },
+ {
+ "input": "&rAarr",
+ "description": "Bad named entity: rAarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rAarr"
+ ]
+ ]
+ },
+ {
+ "input": "&rAarr;",
+ "description": "Named entity: rAarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21db"
+ ]
+ ]
+ },
+ {
+ "input": "&rArr",
+ "description": "Bad named entity: rArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rArr"
+ ]
+ ]
+ },
+ {
+ "input": "&rArr;",
+ "description": "Named entity: rArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d2"
+ ]
+ ]
+ },
+ {
+ "input": "&rAtail",
+ "description": "Bad named entity: rAtail without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rAtail"
+ ]
+ ]
+ },
+ {
+ "input": "&rAtail;",
+ "description": "Named entity: rAtail; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u291c"
+ ]
+ ]
+ },
+ {
+ "input": "&rBarr",
+ "description": "Bad named entity: rBarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rBarr"
+ ]
+ ]
+ },
+ {
+ "input": "&rBarr;",
+ "description": "Named entity: rBarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u290f"
+ ]
+ ]
+ },
+ {
+ "input": "&rHar",
+ "description": "Bad named entity: rHar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rHar"
+ ]
+ ]
+ },
+ {
+ "input": "&rHar;",
+ "description": "Named entity: rHar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2964"
+ ]
+ ]
+ },
+ {
+ "input": "&race",
+ "description": "Bad named entity: race without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&race"
+ ]
+ ]
+ },
+ {
+ "input": "&race;",
+ "description": "Named entity: race; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223d\u0331"
+ ]
+ ]
+ },
+ {
+ "input": "&racute",
+ "description": "Bad named entity: racute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&racute"
+ ]
+ ]
+ },
+ {
+ "input": "&racute;",
+ "description": "Named entity: racute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0155"
+ ]
+ ]
+ },
+ {
+ "input": "&radic",
+ "description": "Bad named entity: radic without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&radic"
+ ]
+ ]
+ },
+ {
+ "input": "&radic;",
+ "description": "Named entity: radic; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221a"
+ ]
+ ]
+ },
+ {
+ "input": "&raemptyv",
+ "description": "Bad named entity: raemptyv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&raemptyv"
+ ]
+ ]
+ },
+ {
+ "input": "&raemptyv;",
+ "description": "Named entity: raemptyv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29b3"
+ ]
+ ]
+ },
+ {
+ "input": "&rang",
+ "description": "Bad named entity: rang without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rang"
+ ]
+ ]
+ },
+ {
+ "input": "&rang;",
+ "description": "Named entity: rang; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e9"
+ ]
+ ]
+ },
+ {
+ "input": "&rangd",
+ "description": "Bad named entity: rangd without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rangd"
+ ]
+ ]
+ },
+ {
+ "input": "&rangd;",
+ "description": "Named entity: rangd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2992"
+ ]
+ ]
+ },
+ {
+ "input": "&range",
+ "description": "Bad named entity: range without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&range"
+ ]
+ ]
+ },
+ {
+ "input": "&range;",
+ "description": "Named entity: range; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29a5"
+ ]
+ ]
+ },
+ {
+ "input": "&rangle",
+ "description": "Bad named entity: rangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rangle"
+ ]
+ ]
+ },
+ {
+ "input": "&rangle;",
+ "description": "Named entity: rangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e9"
+ ]
+ ]
+ },
+ {
+ "input": "&raquo",
+ "description": "Named entity: raquo without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00bb"
+ ]
+ ]
+ },
+ {
+ "input": "&raquo;",
+ "description": "Named entity: raquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00bb"
+ ]
+ ]
+ },
+ {
+ "input": "&rarr",
+ "description": "Bad named entity: rarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarr"
+ ]
+ ]
+ },
+ {
+ "input": "&rarr;",
+ "description": "Named entity: rarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2192"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrap",
+ "description": "Bad named entity: rarrap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrap"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrap;",
+ "description": "Named entity: rarrap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2975"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrb",
+ "description": "Bad named entity: rarrb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrb"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrb;",
+ "description": "Named entity: rarrb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21e5"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrbfs",
+ "description": "Bad named entity: rarrbfs without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrbfs"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrbfs;",
+ "description": "Named entity: rarrbfs; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2920"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrc",
+ "description": "Bad named entity: rarrc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrc"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrc;",
+ "description": "Named entity: rarrc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2933"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrfs",
+ "description": "Bad named entity: rarrfs without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrfs"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrfs;",
+ "description": "Named entity: rarrfs; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u291e"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrhk",
+ "description": "Bad named entity: rarrhk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrhk"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrhk;",
+ "description": "Named entity: rarrhk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21aa"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrlp",
+ "description": "Bad named entity: rarrlp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrlp"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrlp;",
+ "description": "Named entity: rarrlp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21ac"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrpl",
+ "description": "Bad named entity: rarrpl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrpl"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrpl;",
+ "description": "Named entity: rarrpl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2945"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrsim",
+ "description": "Bad named entity: rarrsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrsim"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrsim;",
+ "description": "Named entity: rarrsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2974"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrtl",
+ "description": "Bad named entity: rarrtl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrtl"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrtl;",
+ "description": "Named entity: rarrtl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a3"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrw",
+ "description": "Bad named entity: rarrw without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rarrw"
+ ]
+ ]
+ },
+ {
+ "input": "&rarrw;",
+ "description": "Named entity: rarrw; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219d"
+ ]
+ ]
+ },
+ {
+ "input": "&ratail",
+ "description": "Bad named entity: ratail without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ratail"
+ ]
+ ]
+ },
+ {
+ "input": "&ratail;",
+ "description": "Named entity: ratail; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u291a"
+ ]
+ ]
+ },
+ {
+ "input": "&ratio",
+ "description": "Bad named entity: ratio without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ratio"
+ ]
+ ]
+ },
+ {
+ "input": "&ratio;",
+ "description": "Named entity: ratio; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2236"
+ ]
+ ]
+ },
+ {
+ "input": "&rationals",
+ "description": "Bad named entity: rationals without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rationals"
+ ]
+ ]
+ },
+ {
+ "input": "&rationals;",
+ "description": "Named entity: rationals; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211a"
+ ]
+ ]
+ },
+ {
+ "input": "&rbarr",
+ "description": "Bad named entity: rbarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rbarr"
+ ]
+ ]
+ },
+ {
+ "input": "&rbarr;",
+ "description": "Named entity: rbarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u290d"
+ ]
+ ]
+ },
+ {
+ "input": "&rbbrk",
+ "description": "Bad named entity: rbbrk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rbbrk"
+ ]
+ ]
+ },
+ {
+ "input": "&rbbrk;",
+ "description": "Named entity: rbbrk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2773"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrace",
+ "description": "Bad named entity: rbrace without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rbrace"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrace;",
+ "description": "Named entity: rbrace; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "}"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrack",
+ "description": "Bad named entity: rbrack without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rbrack"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrack;",
+ "description": "Named entity: rbrack; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "]"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrke",
+ "description": "Bad named entity: rbrke without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rbrke"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrke;",
+ "description": "Named entity: rbrke; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u298c"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrksld",
+ "description": "Bad named entity: rbrksld without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rbrksld"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrksld;",
+ "description": "Named entity: rbrksld; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u298e"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrkslu",
+ "description": "Bad named entity: rbrkslu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rbrkslu"
+ ]
+ ]
+ },
+ {
+ "input": "&rbrkslu;",
+ "description": "Named entity: rbrkslu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2990"
+ ]
+ ]
+ },
+ {
+ "input": "&rcaron",
+ "description": "Bad named entity: rcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&rcaron;",
+ "description": "Named entity: rcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0159"
+ ]
+ ]
+ },
+ {
+ "input": "&rcedil",
+ "description": "Bad named entity: rcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&rcedil;",
+ "description": "Named entity: rcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0157"
+ ]
+ ]
+ },
+ {
+ "input": "&rceil",
+ "description": "Bad named entity: rceil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rceil"
+ ]
+ ]
+ },
+ {
+ "input": "&rceil;",
+ "description": "Named entity: rceil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2309"
+ ]
+ ]
+ },
+ {
+ "input": "&rcub",
+ "description": "Bad named entity: rcub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rcub"
+ ]
+ ]
+ },
+ {
+ "input": "&rcub;",
+ "description": "Named entity: rcub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "}"
+ ]
+ ]
+ },
+ {
+ "input": "&rcy",
+ "description": "Bad named entity: rcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rcy"
+ ]
+ ]
+ },
+ {
+ "input": "&rcy;",
+ "description": "Named entity: rcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0440"
+ ]
+ ]
+ },
+ {
+ "input": "&rdca",
+ "description": "Bad named entity: rdca without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rdca"
+ ]
+ ]
+ },
+ {
+ "input": "&rdca;",
+ "description": "Named entity: rdca; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2937"
+ ]
+ ]
+ },
+ {
+ "input": "&rdldhar",
+ "description": "Bad named entity: rdldhar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rdldhar"
+ ]
+ ]
+ },
+ {
+ "input": "&rdldhar;",
+ "description": "Named entity: rdldhar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2969"
+ ]
+ ]
+ },
+ {
+ "input": "&rdquo",
+ "description": "Bad named entity: rdquo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rdquo"
+ ]
+ ]
+ },
+ {
+ "input": "&rdquo;",
+ "description": "Named entity: rdquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201d"
+ ]
+ ]
+ },
+ {
+ "input": "&rdquor",
+ "description": "Bad named entity: rdquor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rdquor"
+ ]
+ ]
+ },
+ {
+ "input": "&rdquor;",
+ "description": "Named entity: rdquor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201d"
+ ]
+ ]
+ },
+ {
+ "input": "&rdsh",
+ "description": "Bad named entity: rdsh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rdsh"
+ ]
+ ]
+ },
+ {
+ "input": "&rdsh;",
+ "description": "Named entity: rdsh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b3"
+ ]
+ ]
+ },
+ {
+ "input": "&real",
+ "description": "Bad named entity: real without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&real"
+ ]
+ ]
+ },
+ {
+ "input": "&real;",
+ "description": "Named entity: real; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211c"
+ ]
+ ]
+ },
+ {
+ "input": "&realine",
+ "description": "Bad named entity: realine without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&realine"
+ ]
+ ]
+ },
+ {
+ "input": "&realine;",
+ "description": "Named entity: realine; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211b"
+ ]
+ ]
+ },
+ {
+ "input": "&realpart",
+ "description": "Bad named entity: realpart without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&realpart"
+ ]
+ ]
+ },
+ {
+ "input": "&realpart;",
+ "description": "Named entity: realpart; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211c"
+ ]
+ ]
+ },
+ {
+ "input": "&reals",
+ "description": "Bad named entity: reals without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&reals"
+ ]
+ ]
+ },
+ {
+ "input": "&reals;",
+ "description": "Named entity: reals; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211d"
+ ]
+ ]
+ },
+ {
+ "input": "&rect",
+ "description": "Bad named entity: rect without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rect"
+ ]
+ ]
+ },
+ {
+ "input": "&rect;",
+ "description": "Named entity: rect; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25ad"
+ ]
+ ]
+ },
+ {
+ "input": "&reg",
+ "description": "Named entity: reg without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ae"
+ ]
+ ]
+ },
+ {
+ "input": "&reg;",
+ "description": "Named entity: reg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ae"
+ ]
+ ]
+ },
+ {
+ "input": "&rfisht",
+ "description": "Bad named entity: rfisht without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rfisht"
+ ]
+ ]
+ },
+ {
+ "input": "&rfisht;",
+ "description": "Named entity: rfisht; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u297d"
+ ]
+ ]
+ },
+ {
+ "input": "&rfloor",
+ "description": "Bad named entity: rfloor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rfloor"
+ ]
+ ]
+ },
+ {
+ "input": "&rfloor;",
+ "description": "Named entity: rfloor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u230b"
+ ]
+ ]
+ },
+ {
+ "input": "&rfr",
+ "description": "Bad named entity: rfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rfr"
+ ]
+ ]
+ },
+ {
+ "input": "&rfr;",
+ "description": "Named entity: rfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd2f"
+ ]
+ ]
+ },
+ {
+ "input": "&rhard",
+ "description": "Bad named entity: rhard without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rhard"
+ ]
+ ]
+ },
+ {
+ "input": "&rhard;",
+ "description": "Named entity: rhard; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c1"
+ ]
+ ]
+ },
+ {
+ "input": "&rharu",
+ "description": "Bad named entity: rharu without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rharu"
+ ]
+ ]
+ },
+ {
+ "input": "&rharu;",
+ "description": "Named entity: rharu; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c0"
+ ]
+ ]
+ },
+ {
+ "input": "&rharul",
+ "description": "Bad named entity: rharul without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rharul"
+ ]
+ ]
+ },
+ {
+ "input": "&rharul;",
+ "description": "Named entity: rharul; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u296c"
+ ]
+ ]
+ },
+ {
+ "input": "&rho",
+ "description": "Bad named entity: rho without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rho"
+ ]
+ ]
+ },
+ {
+ "input": "&rho;",
+ "description": "Named entity: rho; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c1"
+ ]
+ ]
+ },
+ {
+ "input": "&rhov",
+ "description": "Bad named entity: rhov without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rhov"
+ ]
+ ]
+ },
+ {
+ "input": "&rhov;",
+ "description": "Named entity: rhov; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f1"
+ ]
+ ]
+ },
+ {
+ "input": "&rightarrow",
+ "description": "Bad named entity: rightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&rightarrow;",
+ "description": "Named entity: rightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2192"
+ ]
+ ]
+ },
+ {
+ "input": "&rightarrowtail",
+ "description": "Bad named entity: rightarrowtail without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightarrowtail"
+ ]
+ ]
+ },
+ {
+ "input": "&rightarrowtail;",
+ "description": "Named entity: rightarrowtail; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a3"
+ ]
+ ]
+ },
+ {
+ "input": "&rightharpoondown",
+ "description": "Bad named entity: rightharpoondown without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightharpoondown"
+ ]
+ ]
+ },
+ {
+ "input": "&rightharpoondown;",
+ "description": "Named entity: rightharpoondown; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c1"
+ ]
+ ]
+ },
+ {
+ "input": "&rightharpoonup",
+ "description": "Bad named entity: rightharpoonup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightharpoonup"
+ ]
+ ]
+ },
+ {
+ "input": "&rightharpoonup;",
+ "description": "Named entity: rightharpoonup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c0"
+ ]
+ ]
+ },
+ {
+ "input": "&rightleftarrows",
+ "description": "Bad named entity: rightleftarrows without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightleftarrows"
+ ]
+ ]
+ },
+ {
+ "input": "&rightleftarrows;",
+ "description": "Named entity: rightleftarrows; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c4"
+ ]
+ ]
+ },
+ {
+ "input": "&rightleftharpoons",
+ "description": "Bad named entity: rightleftharpoons without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightleftharpoons"
+ ]
+ ]
+ },
+ {
+ "input": "&rightleftharpoons;",
+ "description": "Named entity: rightleftharpoons; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cc"
+ ]
+ ]
+ },
+ {
+ "input": "&rightrightarrows",
+ "description": "Bad named entity: rightrightarrows without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightrightarrows"
+ ]
+ ]
+ },
+ {
+ "input": "&rightrightarrows;",
+ "description": "Named entity: rightrightarrows; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c9"
+ ]
+ ]
+ },
+ {
+ "input": "&rightsquigarrow",
+ "description": "Bad named entity: rightsquigarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightsquigarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&rightsquigarrow;",
+ "description": "Named entity: rightsquigarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219d"
+ ]
+ ]
+ },
+ {
+ "input": "&rightthreetimes",
+ "description": "Bad named entity: rightthreetimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rightthreetimes"
+ ]
+ ]
+ },
+ {
+ "input": "&rightthreetimes;",
+ "description": "Named entity: rightthreetimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22cc"
+ ]
+ ]
+ },
+ {
+ "input": "&ring",
+ "description": "Bad named entity: ring without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ring"
+ ]
+ ]
+ },
+ {
+ "input": "&ring;",
+ "description": "Named entity: ring; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02da"
+ ]
+ ]
+ },
+ {
+ "input": "&risingdotseq",
+ "description": "Bad named entity: risingdotseq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&risingdotseq"
+ ]
+ ]
+ },
+ {
+ "input": "&risingdotseq;",
+ "description": "Named entity: risingdotseq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2253"
+ ]
+ ]
+ },
+ {
+ "input": "&rlarr",
+ "description": "Bad named entity: rlarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rlarr"
+ ]
+ ]
+ },
+ {
+ "input": "&rlarr;",
+ "description": "Named entity: rlarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c4"
+ ]
+ ]
+ },
+ {
+ "input": "&rlhar",
+ "description": "Bad named entity: rlhar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rlhar"
+ ]
+ ]
+ },
+ {
+ "input": "&rlhar;",
+ "description": "Named entity: rlhar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21cc"
+ ]
+ ]
+ },
+ {
+ "input": "&rlm",
+ "description": "Bad named entity: rlm without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rlm"
+ ]
+ ]
+ },
+ {
+ "input": "&rlm;",
+ "description": "Named entity: rlm; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200f"
+ ]
+ ]
+ },
+ {
+ "input": "&rmoust",
+ "description": "Bad named entity: rmoust without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rmoust"
+ ]
+ ]
+ },
+ {
+ "input": "&rmoust;",
+ "description": "Named entity: rmoust; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b1"
+ ]
+ ]
+ },
+ {
+ "input": "&rmoustache",
+ "description": "Bad named entity: rmoustache without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rmoustache"
+ ]
+ ]
+ },
+ {
+ "input": "&rmoustache;",
+ "description": "Named entity: rmoustache; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b1"
+ ]
+ ]
+ },
+ {
+ "input": "&rnmid",
+ "description": "Bad named entity: rnmid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rnmid"
+ ]
+ ]
+ },
+ {
+ "input": "&rnmid;",
+ "description": "Named entity: rnmid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aee"
+ ]
+ ]
+ },
+ {
+ "input": "&roang",
+ "description": "Bad named entity: roang without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&roang"
+ ]
+ ]
+ },
+ {
+ "input": "&roang;",
+ "description": "Named entity: roang; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27ed"
+ ]
+ ]
+ },
+ {
+ "input": "&roarr",
+ "description": "Bad named entity: roarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&roarr"
+ ]
+ ]
+ },
+ {
+ "input": "&roarr;",
+ "description": "Named entity: roarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21fe"
+ ]
+ ]
+ },
+ {
+ "input": "&robrk",
+ "description": "Bad named entity: robrk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&robrk"
+ ]
+ ]
+ },
+ {
+ "input": "&robrk;",
+ "description": "Named entity: robrk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27e7"
+ ]
+ ]
+ },
+ {
+ "input": "&ropar",
+ "description": "Bad named entity: ropar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ropar"
+ ]
+ ]
+ },
+ {
+ "input": "&ropar;",
+ "description": "Named entity: ropar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2986"
+ ]
+ ]
+ },
+ {
+ "input": "&ropf",
+ "description": "Bad named entity: ropf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ropf"
+ ]
+ ]
+ },
+ {
+ "input": "&ropf;",
+ "description": "Named entity: ropf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd63"
+ ]
+ ]
+ },
+ {
+ "input": "&roplus",
+ "description": "Bad named entity: roplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&roplus"
+ ]
+ ]
+ },
+ {
+ "input": "&roplus;",
+ "description": "Named entity: roplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a2e"
+ ]
+ ]
+ },
+ {
+ "input": "&rotimes",
+ "description": "Bad named entity: rotimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rotimes"
+ ]
+ ]
+ },
+ {
+ "input": "&rotimes;",
+ "description": "Named entity: rotimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a35"
+ ]
+ ]
+ },
+ {
+ "input": "&rpar",
+ "description": "Bad named entity: rpar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rpar"
+ ]
+ ]
+ },
+ {
+ "input": "&rpar;",
+ "description": "Named entity: rpar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ ")"
+ ]
+ ]
+ },
+ {
+ "input": "&rpargt",
+ "description": "Bad named entity: rpargt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rpargt"
+ ]
+ ]
+ },
+ {
+ "input": "&rpargt;",
+ "description": "Named entity: rpargt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2994"
+ ]
+ ]
+ },
+ {
+ "input": "&rppolint",
+ "description": "Bad named entity: rppolint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rppolint"
+ ]
+ ]
+ },
+ {
+ "input": "&rppolint;",
+ "description": "Named entity: rppolint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a12"
+ ]
+ ]
+ },
+ {
+ "input": "&rrarr",
+ "description": "Bad named entity: rrarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rrarr"
+ ]
+ ]
+ },
+ {
+ "input": "&rrarr;",
+ "description": "Named entity: rrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c9"
+ ]
+ ]
+ },
+ {
+ "input": "&rsaquo",
+ "description": "Bad named entity: rsaquo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rsaquo"
+ ]
+ ]
+ },
+ {
+ "input": "&rsaquo;",
+ "description": "Named entity: rsaquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u203a"
+ ]
+ ]
+ },
+ {
+ "input": "&rscr",
+ "description": "Bad named entity: rscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rscr"
+ ]
+ ]
+ },
+ {
+ "input": "&rscr;",
+ "description": "Named entity: rscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc7"
+ ]
+ ]
+ },
+ {
+ "input": "&rsh",
+ "description": "Bad named entity: rsh without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rsh"
+ ]
+ ]
+ },
+ {
+ "input": "&rsh;",
+ "description": "Named entity: rsh; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21b1"
+ ]
+ ]
+ },
+ {
+ "input": "&rsqb",
+ "description": "Bad named entity: rsqb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rsqb"
+ ]
+ ]
+ },
+ {
+ "input": "&rsqb;",
+ "description": "Named entity: rsqb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "]"
+ ]
+ ]
+ },
+ {
+ "input": "&rsquo",
+ "description": "Bad named entity: rsquo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rsquo"
+ ]
+ ]
+ },
+ {
+ "input": "&rsquo;",
+ "description": "Named entity: rsquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2019"
+ ]
+ ]
+ },
+ {
+ "input": "&rsquor",
+ "description": "Bad named entity: rsquor without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rsquor"
+ ]
+ ]
+ },
+ {
+ "input": "&rsquor;",
+ "description": "Named entity: rsquor; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2019"
+ ]
+ ]
+ },
+ {
+ "input": "&rthree",
+ "description": "Bad named entity: rthree without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rthree"
+ ]
+ ]
+ },
+ {
+ "input": "&rthree;",
+ "description": "Named entity: rthree; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22cc"
+ ]
+ ]
+ },
+ {
+ "input": "&rtimes",
+ "description": "Bad named entity: rtimes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rtimes"
+ ]
+ ]
+ },
+ {
+ "input": "&rtimes;",
+ "description": "Named entity: rtimes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ca"
+ ]
+ ]
+ },
+ {
+ "input": "&rtri",
+ "description": "Bad named entity: rtri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rtri"
+ ]
+ ]
+ },
+ {
+ "input": "&rtri;",
+ "description": "Named entity: rtri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b9"
+ ]
+ ]
+ },
+ {
+ "input": "&rtrie",
+ "description": "Bad named entity: rtrie without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rtrie"
+ ]
+ ]
+ },
+ {
+ "input": "&rtrie;",
+ "description": "Named entity: rtrie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b5"
+ ]
+ ]
+ },
+ {
+ "input": "&rtrif",
+ "description": "Bad named entity: rtrif without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rtrif"
+ ]
+ ]
+ },
+ {
+ "input": "&rtrif;",
+ "description": "Named entity: rtrif; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b8"
+ ]
+ ]
+ },
+ {
+ "input": "&rtriltri",
+ "description": "Bad named entity: rtriltri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rtriltri"
+ ]
+ ]
+ },
+ {
+ "input": "&rtriltri;",
+ "description": "Named entity: rtriltri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29ce"
+ ]
+ ]
+ },
+ {
+ "input": "&ruluhar",
+ "description": "Bad named entity: ruluhar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ruluhar"
+ ]
+ ]
+ },
+ {
+ "input": "&ruluhar;",
+ "description": "Named entity: ruluhar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2968"
+ ]
+ ]
+ },
+ {
+ "input": "&rx",
+ "description": "Bad named entity: rx without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&rx"
+ ]
+ ]
+ },
+ {
+ "input": "&rx;",
+ "description": "Named entity: rx; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u211e"
+ ]
+ ]
+ },
+ {
+ "input": "&sacute",
+ "description": "Bad named entity: sacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sacute"
+ ]
+ ]
+ },
+ {
+ "input": "&sacute;",
+ "description": "Named entity: sacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u015b"
+ ]
+ ]
+ },
+ {
+ "input": "&sbquo",
+ "description": "Bad named entity: sbquo without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sbquo"
+ ]
+ ]
+ },
+ {
+ "input": "&sbquo;",
+ "description": "Named entity: sbquo; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u201a"
+ ]
+ ]
+ },
+ {
+ "input": "&sc",
+ "description": "Bad named entity: sc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sc"
+ ]
+ ]
+ },
+ {
+ "input": "&sc;",
+ "description": "Named entity: sc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227b"
+ ]
+ ]
+ },
+ {
+ "input": "&scE",
+ "description": "Bad named entity: scE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scE"
+ ]
+ ]
+ },
+ {
+ "input": "&scE;",
+ "description": "Named entity: scE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab4"
+ ]
+ ]
+ },
+ {
+ "input": "&scap",
+ "description": "Bad named entity: scap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scap"
+ ]
+ ]
+ },
+ {
+ "input": "&scap;",
+ "description": "Named entity: scap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab8"
+ ]
+ ]
+ },
+ {
+ "input": "&scaron",
+ "description": "Bad named entity: scaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scaron"
+ ]
+ ]
+ },
+ {
+ "input": "&scaron;",
+ "description": "Named entity: scaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0161"
+ ]
+ ]
+ },
+ {
+ "input": "&sccue",
+ "description": "Bad named entity: sccue without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sccue"
+ ]
+ ]
+ },
+ {
+ "input": "&sccue;",
+ "description": "Named entity: sccue; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227d"
+ ]
+ ]
+ },
+ {
+ "input": "&sce",
+ "description": "Bad named entity: sce without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sce"
+ ]
+ ]
+ },
+ {
+ "input": "&sce;",
+ "description": "Named entity: sce; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab0"
+ ]
+ ]
+ },
+ {
+ "input": "&scedil",
+ "description": "Bad named entity: scedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scedil"
+ ]
+ ]
+ },
+ {
+ "input": "&scedil;",
+ "description": "Named entity: scedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u015f"
+ ]
+ ]
+ },
+ {
+ "input": "&scirc",
+ "description": "Bad named entity: scirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scirc"
+ ]
+ ]
+ },
+ {
+ "input": "&scirc;",
+ "description": "Named entity: scirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u015d"
+ ]
+ ]
+ },
+ {
+ "input": "&scnE",
+ "description": "Bad named entity: scnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scnE"
+ ]
+ ]
+ },
+ {
+ "input": "&scnE;",
+ "description": "Named entity: scnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab6"
+ ]
+ ]
+ },
+ {
+ "input": "&scnap",
+ "description": "Bad named entity: scnap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scnap"
+ ]
+ ]
+ },
+ {
+ "input": "&scnap;",
+ "description": "Named entity: scnap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aba"
+ ]
+ ]
+ },
+ {
+ "input": "&scnsim",
+ "description": "Bad named entity: scnsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scnsim"
+ ]
+ ]
+ },
+ {
+ "input": "&scnsim;",
+ "description": "Named entity: scnsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e9"
+ ]
+ ]
+ },
+ {
+ "input": "&scpolint",
+ "description": "Bad named entity: scpolint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scpolint"
+ ]
+ ]
+ },
+ {
+ "input": "&scpolint;",
+ "description": "Named entity: scpolint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a13"
+ ]
+ ]
+ },
+ {
+ "input": "&scsim",
+ "description": "Bad named entity: scsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scsim"
+ ]
+ ]
+ },
+ {
+ "input": "&scsim;",
+ "description": "Named entity: scsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227f"
+ ]
+ ]
+ },
+ {
+ "input": "&scy",
+ "description": "Bad named entity: scy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&scy"
+ ]
+ ]
+ },
+ {
+ "input": "&scy;",
+ "description": "Named entity: scy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0441"
+ ]
+ ]
+ },
+ {
+ "input": "&sdot",
+ "description": "Bad named entity: sdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sdot"
+ ]
+ ]
+ },
+ {
+ "input": "&sdot;",
+ "description": "Named entity: sdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c5"
+ ]
+ ]
+ },
+ {
+ "input": "&sdotb",
+ "description": "Bad named entity: sdotb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sdotb"
+ ]
+ ]
+ },
+ {
+ "input": "&sdotb;",
+ "description": "Named entity: sdotb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a1"
+ ]
+ ]
+ },
+ {
+ "input": "&sdote",
+ "description": "Bad named entity: sdote without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sdote"
+ ]
+ ]
+ },
+ {
+ "input": "&sdote;",
+ "description": "Named entity: sdote; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a66"
+ ]
+ ]
+ },
+ {
+ "input": "&seArr",
+ "description": "Bad named entity: seArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&seArr"
+ ]
+ ]
+ },
+ {
+ "input": "&seArr;",
+ "description": "Named entity: seArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d8"
+ ]
+ ]
+ },
+ {
+ "input": "&searhk",
+ "description": "Bad named entity: searhk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&searhk"
+ ]
+ ]
+ },
+ {
+ "input": "&searhk;",
+ "description": "Named entity: searhk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2925"
+ ]
+ ]
+ },
+ {
+ "input": "&searr",
+ "description": "Bad named entity: searr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&searr"
+ ]
+ ]
+ },
+ {
+ "input": "&searr;",
+ "description": "Named entity: searr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2198"
+ ]
+ ]
+ },
+ {
+ "input": "&searrow",
+ "description": "Bad named entity: searrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&searrow"
+ ]
+ ]
+ },
+ {
+ "input": "&searrow;",
+ "description": "Named entity: searrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2198"
+ ]
+ ]
+ },
+ {
+ "input": "&sect",
+ "description": "Named entity: sect without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a7"
+ ]
+ ]
+ },
+ {
+ "input": "&sect;",
+ "description": "Named entity: sect; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a7"
+ ]
+ ]
+ },
+ {
+ "input": "&semi",
+ "description": "Bad named entity: semi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&semi"
+ ]
+ ]
+ },
+ {
+ "input": "&semi;",
+ "description": "Named entity: semi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ ";"
+ ]
+ ]
+ },
+ {
+ "input": "&seswar",
+ "description": "Bad named entity: seswar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&seswar"
+ ]
+ ]
+ },
+ {
+ "input": "&seswar;",
+ "description": "Named entity: seswar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2929"
+ ]
+ ]
+ },
+ {
+ "input": "&setminus",
+ "description": "Bad named entity: setminus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&setminus"
+ ]
+ ]
+ },
+ {
+ "input": "&setminus;",
+ "description": "Named entity: setminus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2216"
+ ]
+ ]
+ },
+ {
+ "input": "&setmn",
+ "description": "Bad named entity: setmn without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&setmn"
+ ]
+ ]
+ },
+ {
+ "input": "&setmn;",
+ "description": "Named entity: setmn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2216"
+ ]
+ ]
+ },
+ {
+ "input": "&sext",
+ "description": "Bad named entity: sext without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sext"
+ ]
+ ]
+ },
+ {
+ "input": "&sext;",
+ "description": "Named entity: sext; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2736"
+ ]
+ ]
+ },
+ {
+ "input": "&sfr",
+ "description": "Bad named entity: sfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sfr"
+ ]
+ ]
+ },
+ {
+ "input": "&sfr;",
+ "description": "Named entity: sfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd30"
+ ]
+ ]
+ },
+ {
+ "input": "&sfrown",
+ "description": "Bad named entity: sfrown without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sfrown"
+ ]
+ ]
+ },
+ {
+ "input": "&sfrown;",
+ "description": "Named entity: sfrown; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2322"
+ ]
+ ]
+ },
+ {
+ "input": "&sharp",
+ "description": "Bad named entity: sharp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sharp"
+ ]
+ ]
+ },
+ {
+ "input": "&sharp;",
+ "description": "Named entity: sharp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u266f"
+ ]
+ ]
+ },
+ {
+ "input": "&shchcy",
+ "description": "Bad named entity: shchcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&shchcy"
+ ]
+ ]
+ },
+ {
+ "input": "&shchcy;",
+ "description": "Named entity: shchcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0449"
+ ]
+ ]
+ },
+ {
+ "input": "&shcy",
+ "description": "Bad named entity: shcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&shcy"
+ ]
+ ]
+ },
+ {
+ "input": "&shcy;",
+ "description": "Named entity: shcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0448"
+ ]
+ ]
+ },
+ {
+ "input": "&shortmid",
+ "description": "Bad named entity: shortmid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&shortmid"
+ ]
+ ]
+ },
+ {
+ "input": "&shortmid;",
+ "description": "Named entity: shortmid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2223"
+ ]
+ ]
+ },
+ {
+ "input": "&shortparallel",
+ "description": "Bad named entity: shortparallel without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&shortparallel"
+ ]
+ ]
+ },
+ {
+ "input": "&shortparallel;",
+ "description": "Named entity: shortparallel; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2225"
+ ]
+ ]
+ },
+ {
+ "input": "&shy",
+ "description": "Named entity: shy without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ad"
+ ]
+ ]
+ },
+ {
+ "input": "&shy;",
+ "description": "Named entity: shy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ad"
+ ]
+ ]
+ },
+ {
+ "input": "&sigma",
+ "description": "Bad named entity: sigma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sigma"
+ ]
+ ]
+ },
+ {
+ "input": "&sigma;",
+ "description": "Named entity: sigma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c3"
+ ]
+ ]
+ },
+ {
+ "input": "&sigmaf",
+ "description": "Bad named entity: sigmaf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sigmaf"
+ ]
+ ]
+ },
+ {
+ "input": "&sigmaf;",
+ "description": "Named entity: sigmaf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c2"
+ ]
+ ]
+ },
+ {
+ "input": "&sigmav",
+ "description": "Bad named entity: sigmav without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sigmav"
+ ]
+ ]
+ },
+ {
+ "input": "&sigmav;",
+ "description": "Named entity: sigmav; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c2"
+ ]
+ ]
+ },
+ {
+ "input": "&sim",
+ "description": "Bad named entity: sim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sim"
+ ]
+ ]
+ },
+ {
+ "input": "&sim;",
+ "description": "Named entity: sim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223c"
+ ]
+ ]
+ },
+ {
+ "input": "&simdot",
+ "description": "Bad named entity: simdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&simdot"
+ ]
+ ]
+ },
+ {
+ "input": "&simdot;",
+ "description": "Named entity: simdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a6a"
+ ]
+ ]
+ },
+ {
+ "input": "&sime",
+ "description": "Bad named entity: sime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sime"
+ ]
+ ]
+ },
+ {
+ "input": "&sime;",
+ "description": "Named entity: sime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2243"
+ ]
+ ]
+ },
+ {
+ "input": "&simeq",
+ "description": "Bad named entity: simeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&simeq"
+ ]
+ ]
+ },
+ {
+ "input": "&simeq;",
+ "description": "Named entity: simeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2243"
+ ]
+ ]
+ },
+ {
+ "input": "&simg",
+ "description": "Bad named entity: simg without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&simg"
+ ]
+ ]
+ },
+ {
+ "input": "&simg;",
+ "description": "Named entity: simg; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a9e"
+ ]
+ ]
+ },
+ {
+ "input": "&simgE",
+ "description": "Bad named entity: simgE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&simgE"
+ ]
+ ]
+ },
+ {
+ "input": "&simgE;",
+ "description": "Named entity: simgE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aa0"
+ ]
+ ]
+ },
+ {
+ "input": "&siml",
+ "description": "Bad named entity: siml without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&siml"
+ ]
+ ]
+ },
+ {
+ "input": "&siml;",
+ "description": "Named entity: siml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a9d"
+ ]
+ ]
+ },
+ {
+ "input": "&simlE",
+ "description": "Bad named entity: simlE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&simlE"
+ ]
+ ]
+ },
+ {
+ "input": "&simlE;",
+ "description": "Named entity: simlE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a9f"
+ ]
+ ]
+ },
+ {
+ "input": "&simne",
+ "description": "Bad named entity: simne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&simne"
+ ]
+ ]
+ },
+ {
+ "input": "&simne;",
+ "description": "Named entity: simne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2246"
+ ]
+ ]
+ },
+ {
+ "input": "&simplus",
+ "description": "Bad named entity: simplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&simplus"
+ ]
+ ]
+ },
+ {
+ "input": "&simplus;",
+ "description": "Named entity: simplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a24"
+ ]
+ ]
+ },
+ {
+ "input": "&simrarr",
+ "description": "Bad named entity: simrarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&simrarr"
+ ]
+ ]
+ },
+ {
+ "input": "&simrarr;",
+ "description": "Named entity: simrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2972"
+ ]
+ ]
+ },
+ {
+ "input": "&slarr",
+ "description": "Bad named entity: slarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&slarr"
+ ]
+ ]
+ },
+ {
+ "input": "&slarr;",
+ "description": "Named entity: slarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2190"
+ ]
+ ]
+ },
+ {
+ "input": "&smallsetminus",
+ "description": "Bad named entity: smallsetminus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&smallsetminus"
+ ]
+ ]
+ },
+ {
+ "input": "&smallsetminus;",
+ "description": "Named entity: smallsetminus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2216"
+ ]
+ ]
+ },
+ {
+ "input": "&smashp",
+ "description": "Bad named entity: smashp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&smashp"
+ ]
+ ]
+ },
+ {
+ "input": "&smashp;",
+ "description": "Named entity: smashp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a33"
+ ]
+ ]
+ },
+ {
+ "input": "&smeparsl",
+ "description": "Bad named entity: smeparsl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&smeparsl"
+ ]
+ ]
+ },
+ {
+ "input": "&smeparsl;",
+ "description": "Named entity: smeparsl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29e4"
+ ]
+ ]
+ },
+ {
+ "input": "&smid",
+ "description": "Bad named entity: smid without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&smid"
+ ]
+ ]
+ },
+ {
+ "input": "&smid;",
+ "description": "Named entity: smid; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2223"
+ ]
+ ]
+ },
+ {
+ "input": "&smile",
+ "description": "Bad named entity: smile without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&smile"
+ ]
+ ]
+ },
+ {
+ "input": "&smile;",
+ "description": "Named entity: smile; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2323"
+ ]
+ ]
+ },
+ {
+ "input": "&smt",
+ "description": "Bad named entity: smt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&smt"
+ ]
+ ]
+ },
+ {
+ "input": "&smt;",
+ "description": "Named entity: smt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aaa"
+ ]
+ ]
+ },
+ {
+ "input": "&smte",
+ "description": "Bad named entity: smte without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&smte"
+ ]
+ ]
+ },
+ {
+ "input": "&smte;",
+ "description": "Named entity: smte; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aac"
+ ]
+ ]
+ },
+ {
+ "input": "&smtes",
+ "description": "Bad named entity: smtes without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&smtes"
+ ]
+ ]
+ },
+ {
+ "input": "&smtes;",
+ "description": "Named entity: smtes; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aac\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&softcy",
+ "description": "Bad named entity: softcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&softcy"
+ ]
+ ]
+ },
+ {
+ "input": "&softcy;",
+ "description": "Named entity: softcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u044c"
+ ]
+ ]
+ },
+ {
+ "input": "&sol",
+ "description": "Bad named entity: sol without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sol"
+ ]
+ ]
+ },
+ {
+ "input": "&sol;",
+ "description": "Named entity: sol; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "/"
+ ]
+ ]
+ },
+ {
+ "input": "&solb",
+ "description": "Bad named entity: solb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&solb"
+ ]
+ ]
+ },
+ {
+ "input": "&solb;",
+ "description": "Named entity: solb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29c4"
+ ]
+ ]
+ },
+ {
+ "input": "&solbar",
+ "description": "Bad named entity: solbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&solbar"
+ ]
+ ]
+ },
+ {
+ "input": "&solbar;",
+ "description": "Named entity: solbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u233f"
+ ]
+ ]
+ },
+ {
+ "input": "&sopf",
+ "description": "Bad named entity: sopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sopf"
+ ]
+ ]
+ },
+ {
+ "input": "&sopf;",
+ "description": "Named entity: sopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd64"
+ ]
+ ]
+ },
+ {
+ "input": "&spades",
+ "description": "Bad named entity: spades without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&spades"
+ ]
+ ]
+ },
+ {
+ "input": "&spades;",
+ "description": "Named entity: spades; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2660"
+ ]
+ ]
+ },
+ {
+ "input": "&spadesuit",
+ "description": "Bad named entity: spadesuit without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&spadesuit"
+ ]
+ ]
+ },
+ {
+ "input": "&spadesuit;",
+ "description": "Named entity: spadesuit; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2660"
+ ]
+ ]
+ },
+ {
+ "input": "&spar",
+ "description": "Bad named entity: spar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&spar"
+ ]
+ ]
+ },
+ {
+ "input": "&spar;",
+ "description": "Named entity: spar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2225"
+ ]
+ ]
+ },
+ {
+ "input": "&sqcap",
+ "description": "Bad named entity: sqcap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqcap"
+ ]
+ ]
+ },
+ {
+ "input": "&sqcap;",
+ "description": "Named entity: sqcap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2293"
+ ]
+ ]
+ },
+ {
+ "input": "&sqcaps",
+ "description": "Bad named entity: sqcaps without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqcaps"
+ ]
+ ]
+ },
+ {
+ "input": "&sqcaps;",
+ "description": "Named entity: sqcaps; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2293\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&sqcup",
+ "description": "Bad named entity: sqcup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqcup"
+ ]
+ ]
+ },
+ {
+ "input": "&sqcup;",
+ "description": "Named entity: sqcup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2294"
+ ]
+ ]
+ },
+ {
+ "input": "&sqcups",
+ "description": "Bad named entity: sqcups without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqcups"
+ ]
+ ]
+ },
+ {
+ "input": "&sqcups;",
+ "description": "Named entity: sqcups; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2294\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsub",
+ "description": "Bad named entity: sqsub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqsub"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsub;",
+ "description": "Named entity: sqsub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228f"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsube",
+ "description": "Bad named entity: sqsube without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqsube"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsube;",
+ "description": "Named entity: sqsube; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2291"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsubset",
+ "description": "Bad named entity: sqsubset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqsubset"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsubset;",
+ "description": "Named entity: sqsubset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228f"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsubseteq",
+ "description": "Bad named entity: sqsubseteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqsubseteq"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsubseteq;",
+ "description": "Named entity: sqsubseteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2291"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsup",
+ "description": "Bad named entity: sqsup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqsup"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsup;",
+ "description": "Named entity: sqsup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2290"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsupe",
+ "description": "Bad named entity: sqsupe without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqsupe"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsupe;",
+ "description": "Named entity: sqsupe; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2292"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsupset",
+ "description": "Bad named entity: sqsupset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqsupset"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsupset;",
+ "description": "Named entity: sqsupset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2290"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsupseteq",
+ "description": "Bad named entity: sqsupseteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sqsupseteq"
+ ]
+ ]
+ },
+ {
+ "input": "&sqsupseteq;",
+ "description": "Named entity: sqsupseteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2292"
+ ]
+ ]
+ },
+ {
+ "input": "&squ",
+ "description": "Bad named entity: squ without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&squ"
+ ]
+ ]
+ },
+ {
+ "input": "&squ;",
+ "description": "Named entity: squ; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25a1"
+ ]
+ ]
+ },
+ {
+ "input": "&square",
+ "description": "Bad named entity: square without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&square"
+ ]
+ ]
+ },
+ {
+ "input": "&square;",
+ "description": "Named entity: square; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25a1"
+ ]
+ ]
+ },
+ {
+ "input": "&squarf",
+ "description": "Bad named entity: squarf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&squarf"
+ ]
+ ]
+ },
+ {
+ "input": "&squarf;",
+ "description": "Named entity: squarf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25aa"
+ ]
+ ]
+ },
+ {
+ "input": "&squf",
+ "description": "Bad named entity: squf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&squf"
+ ]
+ ]
+ },
+ {
+ "input": "&squf;",
+ "description": "Named entity: squf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25aa"
+ ]
+ ]
+ },
+ {
+ "input": "&srarr",
+ "description": "Bad named entity: srarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&srarr"
+ ]
+ ]
+ },
+ {
+ "input": "&srarr;",
+ "description": "Named entity: srarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2192"
+ ]
+ ]
+ },
+ {
+ "input": "&sscr",
+ "description": "Bad named entity: sscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sscr"
+ ]
+ ]
+ },
+ {
+ "input": "&sscr;",
+ "description": "Named entity: sscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc8"
+ ]
+ ]
+ },
+ {
+ "input": "&ssetmn",
+ "description": "Bad named entity: ssetmn without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ssetmn"
+ ]
+ ]
+ },
+ {
+ "input": "&ssetmn;",
+ "description": "Named entity: ssetmn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2216"
+ ]
+ ]
+ },
+ {
+ "input": "&ssmile",
+ "description": "Bad named entity: ssmile without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ssmile"
+ ]
+ ]
+ },
+ {
+ "input": "&ssmile;",
+ "description": "Named entity: ssmile; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2323"
+ ]
+ ]
+ },
+ {
+ "input": "&sstarf",
+ "description": "Bad named entity: sstarf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sstarf"
+ ]
+ ]
+ },
+ {
+ "input": "&sstarf;",
+ "description": "Named entity: sstarf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c6"
+ ]
+ ]
+ },
+ {
+ "input": "&star",
+ "description": "Bad named entity: star without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&star"
+ ]
+ ]
+ },
+ {
+ "input": "&star;",
+ "description": "Named entity: star; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2606"
+ ]
+ ]
+ },
+ {
+ "input": "&starf",
+ "description": "Bad named entity: starf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&starf"
+ ]
+ ]
+ },
+ {
+ "input": "&starf;",
+ "description": "Named entity: starf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2605"
+ ]
+ ]
+ },
+ {
+ "input": "&straightepsilon",
+ "description": "Bad named entity: straightepsilon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&straightepsilon"
+ ]
+ ]
+ },
+ {
+ "input": "&straightepsilon;",
+ "description": "Named entity: straightepsilon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f5"
+ ]
+ ]
+ },
+ {
+ "input": "&straightphi",
+ "description": "Bad named entity: straightphi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&straightphi"
+ ]
+ ]
+ },
+ {
+ "input": "&straightphi;",
+ "description": "Named entity: straightphi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d5"
+ ]
+ ]
+ },
+ {
+ "input": "&strns",
+ "description": "Bad named entity: strns without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&strns"
+ ]
+ ]
+ },
+ {
+ "input": "&strns;",
+ "description": "Named entity: strns; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00af"
+ ]
+ ]
+ },
+ {
+ "input": "&sub",
+ "description": "Bad named entity: sub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sub"
+ ]
+ ]
+ },
+ {
+ "input": "&sub;",
+ "description": "Named entity: sub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2282"
+ ]
+ ]
+ },
+ {
+ "input": "&subE",
+ "description": "Bad named entity: subE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subE"
+ ]
+ ]
+ },
+ {
+ "input": "&subE;",
+ "description": "Named entity: subE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac5"
+ ]
+ ]
+ },
+ {
+ "input": "&subdot",
+ "description": "Bad named entity: subdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subdot"
+ ]
+ ]
+ },
+ {
+ "input": "&subdot;",
+ "description": "Named entity: subdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2abd"
+ ]
+ ]
+ },
+ {
+ "input": "&sube",
+ "description": "Bad named entity: sube without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sube"
+ ]
+ ]
+ },
+ {
+ "input": "&sube;",
+ "description": "Named entity: sube; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2286"
+ ]
+ ]
+ },
+ {
+ "input": "&subedot",
+ "description": "Bad named entity: subedot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subedot"
+ ]
+ ]
+ },
+ {
+ "input": "&subedot;",
+ "description": "Named entity: subedot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac3"
+ ]
+ ]
+ },
+ {
+ "input": "&submult",
+ "description": "Bad named entity: submult without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&submult"
+ ]
+ ]
+ },
+ {
+ "input": "&submult;",
+ "description": "Named entity: submult; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac1"
+ ]
+ ]
+ },
+ {
+ "input": "&subnE",
+ "description": "Bad named entity: subnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subnE"
+ ]
+ ]
+ },
+ {
+ "input": "&subnE;",
+ "description": "Named entity: subnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acb"
+ ]
+ ]
+ },
+ {
+ "input": "&subne",
+ "description": "Bad named entity: subne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subne"
+ ]
+ ]
+ },
+ {
+ "input": "&subne;",
+ "description": "Named entity: subne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228a"
+ ]
+ ]
+ },
+ {
+ "input": "&subplus",
+ "description": "Bad named entity: subplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subplus"
+ ]
+ ]
+ },
+ {
+ "input": "&subplus;",
+ "description": "Named entity: subplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2abf"
+ ]
+ ]
+ },
+ {
+ "input": "&subrarr",
+ "description": "Bad named entity: subrarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subrarr"
+ ]
+ ]
+ },
+ {
+ "input": "&subrarr;",
+ "description": "Named entity: subrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2979"
+ ]
+ ]
+ },
+ {
+ "input": "&subset",
+ "description": "Bad named entity: subset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subset"
+ ]
+ ]
+ },
+ {
+ "input": "&subset;",
+ "description": "Named entity: subset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2282"
+ ]
+ ]
+ },
+ {
+ "input": "&subseteq",
+ "description": "Bad named entity: subseteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subseteq"
+ ]
+ ]
+ },
+ {
+ "input": "&subseteq;",
+ "description": "Named entity: subseteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2286"
+ ]
+ ]
+ },
+ {
+ "input": "&subseteqq",
+ "description": "Bad named entity: subseteqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subseteqq"
+ ]
+ ]
+ },
+ {
+ "input": "&subseteqq;",
+ "description": "Named entity: subseteqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac5"
+ ]
+ ]
+ },
+ {
+ "input": "&subsetneq",
+ "description": "Bad named entity: subsetneq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subsetneq"
+ ]
+ ]
+ },
+ {
+ "input": "&subsetneq;",
+ "description": "Named entity: subsetneq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228a"
+ ]
+ ]
+ },
+ {
+ "input": "&subsetneqq",
+ "description": "Bad named entity: subsetneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subsetneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&subsetneqq;",
+ "description": "Named entity: subsetneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acb"
+ ]
+ ]
+ },
+ {
+ "input": "&subsim",
+ "description": "Bad named entity: subsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subsim"
+ ]
+ ]
+ },
+ {
+ "input": "&subsim;",
+ "description": "Named entity: subsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac7"
+ ]
+ ]
+ },
+ {
+ "input": "&subsub",
+ "description": "Bad named entity: subsub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subsub"
+ ]
+ ]
+ },
+ {
+ "input": "&subsub;",
+ "description": "Named entity: subsub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad5"
+ ]
+ ]
+ },
+ {
+ "input": "&subsup",
+ "description": "Bad named entity: subsup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&subsup"
+ ]
+ ]
+ },
+ {
+ "input": "&subsup;",
+ "description": "Named entity: subsup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad3"
+ ]
+ ]
+ },
+ {
+ "input": "&succ",
+ "description": "Bad named entity: succ without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&succ"
+ ]
+ ]
+ },
+ {
+ "input": "&succ;",
+ "description": "Named entity: succ; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227b"
+ ]
+ ]
+ },
+ {
+ "input": "&succapprox",
+ "description": "Bad named entity: succapprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&succapprox"
+ ]
+ ]
+ },
+ {
+ "input": "&succapprox;",
+ "description": "Named entity: succapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab8"
+ ]
+ ]
+ },
+ {
+ "input": "&succcurlyeq",
+ "description": "Bad named entity: succcurlyeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&succcurlyeq"
+ ]
+ ]
+ },
+ {
+ "input": "&succcurlyeq;",
+ "description": "Named entity: succcurlyeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227d"
+ ]
+ ]
+ },
+ {
+ "input": "&succeq",
+ "description": "Bad named entity: succeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&succeq"
+ ]
+ ]
+ },
+ {
+ "input": "&succeq;",
+ "description": "Named entity: succeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab0"
+ ]
+ ]
+ },
+ {
+ "input": "&succnapprox",
+ "description": "Bad named entity: succnapprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&succnapprox"
+ ]
+ ]
+ },
+ {
+ "input": "&succnapprox;",
+ "description": "Named entity: succnapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2aba"
+ ]
+ ]
+ },
+ {
+ "input": "&succneqq",
+ "description": "Bad named entity: succneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&succneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&succneqq;",
+ "description": "Named entity: succneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ab6"
+ ]
+ ]
+ },
+ {
+ "input": "&succnsim",
+ "description": "Bad named entity: succnsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&succnsim"
+ ]
+ ]
+ },
+ {
+ "input": "&succnsim;",
+ "description": "Named entity: succnsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22e9"
+ ]
+ ]
+ },
+ {
+ "input": "&succsim",
+ "description": "Bad named entity: succsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&succsim"
+ ]
+ ]
+ },
+ {
+ "input": "&succsim;",
+ "description": "Named entity: succsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u227f"
+ ]
+ ]
+ },
+ {
+ "input": "&sum",
+ "description": "Bad named entity: sum without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sum"
+ ]
+ ]
+ },
+ {
+ "input": "&sum;",
+ "description": "Named entity: sum; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2211"
+ ]
+ ]
+ },
+ {
+ "input": "&sung",
+ "description": "Bad named entity: sung without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sung"
+ ]
+ ]
+ },
+ {
+ "input": "&sung;",
+ "description": "Named entity: sung; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u266a"
+ ]
+ ]
+ },
+ {
+ "input": "&sup",
+ "description": "Bad named entity: sup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&sup"
+ ]
+ ]
+ },
+ {
+ "input": "&sup1",
+ "description": "Named entity: sup1 without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b9"
+ ]
+ ]
+ },
+ {
+ "input": "&sup1;",
+ "description": "Named entity: sup1; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b9"
+ ]
+ ]
+ },
+ {
+ "input": "&sup2",
+ "description": "Named entity: sup2 without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b2"
+ ]
+ ]
+ },
+ {
+ "input": "&sup2;",
+ "description": "Named entity: sup2; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b2"
+ ]
+ ]
+ },
+ {
+ "input": "&sup3",
+ "description": "Named entity: sup3 without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00b3"
+ ]
+ ]
+ },
+ {
+ "input": "&sup3;",
+ "description": "Named entity: sup3; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00b3"
+ ]
+ ]
+ },
+ {
+ "input": "&sup;",
+ "description": "Named entity: sup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2283"
+ ]
+ ]
+ },
+ {
+ "input": "&supE",
+ "description": "Bad named entity: supE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supE"
+ ]
+ ]
+ },
+ {
+ "input": "&supE;",
+ "description": "Named entity: supE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac6"
+ ]
+ ]
+ },
+ {
+ "input": "&supdot",
+ "description": "Bad named entity: supdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supdot"
+ ]
+ ]
+ },
+ {
+ "input": "&supdot;",
+ "description": "Named entity: supdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2abe"
+ ]
+ ]
+ },
+ {
+ "input": "&supdsub",
+ "description": "Bad named entity: supdsub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supdsub"
+ ]
+ ]
+ },
+ {
+ "input": "&supdsub;",
+ "description": "Named entity: supdsub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad8"
+ ]
+ ]
+ },
+ {
+ "input": "&supe",
+ "description": "Bad named entity: supe without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supe"
+ ]
+ ]
+ },
+ {
+ "input": "&supe;",
+ "description": "Named entity: supe; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2287"
+ ]
+ ]
+ },
+ {
+ "input": "&supedot",
+ "description": "Bad named entity: supedot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supedot"
+ ]
+ ]
+ },
+ {
+ "input": "&supedot;",
+ "description": "Named entity: supedot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac4"
+ ]
+ ]
+ },
+ {
+ "input": "&suphsol",
+ "description": "Bad named entity: suphsol without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&suphsol"
+ ]
+ ]
+ },
+ {
+ "input": "&suphsol;",
+ "description": "Named entity: suphsol; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27c9"
+ ]
+ ]
+ },
+ {
+ "input": "&suphsub",
+ "description": "Bad named entity: suphsub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&suphsub"
+ ]
+ ]
+ },
+ {
+ "input": "&suphsub;",
+ "description": "Named entity: suphsub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad7"
+ ]
+ ]
+ },
+ {
+ "input": "&suplarr",
+ "description": "Bad named entity: suplarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&suplarr"
+ ]
+ ]
+ },
+ {
+ "input": "&suplarr;",
+ "description": "Named entity: suplarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u297b"
+ ]
+ ]
+ },
+ {
+ "input": "&supmult",
+ "description": "Bad named entity: supmult without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supmult"
+ ]
+ ]
+ },
+ {
+ "input": "&supmult;",
+ "description": "Named entity: supmult; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac2"
+ ]
+ ]
+ },
+ {
+ "input": "&supnE",
+ "description": "Bad named entity: supnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supnE"
+ ]
+ ]
+ },
+ {
+ "input": "&supnE;",
+ "description": "Named entity: supnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acc"
+ ]
+ ]
+ },
+ {
+ "input": "&supne",
+ "description": "Bad named entity: supne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supne"
+ ]
+ ]
+ },
+ {
+ "input": "&supne;",
+ "description": "Named entity: supne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228b"
+ ]
+ ]
+ },
+ {
+ "input": "&supplus",
+ "description": "Bad named entity: supplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supplus"
+ ]
+ ]
+ },
+ {
+ "input": "&supplus;",
+ "description": "Named entity: supplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac0"
+ ]
+ ]
+ },
+ {
+ "input": "&supset",
+ "description": "Bad named entity: supset without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supset"
+ ]
+ ]
+ },
+ {
+ "input": "&supset;",
+ "description": "Named entity: supset; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2283"
+ ]
+ ]
+ },
+ {
+ "input": "&supseteq",
+ "description": "Bad named entity: supseteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supseteq"
+ ]
+ ]
+ },
+ {
+ "input": "&supseteq;",
+ "description": "Named entity: supseteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2287"
+ ]
+ ]
+ },
+ {
+ "input": "&supseteqq",
+ "description": "Bad named entity: supseteqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supseteqq"
+ ]
+ ]
+ },
+ {
+ "input": "&supseteqq;",
+ "description": "Named entity: supseteqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac6"
+ ]
+ ]
+ },
+ {
+ "input": "&supsetneq",
+ "description": "Bad named entity: supsetneq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supsetneq"
+ ]
+ ]
+ },
+ {
+ "input": "&supsetneq;",
+ "description": "Named entity: supsetneq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228b"
+ ]
+ ]
+ },
+ {
+ "input": "&supsetneqq",
+ "description": "Bad named entity: supsetneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supsetneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&supsetneqq;",
+ "description": "Named entity: supsetneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acc"
+ ]
+ ]
+ },
+ {
+ "input": "&supsim",
+ "description": "Bad named entity: supsim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supsim"
+ ]
+ ]
+ },
+ {
+ "input": "&supsim;",
+ "description": "Named entity: supsim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ac8"
+ ]
+ ]
+ },
+ {
+ "input": "&supsub",
+ "description": "Bad named entity: supsub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supsub"
+ ]
+ ]
+ },
+ {
+ "input": "&supsub;",
+ "description": "Named entity: supsub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad4"
+ ]
+ ]
+ },
+ {
+ "input": "&supsup",
+ "description": "Bad named entity: supsup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&supsup"
+ ]
+ ]
+ },
+ {
+ "input": "&supsup;",
+ "description": "Named entity: supsup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ad6"
+ ]
+ ]
+ },
+ {
+ "input": "&swArr",
+ "description": "Bad named entity: swArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&swArr"
+ ]
+ ]
+ },
+ {
+ "input": "&swArr;",
+ "description": "Named entity: swArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d9"
+ ]
+ ]
+ },
+ {
+ "input": "&swarhk",
+ "description": "Bad named entity: swarhk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&swarhk"
+ ]
+ ]
+ },
+ {
+ "input": "&swarhk;",
+ "description": "Named entity: swarhk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2926"
+ ]
+ ]
+ },
+ {
+ "input": "&swarr",
+ "description": "Bad named entity: swarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&swarr"
+ ]
+ ]
+ },
+ {
+ "input": "&swarr;",
+ "description": "Named entity: swarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2199"
+ ]
+ ]
+ },
+ {
+ "input": "&swarrow",
+ "description": "Bad named entity: swarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&swarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&swarrow;",
+ "description": "Named entity: swarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2199"
+ ]
+ ]
+ },
+ {
+ "input": "&swnwar",
+ "description": "Bad named entity: swnwar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&swnwar"
+ ]
+ ]
+ },
+ {
+ "input": "&swnwar;",
+ "description": "Named entity: swnwar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u292a"
+ ]
+ ]
+ },
+ {
+ "input": "&szlig",
+ "description": "Named entity: szlig without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00df"
+ ]
+ ]
+ },
+ {
+ "input": "&szlig;",
+ "description": "Named entity: szlig; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00df"
+ ]
+ ]
+ },
+ {
+ "input": "&target",
+ "description": "Bad named entity: target without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&target"
+ ]
+ ]
+ },
+ {
+ "input": "&target;",
+ "description": "Named entity: target; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2316"
+ ]
+ ]
+ },
+ {
+ "input": "&tau",
+ "description": "Bad named entity: tau without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tau"
+ ]
+ ]
+ },
+ {
+ "input": "&tau;",
+ "description": "Named entity: tau; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c4"
+ ]
+ ]
+ },
+ {
+ "input": "&tbrk",
+ "description": "Bad named entity: tbrk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tbrk"
+ ]
+ ]
+ },
+ {
+ "input": "&tbrk;",
+ "description": "Named entity: tbrk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23b4"
+ ]
+ ]
+ },
+ {
+ "input": "&tcaron",
+ "description": "Bad named entity: tcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&tcaron;",
+ "description": "Named entity: tcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0165"
+ ]
+ ]
+ },
+ {
+ "input": "&tcedil",
+ "description": "Bad named entity: tcedil without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tcedil"
+ ]
+ ]
+ },
+ {
+ "input": "&tcedil;",
+ "description": "Named entity: tcedil; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0163"
+ ]
+ ]
+ },
+ {
+ "input": "&tcy",
+ "description": "Bad named entity: tcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tcy"
+ ]
+ ]
+ },
+ {
+ "input": "&tcy;",
+ "description": "Named entity: tcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0442"
+ ]
+ ]
+ },
+ {
+ "input": "&tdot",
+ "description": "Bad named entity: tdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tdot"
+ ]
+ ]
+ },
+ {
+ "input": "&tdot;",
+ "description": "Named entity: tdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u20db"
+ ]
+ ]
+ },
+ {
+ "input": "&telrec",
+ "description": "Bad named entity: telrec without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&telrec"
+ ]
+ ]
+ },
+ {
+ "input": "&telrec;",
+ "description": "Named entity: telrec; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2315"
+ ]
+ ]
+ },
+ {
+ "input": "&tfr",
+ "description": "Bad named entity: tfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tfr"
+ ]
+ ]
+ },
+ {
+ "input": "&tfr;",
+ "description": "Named entity: tfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd31"
+ ]
+ ]
+ },
+ {
+ "input": "&there4",
+ "description": "Bad named entity: there4 without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&there4"
+ ]
+ ]
+ },
+ {
+ "input": "&there4;",
+ "description": "Named entity: there4; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2234"
+ ]
+ ]
+ },
+ {
+ "input": "&therefore",
+ "description": "Bad named entity: therefore without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&therefore"
+ ]
+ ]
+ },
+ {
+ "input": "&therefore;",
+ "description": "Named entity: therefore; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2234"
+ ]
+ ]
+ },
+ {
+ "input": "&theta",
+ "description": "Bad named entity: theta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&theta"
+ ]
+ ]
+ },
+ {
+ "input": "&theta;",
+ "description": "Named entity: theta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b8"
+ ]
+ ]
+ },
+ {
+ "input": "&thetasym",
+ "description": "Bad named entity: thetasym without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&thetasym"
+ ]
+ ]
+ },
+ {
+ "input": "&thetasym;",
+ "description": "Named entity: thetasym; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d1"
+ ]
+ ]
+ },
+ {
+ "input": "&thetav",
+ "description": "Bad named entity: thetav without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&thetav"
+ ]
+ ]
+ },
+ {
+ "input": "&thetav;",
+ "description": "Named entity: thetav; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d1"
+ ]
+ ]
+ },
+ {
+ "input": "&thickapprox",
+ "description": "Bad named entity: thickapprox without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&thickapprox"
+ ]
+ ]
+ },
+ {
+ "input": "&thickapprox;",
+ "description": "Named entity: thickapprox; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2248"
+ ]
+ ]
+ },
+ {
+ "input": "&thicksim",
+ "description": "Bad named entity: thicksim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&thicksim"
+ ]
+ ]
+ },
+ {
+ "input": "&thicksim;",
+ "description": "Named entity: thicksim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223c"
+ ]
+ ]
+ },
+ {
+ "input": "&thinsp",
+ "description": "Bad named entity: thinsp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&thinsp"
+ ]
+ ]
+ },
+ {
+ "input": "&thinsp;",
+ "description": "Named entity: thinsp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2009"
+ ]
+ ]
+ },
+ {
+ "input": "&thkap",
+ "description": "Bad named entity: thkap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&thkap"
+ ]
+ ]
+ },
+ {
+ "input": "&thkap;",
+ "description": "Named entity: thkap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2248"
+ ]
+ ]
+ },
+ {
+ "input": "&thksim",
+ "description": "Bad named entity: thksim without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&thksim"
+ ]
+ ]
+ },
+ {
+ "input": "&thksim;",
+ "description": "Named entity: thksim; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u223c"
+ ]
+ ]
+ },
+ {
+ "input": "&thorn",
+ "description": "Named entity: thorn without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00fe"
+ ]
+ ]
+ },
+ {
+ "input": "&thorn;",
+ "description": "Named entity: thorn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00fe"
+ ]
+ ]
+ },
+ {
+ "input": "&tilde",
+ "description": "Bad named entity: tilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tilde"
+ ]
+ ]
+ },
+ {
+ "input": "&tilde;",
+ "description": "Named entity: tilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u02dc"
+ ]
+ ]
+ },
+ {
+ "input": "&times",
+ "description": "Named entity: times without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00d7"
+ ]
+ ]
+ },
+ {
+ "input": "&times;",
+ "description": "Named entity: times; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00d7"
+ ]
+ ]
+ },
+ {
+ "input": "&timesb;",
+ "description": "Named entity: timesb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a0"
+ ]
+ ]
+ },
+ {
+ "input": "&timesbar;",
+ "description": "Named entity: timesbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a31"
+ ]
+ ]
+ },
+ {
+ "input": "&timesd;",
+ "description": "Named entity: timesd; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a30"
+ ]
+ ]
+ },
+ {
+ "input": "&tint",
+ "description": "Bad named entity: tint without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tint"
+ ]
+ ]
+ },
+ {
+ "input": "&tint;",
+ "description": "Named entity: tint; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u222d"
+ ]
+ ]
+ },
+ {
+ "input": "&toea",
+ "description": "Bad named entity: toea without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&toea"
+ ]
+ ]
+ },
+ {
+ "input": "&toea;",
+ "description": "Named entity: toea; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2928"
+ ]
+ ]
+ },
+ {
+ "input": "&top",
+ "description": "Bad named entity: top without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&top"
+ ]
+ ]
+ },
+ {
+ "input": "&top;",
+ "description": "Named entity: top; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a4"
+ ]
+ ]
+ },
+ {
+ "input": "&topbot",
+ "description": "Bad named entity: topbot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&topbot"
+ ]
+ ]
+ },
+ {
+ "input": "&topbot;",
+ "description": "Named entity: topbot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2336"
+ ]
+ ]
+ },
+ {
+ "input": "&topcir",
+ "description": "Bad named entity: topcir without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&topcir"
+ ]
+ ]
+ },
+ {
+ "input": "&topcir;",
+ "description": "Named entity: topcir; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2af1"
+ ]
+ ]
+ },
+ {
+ "input": "&topf",
+ "description": "Bad named entity: topf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&topf"
+ ]
+ ]
+ },
+ {
+ "input": "&topf;",
+ "description": "Named entity: topf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd65"
+ ]
+ ]
+ },
+ {
+ "input": "&topfork",
+ "description": "Bad named entity: topfork without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&topfork"
+ ]
+ ]
+ },
+ {
+ "input": "&topfork;",
+ "description": "Named entity: topfork; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ada"
+ ]
+ ]
+ },
+ {
+ "input": "&tosa",
+ "description": "Bad named entity: tosa without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tosa"
+ ]
+ ]
+ },
+ {
+ "input": "&tosa;",
+ "description": "Named entity: tosa; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2929"
+ ]
+ ]
+ },
+ {
+ "input": "&tprime",
+ "description": "Bad named entity: tprime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tprime"
+ ]
+ ]
+ },
+ {
+ "input": "&tprime;",
+ "description": "Named entity: tprime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2034"
+ ]
+ ]
+ },
+ {
+ "input": "&trade",
+ "description": "Bad named entity: trade without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&trade"
+ ]
+ ]
+ },
+ {
+ "input": "&trade;",
+ "description": "Named entity: trade; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2122"
+ ]
+ ]
+ },
+ {
+ "input": "&triangle",
+ "description": "Bad named entity: triangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&triangle"
+ ]
+ ]
+ },
+ {
+ "input": "&triangle;",
+ "description": "Named entity: triangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b5"
+ ]
+ ]
+ },
+ {
+ "input": "&triangledown",
+ "description": "Bad named entity: triangledown without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&triangledown"
+ ]
+ ]
+ },
+ {
+ "input": "&triangledown;",
+ "description": "Named entity: triangledown; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25bf"
+ ]
+ ]
+ },
+ {
+ "input": "&triangleleft",
+ "description": "Bad named entity: triangleleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&triangleleft"
+ ]
+ ]
+ },
+ {
+ "input": "&triangleleft;",
+ "description": "Named entity: triangleleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25c3"
+ ]
+ ]
+ },
+ {
+ "input": "&trianglelefteq",
+ "description": "Bad named entity: trianglelefteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&trianglelefteq"
+ ]
+ ]
+ },
+ {
+ "input": "&trianglelefteq;",
+ "description": "Named entity: trianglelefteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b4"
+ ]
+ ]
+ },
+ {
+ "input": "&triangleq",
+ "description": "Bad named entity: triangleq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&triangleq"
+ ]
+ ]
+ },
+ {
+ "input": "&triangleq;",
+ "description": "Named entity: triangleq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u225c"
+ ]
+ ]
+ },
+ {
+ "input": "&triangleright",
+ "description": "Bad named entity: triangleright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&triangleright"
+ ]
+ ]
+ },
+ {
+ "input": "&triangleright;",
+ "description": "Named entity: triangleright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b9"
+ ]
+ ]
+ },
+ {
+ "input": "&trianglerighteq",
+ "description": "Bad named entity: trianglerighteq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&trianglerighteq"
+ ]
+ ]
+ },
+ {
+ "input": "&trianglerighteq;",
+ "description": "Named entity: trianglerighteq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b5"
+ ]
+ ]
+ },
+ {
+ "input": "&tridot",
+ "description": "Bad named entity: tridot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tridot"
+ ]
+ ]
+ },
+ {
+ "input": "&tridot;",
+ "description": "Named entity: tridot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25ec"
+ ]
+ ]
+ },
+ {
+ "input": "&trie",
+ "description": "Bad named entity: trie without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&trie"
+ ]
+ ]
+ },
+ {
+ "input": "&trie;",
+ "description": "Named entity: trie; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u225c"
+ ]
+ ]
+ },
+ {
+ "input": "&triminus",
+ "description": "Bad named entity: triminus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&triminus"
+ ]
+ ]
+ },
+ {
+ "input": "&triminus;",
+ "description": "Named entity: triminus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a3a"
+ ]
+ ]
+ },
+ {
+ "input": "&triplus",
+ "description": "Bad named entity: triplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&triplus"
+ ]
+ ]
+ },
+ {
+ "input": "&triplus;",
+ "description": "Named entity: triplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a39"
+ ]
+ ]
+ },
+ {
+ "input": "&trisb",
+ "description": "Bad named entity: trisb without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&trisb"
+ ]
+ ]
+ },
+ {
+ "input": "&trisb;",
+ "description": "Named entity: trisb; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29cd"
+ ]
+ ]
+ },
+ {
+ "input": "&tritime",
+ "description": "Bad named entity: tritime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tritime"
+ ]
+ ]
+ },
+ {
+ "input": "&tritime;",
+ "description": "Named entity: tritime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a3b"
+ ]
+ ]
+ },
+ {
+ "input": "&trpezium",
+ "description": "Bad named entity: trpezium without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&trpezium"
+ ]
+ ]
+ },
+ {
+ "input": "&trpezium;",
+ "description": "Named entity: trpezium; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u23e2"
+ ]
+ ]
+ },
+ {
+ "input": "&tscr",
+ "description": "Bad named entity: tscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tscr"
+ ]
+ ]
+ },
+ {
+ "input": "&tscr;",
+ "description": "Named entity: tscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcc9"
+ ]
+ ]
+ },
+ {
+ "input": "&tscy",
+ "description": "Bad named entity: tscy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tscy"
+ ]
+ ]
+ },
+ {
+ "input": "&tscy;",
+ "description": "Named entity: tscy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0446"
+ ]
+ ]
+ },
+ {
+ "input": "&tshcy",
+ "description": "Bad named entity: tshcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tshcy"
+ ]
+ ]
+ },
+ {
+ "input": "&tshcy;",
+ "description": "Named entity: tshcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u045b"
+ ]
+ ]
+ },
+ {
+ "input": "&tstrok",
+ "description": "Bad named entity: tstrok without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&tstrok"
+ ]
+ ]
+ },
+ {
+ "input": "&tstrok;",
+ "description": "Named entity: tstrok; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0167"
+ ]
+ ]
+ },
+ {
+ "input": "&twixt",
+ "description": "Bad named entity: twixt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&twixt"
+ ]
+ ]
+ },
+ {
+ "input": "&twixt;",
+ "description": "Named entity: twixt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u226c"
+ ]
+ ]
+ },
+ {
+ "input": "&twoheadleftarrow",
+ "description": "Bad named entity: twoheadleftarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&twoheadleftarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&twoheadleftarrow;",
+ "description": "Named entity: twoheadleftarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u219e"
+ ]
+ ]
+ },
+ {
+ "input": "&twoheadrightarrow",
+ "description": "Bad named entity: twoheadrightarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&twoheadrightarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&twoheadrightarrow;",
+ "description": "Named entity: twoheadrightarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21a0"
+ ]
+ ]
+ },
+ {
+ "input": "&uArr",
+ "description": "Bad named entity: uArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uArr"
+ ]
+ ]
+ },
+ {
+ "input": "&uArr;",
+ "description": "Named entity: uArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d1"
+ ]
+ ]
+ },
+ {
+ "input": "&uHar",
+ "description": "Bad named entity: uHar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uHar"
+ ]
+ ]
+ },
+ {
+ "input": "&uHar;",
+ "description": "Named entity: uHar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2963"
+ ]
+ ]
+ },
+ {
+ "input": "&uacute",
+ "description": "Named entity: uacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00fa"
+ ]
+ ]
+ },
+ {
+ "input": "&uacute;",
+ "description": "Named entity: uacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00fa"
+ ]
+ ]
+ },
+ {
+ "input": "&uarr",
+ "description": "Bad named entity: uarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uarr"
+ ]
+ ]
+ },
+ {
+ "input": "&uarr;",
+ "description": "Named entity: uarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2191"
+ ]
+ ]
+ },
+ {
+ "input": "&ubrcy",
+ "description": "Bad named entity: ubrcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ubrcy"
+ ]
+ ]
+ },
+ {
+ "input": "&ubrcy;",
+ "description": "Named entity: ubrcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u045e"
+ ]
+ ]
+ },
+ {
+ "input": "&ubreve",
+ "description": "Bad named entity: ubreve without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ubreve"
+ ]
+ ]
+ },
+ {
+ "input": "&ubreve;",
+ "description": "Named entity: ubreve; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u016d"
+ ]
+ ]
+ },
+ {
+ "input": "&ucirc",
+ "description": "Named entity: ucirc without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00fb"
+ ]
+ ]
+ },
+ {
+ "input": "&ucirc;",
+ "description": "Named entity: ucirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00fb"
+ ]
+ ]
+ },
+ {
+ "input": "&ucy",
+ "description": "Bad named entity: ucy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ucy"
+ ]
+ ]
+ },
+ {
+ "input": "&ucy;",
+ "description": "Named entity: ucy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0443"
+ ]
+ ]
+ },
+ {
+ "input": "&udarr",
+ "description": "Bad named entity: udarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&udarr"
+ ]
+ ]
+ },
+ {
+ "input": "&udarr;",
+ "description": "Named entity: udarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c5"
+ ]
+ ]
+ },
+ {
+ "input": "&udblac",
+ "description": "Bad named entity: udblac without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&udblac"
+ ]
+ ]
+ },
+ {
+ "input": "&udblac;",
+ "description": "Named entity: udblac; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0171"
+ ]
+ ]
+ },
+ {
+ "input": "&udhar",
+ "description": "Bad named entity: udhar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&udhar"
+ ]
+ ]
+ },
+ {
+ "input": "&udhar;",
+ "description": "Named entity: udhar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u296e"
+ ]
+ ]
+ },
+ {
+ "input": "&ufisht",
+ "description": "Bad named entity: ufisht without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ufisht"
+ ]
+ ]
+ },
+ {
+ "input": "&ufisht;",
+ "description": "Named entity: ufisht; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u297e"
+ ]
+ ]
+ },
+ {
+ "input": "&ufr",
+ "description": "Bad named entity: ufr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ufr"
+ ]
+ ]
+ },
+ {
+ "input": "&ufr;",
+ "description": "Named entity: ufr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd32"
+ ]
+ ]
+ },
+ {
+ "input": "&ugrave",
+ "description": "Named entity: ugrave without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00f9"
+ ]
+ ]
+ },
+ {
+ "input": "&ugrave;",
+ "description": "Named entity: ugrave; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00f9"
+ ]
+ ]
+ },
+ {
+ "input": "&uharl",
+ "description": "Bad named entity: uharl without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uharl"
+ ]
+ ]
+ },
+ {
+ "input": "&uharl;",
+ "description": "Named entity: uharl; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bf"
+ ]
+ ]
+ },
+ {
+ "input": "&uharr",
+ "description": "Bad named entity: uharr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uharr"
+ ]
+ ]
+ },
+ {
+ "input": "&uharr;",
+ "description": "Named entity: uharr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21be"
+ ]
+ ]
+ },
+ {
+ "input": "&uhblk",
+ "description": "Bad named entity: uhblk without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uhblk"
+ ]
+ ]
+ },
+ {
+ "input": "&uhblk;",
+ "description": "Named entity: uhblk; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2580"
+ ]
+ ]
+ },
+ {
+ "input": "&ulcorn",
+ "description": "Bad named entity: ulcorn without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ulcorn"
+ ]
+ ]
+ },
+ {
+ "input": "&ulcorn;",
+ "description": "Named entity: ulcorn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u231c"
+ ]
+ ]
+ },
+ {
+ "input": "&ulcorner",
+ "description": "Bad named entity: ulcorner without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ulcorner"
+ ]
+ ]
+ },
+ {
+ "input": "&ulcorner;",
+ "description": "Named entity: ulcorner; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u231c"
+ ]
+ ]
+ },
+ {
+ "input": "&ulcrop",
+ "description": "Bad named entity: ulcrop without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ulcrop"
+ ]
+ ]
+ },
+ {
+ "input": "&ulcrop;",
+ "description": "Named entity: ulcrop; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u230f"
+ ]
+ ]
+ },
+ {
+ "input": "&ultri",
+ "description": "Bad named entity: ultri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ultri"
+ ]
+ ]
+ },
+ {
+ "input": "&ultri;",
+ "description": "Named entity: ultri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25f8"
+ ]
+ ]
+ },
+ {
+ "input": "&umacr",
+ "description": "Bad named entity: umacr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&umacr"
+ ]
+ ]
+ },
+ {
+ "input": "&umacr;",
+ "description": "Named entity: umacr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u016b"
+ ]
+ ]
+ },
+ {
+ "input": "&uml",
+ "description": "Named entity: uml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a8"
+ ]
+ ]
+ },
+ {
+ "input": "&uml;",
+ "description": "Named entity: uml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a8"
+ ]
+ ]
+ },
+ {
+ "input": "&uogon",
+ "description": "Bad named entity: uogon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uogon"
+ ]
+ ]
+ },
+ {
+ "input": "&uogon;",
+ "description": "Named entity: uogon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0173"
+ ]
+ ]
+ },
+ {
+ "input": "&uopf",
+ "description": "Bad named entity: uopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uopf"
+ ]
+ ]
+ },
+ {
+ "input": "&uopf;",
+ "description": "Named entity: uopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd66"
+ ]
+ ]
+ },
+ {
+ "input": "&uparrow",
+ "description": "Bad named entity: uparrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uparrow"
+ ]
+ ]
+ },
+ {
+ "input": "&uparrow;",
+ "description": "Named entity: uparrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2191"
+ ]
+ ]
+ },
+ {
+ "input": "&updownarrow",
+ "description": "Bad named entity: updownarrow without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&updownarrow"
+ ]
+ ]
+ },
+ {
+ "input": "&updownarrow;",
+ "description": "Named entity: updownarrow; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2195"
+ ]
+ ]
+ },
+ {
+ "input": "&upharpoonleft",
+ "description": "Bad named entity: upharpoonleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&upharpoonleft"
+ ]
+ ]
+ },
+ {
+ "input": "&upharpoonleft;",
+ "description": "Named entity: upharpoonleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21bf"
+ ]
+ ]
+ },
+ {
+ "input": "&upharpoonright",
+ "description": "Bad named entity: upharpoonright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&upharpoonright"
+ ]
+ ]
+ },
+ {
+ "input": "&upharpoonright;",
+ "description": "Named entity: upharpoonright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21be"
+ ]
+ ]
+ },
+ {
+ "input": "&uplus",
+ "description": "Bad named entity: uplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uplus"
+ ]
+ ]
+ },
+ {
+ "input": "&uplus;",
+ "description": "Named entity: uplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228e"
+ ]
+ ]
+ },
+ {
+ "input": "&upsi",
+ "description": "Bad named entity: upsi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&upsi"
+ ]
+ ]
+ },
+ {
+ "input": "&upsi;",
+ "description": "Named entity: upsi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c5"
+ ]
+ ]
+ },
+ {
+ "input": "&upsih",
+ "description": "Bad named entity: upsih without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&upsih"
+ ]
+ ]
+ },
+ {
+ "input": "&upsih;",
+ "description": "Named entity: upsih; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d2"
+ ]
+ ]
+ },
+ {
+ "input": "&upsilon",
+ "description": "Bad named entity: upsilon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&upsilon"
+ ]
+ ]
+ },
+ {
+ "input": "&upsilon;",
+ "description": "Named entity: upsilon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c5"
+ ]
+ ]
+ },
+ {
+ "input": "&upuparrows",
+ "description": "Bad named entity: upuparrows without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&upuparrows"
+ ]
+ ]
+ },
+ {
+ "input": "&upuparrows;",
+ "description": "Named entity: upuparrows; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c8"
+ ]
+ ]
+ },
+ {
+ "input": "&urcorn",
+ "description": "Bad named entity: urcorn without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&urcorn"
+ ]
+ ]
+ },
+ {
+ "input": "&urcorn;",
+ "description": "Named entity: urcorn; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u231d"
+ ]
+ ]
+ },
+ {
+ "input": "&urcorner",
+ "description": "Bad named entity: urcorner without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&urcorner"
+ ]
+ ]
+ },
+ {
+ "input": "&urcorner;",
+ "description": "Named entity: urcorner; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u231d"
+ ]
+ ]
+ },
+ {
+ "input": "&urcrop",
+ "description": "Bad named entity: urcrop without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&urcrop"
+ ]
+ ]
+ },
+ {
+ "input": "&urcrop;",
+ "description": "Named entity: urcrop; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u230e"
+ ]
+ ]
+ },
+ {
+ "input": "&uring",
+ "description": "Bad named entity: uring without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uring"
+ ]
+ ]
+ },
+ {
+ "input": "&uring;",
+ "description": "Named entity: uring; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u016f"
+ ]
+ ]
+ },
+ {
+ "input": "&urtri",
+ "description": "Bad named entity: urtri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&urtri"
+ ]
+ ]
+ },
+ {
+ "input": "&urtri;",
+ "description": "Named entity: urtri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25f9"
+ ]
+ ]
+ },
+ {
+ "input": "&uscr",
+ "description": "Bad named entity: uscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uscr"
+ ]
+ ]
+ },
+ {
+ "input": "&uscr;",
+ "description": "Named entity: uscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcca"
+ ]
+ ]
+ },
+ {
+ "input": "&utdot",
+ "description": "Bad named entity: utdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&utdot"
+ ]
+ ]
+ },
+ {
+ "input": "&utdot;",
+ "description": "Named entity: utdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22f0"
+ ]
+ ]
+ },
+ {
+ "input": "&utilde",
+ "description": "Bad named entity: utilde without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&utilde"
+ ]
+ ]
+ },
+ {
+ "input": "&utilde;",
+ "description": "Named entity: utilde; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0169"
+ ]
+ ]
+ },
+ {
+ "input": "&utri",
+ "description": "Bad named entity: utri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&utri"
+ ]
+ ]
+ },
+ {
+ "input": "&utri;",
+ "description": "Named entity: utri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b5"
+ ]
+ ]
+ },
+ {
+ "input": "&utrif",
+ "description": "Bad named entity: utrif without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&utrif"
+ ]
+ ]
+ },
+ {
+ "input": "&utrif;",
+ "description": "Named entity: utrif; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b4"
+ ]
+ ]
+ },
+ {
+ "input": "&uuarr",
+ "description": "Bad named entity: uuarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uuarr"
+ ]
+ ]
+ },
+ {
+ "input": "&uuarr;",
+ "description": "Named entity: uuarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21c8"
+ ]
+ ]
+ },
+ {
+ "input": "&uuml",
+ "description": "Named entity: uuml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00fc"
+ ]
+ ]
+ },
+ {
+ "input": "&uuml;",
+ "description": "Named entity: uuml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00fc"
+ ]
+ ]
+ },
+ {
+ "input": "&uwangle",
+ "description": "Bad named entity: uwangle without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&uwangle"
+ ]
+ ]
+ },
+ {
+ "input": "&uwangle;",
+ "description": "Named entity: uwangle; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u29a7"
+ ]
+ ]
+ },
+ {
+ "input": "&vArr",
+ "description": "Bad named entity: vArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vArr"
+ ]
+ ]
+ },
+ {
+ "input": "&vArr;",
+ "description": "Named entity: vArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21d5"
+ ]
+ ]
+ },
+ {
+ "input": "&vBar",
+ "description": "Bad named entity: vBar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vBar"
+ ]
+ ]
+ },
+ {
+ "input": "&vBar;",
+ "description": "Named entity: vBar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ae8"
+ ]
+ ]
+ },
+ {
+ "input": "&vBarv",
+ "description": "Bad named entity: vBarv without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vBarv"
+ ]
+ ]
+ },
+ {
+ "input": "&vBarv;",
+ "description": "Named entity: vBarv; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2ae9"
+ ]
+ ]
+ },
+ {
+ "input": "&vDash",
+ "description": "Bad named entity: vDash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vDash"
+ ]
+ ]
+ },
+ {
+ "input": "&vDash;",
+ "description": "Named entity: vDash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a8"
+ ]
+ ]
+ },
+ {
+ "input": "&vangrt",
+ "description": "Bad named entity: vangrt without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vangrt"
+ ]
+ ]
+ },
+ {
+ "input": "&vangrt;",
+ "description": "Named entity: vangrt; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u299c"
+ ]
+ ]
+ },
+ {
+ "input": "&varepsilon",
+ "description": "Bad named entity: varepsilon without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varepsilon"
+ ]
+ ]
+ },
+ {
+ "input": "&varepsilon;",
+ "description": "Named entity: varepsilon; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f5"
+ ]
+ ]
+ },
+ {
+ "input": "&varkappa",
+ "description": "Bad named entity: varkappa without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varkappa"
+ ]
+ ]
+ },
+ {
+ "input": "&varkappa;",
+ "description": "Named entity: varkappa; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f0"
+ ]
+ ]
+ },
+ {
+ "input": "&varnothing",
+ "description": "Bad named entity: varnothing without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varnothing"
+ ]
+ ]
+ },
+ {
+ "input": "&varnothing;",
+ "description": "Named entity: varnothing; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2205"
+ ]
+ ]
+ },
+ {
+ "input": "&varphi",
+ "description": "Bad named entity: varphi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varphi"
+ ]
+ ]
+ },
+ {
+ "input": "&varphi;",
+ "description": "Named entity: varphi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d5"
+ ]
+ ]
+ },
+ {
+ "input": "&varpi",
+ "description": "Bad named entity: varpi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varpi"
+ ]
+ ]
+ },
+ {
+ "input": "&varpi;",
+ "description": "Named entity: varpi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d6"
+ ]
+ ]
+ },
+ {
+ "input": "&varpropto",
+ "description": "Bad named entity: varpropto without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varpropto"
+ ]
+ ]
+ },
+ {
+ "input": "&varpropto;",
+ "description": "Named entity: varpropto; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221d"
+ ]
+ ]
+ },
+ {
+ "input": "&varr",
+ "description": "Bad named entity: varr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varr"
+ ]
+ ]
+ },
+ {
+ "input": "&varr;",
+ "description": "Named entity: varr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2195"
+ ]
+ ]
+ },
+ {
+ "input": "&varrho",
+ "description": "Bad named entity: varrho without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varrho"
+ ]
+ ]
+ },
+ {
+ "input": "&varrho;",
+ "description": "Named entity: varrho; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03f1"
+ ]
+ ]
+ },
+ {
+ "input": "&varsigma",
+ "description": "Bad named entity: varsigma without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varsigma"
+ ]
+ ]
+ },
+ {
+ "input": "&varsigma;",
+ "description": "Named entity: varsigma; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03c2"
+ ]
+ ]
+ },
+ {
+ "input": "&varsubsetneq",
+ "description": "Bad named entity: varsubsetneq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varsubsetneq"
+ ]
+ ]
+ },
+ {
+ "input": "&varsubsetneq;",
+ "description": "Named entity: varsubsetneq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228a\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&varsubsetneqq",
+ "description": "Bad named entity: varsubsetneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varsubsetneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&varsubsetneqq;",
+ "description": "Named entity: varsubsetneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acb\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&varsupsetneq",
+ "description": "Bad named entity: varsupsetneq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varsupsetneq"
+ ]
+ ]
+ },
+ {
+ "input": "&varsupsetneq;",
+ "description": "Named entity: varsupsetneq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228b\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&varsupsetneqq",
+ "description": "Bad named entity: varsupsetneqq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&varsupsetneqq"
+ ]
+ ]
+ },
+ {
+ "input": "&varsupsetneqq;",
+ "description": "Named entity: varsupsetneqq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acc\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&vartheta",
+ "description": "Bad named entity: vartheta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vartheta"
+ ]
+ ]
+ },
+ {
+ "input": "&vartheta;",
+ "description": "Named entity: vartheta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03d1"
+ ]
+ ]
+ },
+ {
+ "input": "&vartriangleleft",
+ "description": "Bad named entity: vartriangleleft without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vartriangleleft"
+ ]
+ ]
+ },
+ {
+ "input": "&vartriangleleft;",
+ "description": "Named entity: vartriangleleft; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b2"
+ ]
+ ]
+ },
+ {
+ "input": "&vartriangleright",
+ "description": "Bad named entity: vartriangleright without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vartriangleright"
+ ]
+ ]
+ },
+ {
+ "input": "&vartriangleright;",
+ "description": "Named entity: vartriangleright; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b3"
+ ]
+ ]
+ },
+ {
+ "input": "&vcy",
+ "description": "Bad named entity: vcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vcy"
+ ]
+ ]
+ },
+ {
+ "input": "&vcy;",
+ "description": "Named entity: vcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0432"
+ ]
+ ]
+ },
+ {
+ "input": "&vdash",
+ "description": "Bad named entity: vdash without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vdash"
+ ]
+ ]
+ },
+ {
+ "input": "&vdash;",
+ "description": "Named entity: vdash; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22a2"
+ ]
+ ]
+ },
+ {
+ "input": "&vee",
+ "description": "Bad named entity: vee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vee"
+ ]
+ ]
+ },
+ {
+ "input": "&vee;",
+ "description": "Named entity: vee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2228"
+ ]
+ ]
+ },
+ {
+ "input": "&veebar",
+ "description": "Bad named entity: veebar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&veebar"
+ ]
+ ]
+ },
+ {
+ "input": "&veebar;",
+ "description": "Named entity: veebar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22bb"
+ ]
+ ]
+ },
+ {
+ "input": "&veeeq",
+ "description": "Bad named entity: veeeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&veeeq"
+ ]
+ ]
+ },
+ {
+ "input": "&veeeq;",
+ "description": "Named entity: veeeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u225a"
+ ]
+ ]
+ },
+ {
+ "input": "&vellip",
+ "description": "Bad named entity: vellip without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vellip"
+ ]
+ ]
+ },
+ {
+ "input": "&vellip;",
+ "description": "Named entity: vellip; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22ee"
+ ]
+ ]
+ },
+ {
+ "input": "&verbar",
+ "description": "Bad named entity: verbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&verbar"
+ ]
+ ]
+ },
+ {
+ "input": "&verbar;",
+ "description": "Named entity: verbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "|"
+ ]
+ ]
+ },
+ {
+ "input": "&vert",
+ "description": "Bad named entity: vert without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vert"
+ ]
+ ]
+ },
+ {
+ "input": "&vert;",
+ "description": "Named entity: vert; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "|"
+ ]
+ ]
+ },
+ {
+ "input": "&vfr",
+ "description": "Bad named entity: vfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vfr"
+ ]
+ ]
+ },
+ {
+ "input": "&vfr;",
+ "description": "Named entity: vfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd33"
+ ]
+ ]
+ },
+ {
+ "input": "&vltri",
+ "description": "Bad named entity: vltri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vltri"
+ ]
+ ]
+ },
+ {
+ "input": "&vltri;",
+ "description": "Named entity: vltri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b2"
+ ]
+ ]
+ },
+ {
+ "input": "&vnsub",
+ "description": "Bad named entity: vnsub without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vnsub"
+ ]
+ ]
+ },
+ {
+ "input": "&vnsub;",
+ "description": "Named entity: vnsub; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2282\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&vnsup",
+ "description": "Bad named entity: vnsup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vnsup"
+ ]
+ ]
+ },
+ {
+ "input": "&vnsup;",
+ "description": "Named entity: vnsup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2283\u20d2"
+ ]
+ ]
+ },
+ {
+ "input": "&vopf",
+ "description": "Bad named entity: vopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vopf"
+ ]
+ ]
+ },
+ {
+ "input": "&vopf;",
+ "description": "Named entity: vopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd67"
+ ]
+ ]
+ },
+ {
+ "input": "&vprop",
+ "description": "Bad named entity: vprop without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vprop"
+ ]
+ ]
+ },
+ {
+ "input": "&vprop;",
+ "description": "Named entity: vprop; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u221d"
+ ]
+ ]
+ },
+ {
+ "input": "&vrtri",
+ "description": "Bad named entity: vrtri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vrtri"
+ ]
+ ]
+ },
+ {
+ "input": "&vrtri;",
+ "description": "Named entity: vrtri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22b3"
+ ]
+ ]
+ },
+ {
+ "input": "&vscr",
+ "description": "Bad named entity: vscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vscr"
+ ]
+ ]
+ },
+ {
+ "input": "&vscr;",
+ "description": "Named entity: vscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udccb"
+ ]
+ ]
+ },
+ {
+ "input": "&vsubnE",
+ "description": "Bad named entity: vsubnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vsubnE"
+ ]
+ ]
+ },
+ {
+ "input": "&vsubnE;",
+ "description": "Named entity: vsubnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acb\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&vsubne",
+ "description": "Bad named entity: vsubne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vsubne"
+ ]
+ ]
+ },
+ {
+ "input": "&vsubne;",
+ "description": "Named entity: vsubne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228a\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&vsupnE",
+ "description": "Bad named entity: vsupnE without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vsupnE"
+ ]
+ ]
+ },
+ {
+ "input": "&vsupnE;",
+ "description": "Named entity: vsupnE; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2acc\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&vsupne",
+ "description": "Bad named entity: vsupne without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vsupne"
+ ]
+ ]
+ },
+ {
+ "input": "&vsupne;",
+ "description": "Named entity: vsupne; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u228b\ufe00"
+ ]
+ ]
+ },
+ {
+ "input": "&vzigzag",
+ "description": "Bad named entity: vzigzag without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&vzigzag"
+ ]
+ ]
+ },
+ {
+ "input": "&vzigzag;",
+ "description": "Named entity: vzigzag; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u299a"
+ ]
+ ]
+ },
+ {
+ "input": "&wcirc",
+ "description": "Bad named entity: wcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&wcirc;",
+ "description": "Named entity: wcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0175"
+ ]
+ ]
+ },
+ {
+ "input": "&wedbar",
+ "description": "Bad named entity: wedbar without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wedbar"
+ ]
+ ]
+ },
+ {
+ "input": "&wedbar;",
+ "description": "Named entity: wedbar; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a5f"
+ ]
+ ]
+ },
+ {
+ "input": "&wedge",
+ "description": "Bad named entity: wedge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wedge"
+ ]
+ ]
+ },
+ {
+ "input": "&wedge;",
+ "description": "Named entity: wedge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2227"
+ ]
+ ]
+ },
+ {
+ "input": "&wedgeq",
+ "description": "Bad named entity: wedgeq without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wedgeq"
+ ]
+ ]
+ },
+ {
+ "input": "&wedgeq;",
+ "description": "Named entity: wedgeq; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2259"
+ ]
+ ]
+ },
+ {
+ "input": "&weierp",
+ "description": "Bad named entity: weierp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&weierp"
+ ]
+ ]
+ },
+ {
+ "input": "&weierp;",
+ "description": "Named entity: weierp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2118"
+ ]
+ ]
+ },
+ {
+ "input": "&wfr",
+ "description": "Bad named entity: wfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wfr"
+ ]
+ ]
+ },
+ {
+ "input": "&wfr;",
+ "description": "Named entity: wfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd34"
+ ]
+ ]
+ },
+ {
+ "input": "&wopf",
+ "description": "Bad named entity: wopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wopf"
+ ]
+ ]
+ },
+ {
+ "input": "&wopf;",
+ "description": "Named entity: wopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd68"
+ ]
+ ]
+ },
+ {
+ "input": "&wp",
+ "description": "Bad named entity: wp without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wp"
+ ]
+ ]
+ },
+ {
+ "input": "&wp;",
+ "description": "Named entity: wp; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2118"
+ ]
+ ]
+ },
+ {
+ "input": "&wr",
+ "description": "Bad named entity: wr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wr"
+ ]
+ ]
+ },
+ {
+ "input": "&wr;",
+ "description": "Named entity: wr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2240"
+ ]
+ ]
+ },
+ {
+ "input": "&wreath",
+ "description": "Bad named entity: wreath without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wreath"
+ ]
+ ]
+ },
+ {
+ "input": "&wreath;",
+ "description": "Named entity: wreath; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2240"
+ ]
+ ]
+ },
+ {
+ "input": "&wscr",
+ "description": "Bad named entity: wscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&wscr"
+ ]
+ ]
+ },
+ {
+ "input": "&wscr;",
+ "description": "Named entity: wscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udccc"
+ ]
+ ]
+ },
+ {
+ "input": "&xcap",
+ "description": "Bad named entity: xcap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xcap"
+ ]
+ ]
+ },
+ {
+ "input": "&xcap;",
+ "description": "Named entity: xcap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c2"
+ ]
+ ]
+ },
+ {
+ "input": "&xcirc",
+ "description": "Bad named entity: xcirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xcirc"
+ ]
+ ]
+ },
+ {
+ "input": "&xcirc;",
+ "description": "Named entity: xcirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25ef"
+ ]
+ ]
+ },
+ {
+ "input": "&xcup",
+ "description": "Bad named entity: xcup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xcup"
+ ]
+ ]
+ },
+ {
+ "input": "&xcup;",
+ "description": "Named entity: xcup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c3"
+ ]
+ ]
+ },
+ {
+ "input": "&xdtri",
+ "description": "Bad named entity: xdtri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xdtri"
+ ]
+ ]
+ },
+ {
+ "input": "&xdtri;",
+ "description": "Named entity: xdtri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25bd"
+ ]
+ ]
+ },
+ {
+ "input": "&xfr",
+ "description": "Bad named entity: xfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xfr"
+ ]
+ ]
+ },
+ {
+ "input": "&xfr;",
+ "description": "Named entity: xfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd35"
+ ]
+ ]
+ },
+ {
+ "input": "&xhArr",
+ "description": "Bad named entity: xhArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xhArr"
+ ]
+ ]
+ },
+ {
+ "input": "&xhArr;",
+ "description": "Named entity: xhArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27fa"
+ ]
+ ]
+ },
+ {
+ "input": "&xharr",
+ "description": "Bad named entity: xharr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xharr"
+ ]
+ ]
+ },
+ {
+ "input": "&xharr;",
+ "description": "Named entity: xharr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f7"
+ ]
+ ]
+ },
+ {
+ "input": "&xi",
+ "description": "Bad named entity: xi without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xi"
+ ]
+ ]
+ },
+ {
+ "input": "&xi;",
+ "description": "Named entity: xi; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03be"
+ ]
+ ]
+ },
+ {
+ "input": "&xlArr",
+ "description": "Bad named entity: xlArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xlArr"
+ ]
+ ]
+ },
+ {
+ "input": "&xlArr;",
+ "description": "Named entity: xlArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f8"
+ ]
+ ]
+ },
+ {
+ "input": "&xlarr",
+ "description": "Bad named entity: xlarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xlarr"
+ ]
+ ]
+ },
+ {
+ "input": "&xlarr;",
+ "description": "Named entity: xlarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f5"
+ ]
+ ]
+ },
+ {
+ "input": "&xmap",
+ "description": "Bad named entity: xmap without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xmap"
+ ]
+ ]
+ },
+ {
+ "input": "&xmap;",
+ "description": "Named entity: xmap; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27fc"
+ ]
+ ]
+ },
+ {
+ "input": "&xnis",
+ "description": "Bad named entity: xnis without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xnis"
+ ]
+ ]
+ },
+ {
+ "input": "&xnis;",
+ "description": "Named entity: xnis; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22fb"
+ ]
+ ]
+ },
+ {
+ "input": "&xodot",
+ "description": "Bad named entity: xodot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xodot"
+ ]
+ ]
+ },
+ {
+ "input": "&xodot;",
+ "description": "Named entity: xodot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a00"
+ ]
+ ]
+ },
+ {
+ "input": "&xopf",
+ "description": "Bad named entity: xopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xopf"
+ ]
+ ]
+ },
+ {
+ "input": "&xopf;",
+ "description": "Named entity: xopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd69"
+ ]
+ ]
+ },
+ {
+ "input": "&xoplus",
+ "description": "Bad named entity: xoplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xoplus"
+ ]
+ ]
+ },
+ {
+ "input": "&xoplus;",
+ "description": "Named entity: xoplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a01"
+ ]
+ ]
+ },
+ {
+ "input": "&xotime",
+ "description": "Bad named entity: xotime without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xotime"
+ ]
+ ]
+ },
+ {
+ "input": "&xotime;",
+ "description": "Named entity: xotime; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a02"
+ ]
+ ]
+ },
+ {
+ "input": "&xrArr",
+ "description": "Bad named entity: xrArr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xrArr"
+ ]
+ ]
+ },
+ {
+ "input": "&xrArr;",
+ "description": "Named entity: xrArr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f9"
+ ]
+ ]
+ },
+ {
+ "input": "&xrarr",
+ "description": "Bad named entity: xrarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xrarr"
+ ]
+ ]
+ },
+ {
+ "input": "&xrarr;",
+ "description": "Named entity: xrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u27f6"
+ ]
+ ]
+ },
+ {
+ "input": "&xscr",
+ "description": "Bad named entity: xscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xscr"
+ ]
+ ]
+ },
+ {
+ "input": "&xscr;",
+ "description": "Named entity: xscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udccd"
+ ]
+ ]
+ },
+ {
+ "input": "&xsqcup",
+ "description": "Bad named entity: xsqcup without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xsqcup"
+ ]
+ ]
+ },
+ {
+ "input": "&xsqcup;",
+ "description": "Named entity: xsqcup; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a06"
+ ]
+ ]
+ },
+ {
+ "input": "&xuplus",
+ "description": "Bad named entity: xuplus without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xuplus"
+ ]
+ ]
+ },
+ {
+ "input": "&xuplus;",
+ "description": "Named entity: xuplus; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2a04"
+ ]
+ ]
+ },
+ {
+ "input": "&xutri",
+ "description": "Bad named entity: xutri without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xutri"
+ ]
+ ]
+ },
+ {
+ "input": "&xutri;",
+ "description": "Named entity: xutri; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u25b3"
+ ]
+ ]
+ },
+ {
+ "input": "&xvee",
+ "description": "Bad named entity: xvee without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xvee"
+ ]
+ ]
+ },
+ {
+ "input": "&xvee;",
+ "description": "Named entity: xvee; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c1"
+ ]
+ ]
+ },
+ {
+ "input": "&xwedge",
+ "description": "Bad named entity: xwedge without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&xwedge"
+ ]
+ ]
+ },
+ {
+ "input": "&xwedge;",
+ "description": "Named entity: xwedge; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u22c0"
+ ]
+ ]
+ },
+ {
+ "input": "&yacute",
+ "description": "Named entity: yacute without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00fd"
+ ]
+ ]
+ },
+ {
+ "input": "&yacute;",
+ "description": "Named entity: yacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00fd"
+ ]
+ ]
+ },
+ {
+ "input": "&yacy",
+ "description": "Bad named entity: yacy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&yacy"
+ ]
+ ]
+ },
+ {
+ "input": "&yacy;",
+ "description": "Named entity: yacy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u044f"
+ ]
+ ]
+ },
+ {
+ "input": "&ycirc",
+ "description": "Bad named entity: ycirc without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ycirc"
+ ]
+ ]
+ },
+ {
+ "input": "&ycirc;",
+ "description": "Named entity: ycirc; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0177"
+ ]
+ ]
+ },
+ {
+ "input": "&ycy",
+ "description": "Bad named entity: ycy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&ycy"
+ ]
+ ]
+ },
+ {
+ "input": "&ycy;",
+ "description": "Named entity: ycy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u044b"
+ ]
+ ]
+ },
+ {
+ "input": "&yen",
+ "description": "Named entity: yen without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00a5"
+ ]
+ ]
+ },
+ {
+ "input": "&yen;",
+ "description": "Named entity: yen; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00a5"
+ ]
+ ]
+ },
+ {
+ "input": "&yfr",
+ "description": "Bad named entity: yfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&yfr"
+ ]
+ ]
+ },
+ {
+ "input": "&yfr;",
+ "description": "Named entity: yfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd36"
+ ]
+ ]
+ },
+ {
+ "input": "&yicy",
+ "description": "Bad named entity: yicy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&yicy"
+ ]
+ ]
+ },
+ {
+ "input": "&yicy;",
+ "description": "Named entity: yicy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0457"
+ ]
+ ]
+ },
+ {
+ "input": "&yopf",
+ "description": "Bad named entity: yopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&yopf"
+ ]
+ ]
+ },
+ {
+ "input": "&yopf;",
+ "description": "Named entity: yopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd6a"
+ ]
+ ]
+ },
+ {
+ "input": "&yscr",
+ "description": "Bad named entity: yscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&yscr"
+ ]
+ ]
+ },
+ {
+ "input": "&yscr;",
+ "description": "Named entity: yscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udcce"
+ ]
+ ]
+ },
+ {
+ "input": "&yucy",
+ "description": "Bad named entity: yucy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&yucy"
+ ]
+ ]
+ },
+ {
+ "input": "&yucy;",
+ "description": "Named entity: yucy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u044e"
+ ]
+ ]
+ },
+ {
+ "input": "&yuml",
+ "description": "Named entity: yuml without a semi-colon",
+ "output": [
+ "ParseError",
+ [
+ "Character",
+ "\u00ff"
+ ]
+ ]
+ },
+ {
+ "input": "&yuml;",
+ "description": "Named entity: yuml; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u00ff"
+ ]
+ ]
+ },
+ {
+ "input": "&zacute",
+ "description": "Bad named entity: zacute without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zacute"
+ ]
+ ]
+ },
+ {
+ "input": "&zacute;",
+ "description": "Named entity: zacute; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u017a"
+ ]
+ ]
+ },
+ {
+ "input": "&zcaron",
+ "description": "Bad named entity: zcaron without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zcaron"
+ ]
+ ]
+ },
+ {
+ "input": "&zcaron;",
+ "description": "Named entity: zcaron; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u017e"
+ ]
+ ]
+ },
+ {
+ "input": "&zcy",
+ "description": "Bad named entity: zcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zcy"
+ ]
+ ]
+ },
+ {
+ "input": "&zcy;",
+ "description": "Named entity: zcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0437"
+ ]
+ ]
+ },
+ {
+ "input": "&zdot",
+ "description": "Bad named entity: zdot without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zdot"
+ ]
+ ]
+ },
+ {
+ "input": "&zdot;",
+ "description": "Named entity: zdot; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u017c"
+ ]
+ ]
+ },
+ {
+ "input": "&zeetrf",
+ "description": "Bad named entity: zeetrf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zeetrf"
+ ]
+ ]
+ },
+ {
+ "input": "&zeetrf;",
+ "description": "Named entity: zeetrf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u2128"
+ ]
+ ]
+ },
+ {
+ "input": "&zeta",
+ "description": "Bad named entity: zeta without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zeta"
+ ]
+ ]
+ },
+ {
+ "input": "&zeta;",
+ "description": "Named entity: zeta; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u03b6"
+ ]
+ ]
+ },
+ {
+ "input": "&zfr",
+ "description": "Bad named entity: zfr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zfr"
+ ]
+ ]
+ },
+ {
+ "input": "&zfr;",
+ "description": "Named entity: zfr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd37"
+ ]
+ ]
+ },
+ {
+ "input": "&zhcy",
+ "description": "Bad named entity: zhcy without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zhcy"
+ ]
+ ]
+ },
+ {
+ "input": "&zhcy;",
+ "description": "Named entity: zhcy; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u0436"
+ ]
+ ]
+ },
+ {
+ "input": "&zigrarr",
+ "description": "Bad named entity: zigrarr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zigrarr"
+ ]
+ ]
+ },
+ {
+ "input": "&zigrarr;",
+ "description": "Named entity: zigrarr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u21dd"
+ ]
+ ]
+ },
+ {
+ "input": "&zopf",
+ "description": "Bad named entity: zopf without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zopf"
+ ]
+ ]
+ },
+ {
+ "input": "&zopf;",
+ "description": "Named entity: zopf; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udd6b"
+ ]
+ ]
+ },
+ {
+ "input": "&zscr",
+ "description": "Bad named entity: zscr without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zscr"
+ ]
+ ]
+ },
+ {
+ "input": "&zscr;",
+ "description": "Named entity: zscr; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\ud835\udccf"
+ ]
+ ]
+ },
+ {
+ "input": "&zwj",
+ "description": "Bad named entity: zwj without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zwj"
+ ]
+ ]
+ },
+ {
+ "input": "&zwj;",
+ "description": "Named entity: zwj; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200d"
+ ]
+ ]
+ },
+ {
+ "input": "&zwnj",
+ "description": "Bad named entity: zwnj without a semi-colon",
+ "output": [
+ [
+ "Character",
+ "&zwnj"
+ ]
+ ]
+ },
+ {
+ "input": "&zwnj;",
+ "description": "Named entity: zwnj; with a semi-colon",
+ "output": [
+ [
+ "Character",
+ "\u200c"
+ ]
+ ]
+ }
+ ]
+}
\ No newline at end of file
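(For orientation only, not part of the commit: every .test file added here shares the same JSON shape, a top-level "tests" list whose entries pair an "input" string with the token stream the tokenizer is expected to emit, optionally preceded by "ParseError" markers. The short Python sketch below simply loads one of these files and summarises each case; the file path is illustrative, and html5lib's own tokenizer test runner, which is not shown in this excerpt, is what actually consumes these files.)

import codecs
import json

# Illustrative path; any of the *.test files added in this commit has the same shape.
TEST_FILE = "html5lib/tests/testdata/tokenizer/namedEntities.test"

def iter_tokenizer_tests(path):
    """Yield (description, input, expected_output) triples from a .test file.

    Each file is a JSON object with a top-level "tests" list; every entry has
    "description", "input", and "output" keys, where "output" is a list of
    expected tokens such as ["Character", "..."], optionally preceded by the
    string "ParseError".
    """
    with codecs.open(path, "r", "utf-8") as handle:
        data = json.load(handle)
    for test in data["tests"]:
        yield test["description"], test["input"], test["output"]

if __name__ == "__main__":
    for description, source, expected in iter_tokenizer_tests(TEST_FILE):
        # Count expected parse errors separately from the emitted tokens.
        parse_errors = expected.count("ParseError")
        tokens = [item for item in expected if item != "ParseError"]
        print("%s: %d token(s), %d parse error(s)"
              % (description, len(tokens), parse_errors))
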
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/numericEntities.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/numericEntities.test
new file mode 100644
index 000000000..36c82281c
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/numericEntities.test
@@ -0,0 +1,1313 @@
+{"tests": [
+
+{"description": "Invalid numeric entity character U+0000",
+"input": "&#x0000;",
+"output": ["ParseError", ["Character", "\uFFFD"]]},
+
+{"description": "Invalid numeric entity character U+0001",
+"input": "&#x0001;",
+"output": ["ParseError", ["Character", "\u0001"]]},
+
+{"description": "Invalid numeric entity character U+0002",
+"input": "&#x0002;",
+"output": ["ParseError", ["Character", "\u0002"]]},
+
+{"description": "Invalid numeric entity character U+0003",
+"input": "&#x0003;",
+"output": ["ParseError", ["Character", "\u0003"]]},
+
+{"description": "Invalid numeric entity character U+0004",
+"input": "&#x0004;",
+"output": ["ParseError", ["Character", "\u0004"]]},
+
+{"description": "Invalid numeric entity character U+0005",
+"input": "&#x0005;",
+"output": ["ParseError", ["Character", "\u0005"]]},
+
+{"description": "Invalid numeric entity character U+0006",
+"input": "&#x0006;",
+"output": ["ParseError", ["Character", "\u0006"]]},
+
+{"description": "Invalid numeric entity character U+0007",
+"input": "&#x0007;",
+"output": ["ParseError", ["Character", "\u0007"]]},
+
+{"description": "Invalid numeric entity character U+0008",
+"input": "&#x0008;",
+"output": ["ParseError", ["Character", "\u0008"]]},
+
+{"description": "Invalid numeric entity character U+000B",
+"input": "&#x000b;",
+"output": ["ParseError", ["Character", "\u000b"]]},
+
+{"description": "Invalid numeric entity character U+000E",
+"input": "&#x000e;",
+"output": ["ParseError", ["Character", "\u000e"]]},
+
+{"description": "Invalid numeric entity character U+000F",
+"input": "&#x000f;",
+"output": ["ParseError", ["Character", "\u000f"]]},
+
+{"description": "Invalid numeric entity character U+0010",
+"input": "&#x0010;",
+"output": ["ParseError", ["Character", "\u0010"]]},
+
+{"description": "Invalid numeric entity character U+0011",
+"input": "&#x0011;",
+"output": ["ParseError", ["Character", "\u0011"]]},
+
+{"description": "Invalid numeric entity character U+0012",
+"input": "&#x0012;",
+"output": ["ParseError", ["Character", "\u0012"]]},
+
+{"description": "Invalid numeric entity character U+0013",
+"input": "&#x0013;",
+"output": ["ParseError", ["Character", "\u0013"]]},
+
+{"description": "Invalid numeric entity character U+0014",
+"input": "&#x0014;",
+"output": ["ParseError", ["Character", "\u0014"]]},
+
+{"description": "Invalid numeric entity character U+0015",
+"input": "&#x0015;",
+"output": ["ParseError", ["Character", "\u0015"]]},
+
+{"description": "Invalid numeric entity character U+0016",
+"input": "&#x0016;",
+"output": ["ParseError", ["Character", "\u0016"]]},
+
+{"description": "Invalid numeric entity character U+0017",
+"input": "&#x0017;",
+"output": ["ParseError", ["Character", "\u0017"]]},
+
+{"description": "Invalid numeric entity character U+0018",
+"input": "&#x0018;",
+"output": ["ParseError", ["Character", "\u0018"]]},
+
+{"description": "Invalid numeric entity character U+0019",
+"input": "&#x0019;",
+"output": ["ParseError", ["Character", "\u0019"]]},
+
+{"description": "Invalid numeric entity character U+001A",
+"input": "&#x001a;",
+"output": ["ParseError", ["Character", "\u001a"]]},
+
+{"description": "Invalid numeric entity character U+001B",
+"input": "&#x001b;",
+"output": ["ParseError", ["Character", "\u001b"]]},
+
+{"description": "Invalid numeric entity character U+001C",
+"input": "&#x001c;",
+"output": ["ParseError", ["Character", "\u001c"]]},
+
+{"description": "Invalid numeric entity character U+001D",
+"input": "&#x001d;",
+"output": ["ParseError", ["Character", "\u001d"]]},
+
+{"description": "Invalid numeric entity character U+001E",
+"input": "&#x001e;",
+"output": ["ParseError", ["Character", "\u001e"]]},
+
+{"description": "Invalid numeric entity character U+001F",
+"input": "&#x001f;",
+"output": ["ParseError", ["Character", "\u001f"]]},
+
+{"description": "Invalid numeric entity character U+007F",
+"input": "&#x007f;",
+"output": ["ParseError", ["Character", "\u007f"]]},
+
+{"description": "Invalid numeric entity character U+D800",
+"input": "&#xd800;",
+"output": ["ParseError", ["Character", "\uFFFD"]]},
+
+{"description": "Invalid numeric entity character U+DFFF",
+"input": "&#xdfff;",
+"output": ["ParseError", ["Character", "\uFFFD"]]},
+
+{"description": "Invalid numeric entity character U+FDD0",
+"input": "&#xfdd0;",
+"output": ["ParseError", ["Character", "\ufdd0"]]},
+
+{"description": "Invalid numeric entity character U+FDD1",
+"input": "&#xfdd1;",
+"output": ["ParseError", ["Character", "\ufdd1"]]},
+
+{"description": "Invalid numeric entity character U+FDD2",
+"input": "&#xfdd2;",
+"output": ["ParseError", ["Character", "\ufdd2"]]},
+
+{"description": "Invalid numeric entity character U+FDD3",
+"input": "&#xfdd3;",
+"output": ["ParseError", ["Character", "\ufdd3"]]},
+
+{"description": "Invalid numeric entity character U+FDD4",
+"input": "&#xfdd4;",
+"output": ["ParseError", ["Character", "\ufdd4"]]},
+
+{"description": "Invalid numeric entity character U+FDD5",
+"input": "&#xfdd5;",
+"output": ["ParseError", ["Character", "\ufdd5"]]},
+
+{"description": "Invalid numeric entity character U+FDD6",
+"input": "&#xfdd6;",
+"output": ["ParseError", ["Character", "\ufdd6"]]},
+
+{"description": "Invalid numeric entity character U+FDD7",
+"input": "&#xfdd7;",
+"output": ["ParseError", ["Character", "\ufdd7"]]},
+
+{"description": "Invalid numeric entity character U+FDD8",
+"input": "&#xfdd8;",
+"output": ["ParseError", ["Character", "\ufdd8"]]},
+
+{"description": "Invalid numeric entity character U+FDD9",
+"input": "&#xfdd9;",
+"output": ["ParseError", ["Character", "\ufdd9"]]},
+
+{"description": "Invalid numeric entity character U+FDDA",
+"input": "&#xfdda;",
+"output": ["ParseError", ["Character", "\ufdda"]]},
+
+{"description": "Invalid numeric entity character U+FDDB",
+"input": "&#xfddb;",
+"output": ["ParseError", ["Character", "\ufddb"]]},
+
+{"description": "Invalid numeric entity character U+FDDC",
+"input": "&#xfddc;",
+"output": ["ParseError", ["Character", "\ufddc"]]},
+
+{"description": "Invalid numeric entity character U+FDDD",
+"input": "&#xfddd;",
+"output": ["ParseError", ["Character", "\ufddd"]]},
+
+{"description": "Invalid numeric entity character U+FDDE",
+"input": "&#xfdde;",
+"output": ["ParseError", ["Character", "\ufdde"]]},
+
+{"description": "Invalid numeric entity character U+FDDF",
+"input": "&#xfddf;",
+"output": ["ParseError", ["Character", "\ufddf"]]},
+
+{"description": "Invalid numeric entity character U+FDE0",
+"input": "&#xfde0;",
+"output": ["ParseError", ["Character", "\ufde0"]]},
+
+{"description": "Invalid numeric entity character U+FDE1",
+"input": "&#xfde1;",
+"output": ["ParseError", ["Character", "\ufde1"]]},
+
+{"description": "Invalid numeric entity character U+FDE2",
+"input": "&#xfde2;",
+"output": ["ParseError", ["Character", "\ufde2"]]},
+
+{"description": "Invalid numeric entity character U+FDE3",
+"input": "&#xfde3;",
+"output": ["ParseError", ["Character", "\ufde3"]]},
+
+{"description": "Invalid numeric entity character U+FDE4",
+"input": "&#xfde4;",
+"output": ["ParseError", ["Character", "\ufde4"]]},
+
+{"description": "Invalid numeric entity character U+FDE5",
+"input": "&#xfde5;",
+"output": ["ParseError", ["Character", "\ufde5"]]},
+
+{"description": "Invalid numeric entity character U+FDE6",
+"input": "&#xfde6;",
+"output": ["ParseError", ["Character", "\ufde6"]]},
+
+{"description": "Invalid numeric entity character U+FDE7",
+"input": "&#xfde7;",
+"output": ["ParseError", ["Character", "\ufde7"]]},
+
+{"description": "Invalid numeric entity character U+FDE8",
+"input": "&#xfde8;",
+"output": ["ParseError", ["Character", "\ufde8"]]},
+
+{"description": "Invalid numeric entity character U+FDE9",
+"input": "&#xfde9;",
+"output": ["ParseError", ["Character", "\ufde9"]]},
+
+{"description": "Invalid numeric entity character U+FDEA",
+"input": "&#xfdea;",
+"output": ["ParseError", ["Character", "\ufdea"]]},
+
+{"description": "Invalid numeric entity character U+FDEB",
+"input": "&#xfdeb;",
+"output": ["ParseError", ["Character", "\ufdeb"]]},
+
+{"description": "Invalid numeric entity character U+FDEC",
+"input": "&#xfdec;",
+"output": ["ParseError", ["Character", "\ufdec"]]},
+
+{"description": "Invalid numeric entity character U+FDED",
+"input": "&#xfded;",
+"output": ["ParseError", ["Character", "\ufded"]]},
+
+{"description": "Invalid numeric entity character U+FDEE",
+"input": "&#xfdee;",
+"output": ["ParseError", ["Character", "\ufdee"]]},
+
+{"description": "Invalid numeric entity character U+FDEF",
+"input": "&#xfdef;",
+"output": ["ParseError", ["Character", "\ufdef"]]},
+
+{"description": "Invalid numeric entity character U+FFFE",
+"input": "&#xfffe;",
+"output": ["ParseError", ["Character", "\ufffe"]]},
+
+{"description": "Invalid numeric entity character U+FFFF",
+"input": "&#xffff;",
+"output": ["ParseError", ["Character", "\uffff"]]},
+
+{"description": "Invalid numeric entity character U+1FFFE",
+"input": "&#x1fffe;",
+"output": ["ParseError", ["Character", "\uD83F\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+1FFFF",
+"input": "&#x1ffff;",
+"output": ["ParseError", ["Character", "\uD83F\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+2FFFE",
+"input": "&#x2fffe;",
+"output": ["ParseError", ["Character", "\uD87F\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+2FFFF",
+"input": "&#x2ffff;",
+"output": ["ParseError", ["Character", "\uD87F\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+3FFFE",
+"input": "&#x3fffe;",
+"output": ["ParseError", ["Character", "\uD8BF\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+3FFFF",
+"input": "&#x3ffff;",
+"output": ["ParseError", ["Character", "\uD8BF\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+4FFFE",
+"input": "&#x4fffe;",
+"output": ["ParseError", ["Character", "\uD8FF\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+4FFFF",
+"input": "&#x4ffff;",
+"output": ["ParseError", ["Character", "\uD8FF\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+5FFFE",
+"input": "&#x5fffe;",
+"output": ["ParseError", ["Character", "\uD93F\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+5FFFF",
+"input": "&#x5ffff;",
+"output": ["ParseError", ["Character", "\uD93F\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+6FFFE",
+"input": "&#x6fffe;",
+"output": ["ParseError", ["Character", "\uD97F\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+6FFFF",
+"input": "&#x6ffff;",
+"output": ["ParseError", ["Character", "\uD97F\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+7FFFE",
+"input": "&#x7fffe;",
+"output": ["ParseError", ["Character", "\uD9BF\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+7FFFF",
+"input": "&#x7ffff;",
+"output": ["ParseError", ["Character", "\uD9BF\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+8FFFE",
+"input": "&#x8fffe;",
+"output": ["ParseError", ["Character", "\uD9FF\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+8FFFF",
+"input": "&#x8ffff;",
+"output": ["ParseError", ["Character", "\uD9FF\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+9FFFE",
+"input": "&#x9fffe;",
+"output": ["ParseError", ["Character", "\uDA3F\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+9FFFF",
+"input": "&#x9ffff;",
+"output": ["ParseError", ["Character", "\uDA3F\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+AFFFE",
+"input": "&#xafffe;",
+"output": ["ParseError", ["Character", "\uDA7F\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+AFFFF",
+"input": "&#xaffff;",
+"output": ["ParseError", ["Character", "\uDA7F\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+BFFFE",
+"input": "&#xbfffe;",
+"output": ["ParseError", ["Character", "\uDABF\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+BFFFF",
+"input": "&#xbffff;",
+"output": ["ParseError", ["Character", "\uDABF\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+CFFFE",
+"input": "&#xcfffe;",
+"output": ["ParseError", ["Character", "\uDAFF\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+CFFFF",
+"input": "&#xcffff;",
+"output": ["ParseError", ["Character", "\uDAFF\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+DFFFE",
+"input": "&#xdfffe;",
+"output": ["ParseError", ["Character", "\uDB3F\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+DFFFF",
+"input": "&#xdffff;",
+"output": ["ParseError", ["Character", "\uDB3F\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+EFFFE",
+"input": "&#xefffe;",
+"output": ["ParseError", ["Character", "\uDB7F\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+EFFFF",
+"input": "&#xeffff;",
+"output": ["ParseError", ["Character", "\uDB7F\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+FFFFE",
+"input": "&#xffffe;",
+"output": ["ParseError", ["Character", "\uDBBF\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+FFFFF",
+"input": "&#xfffff;",
+"output": ["ParseError", ["Character", "\uDBBF\uDFFF"]]},
+
+{"description": "Invalid numeric entity character U+10FFFE",
+"input": "&#x10fffe;",
+"output": ["ParseError", ["Character", "\uDBFF\uDFFE"]]},
+
+{"description": "Invalid numeric entity character U+10FFFF",
+"input": "&#x10ffff;",
+"output": ["ParseError", ["Character", "\uDBFF\uDFFF"]]},
+
+{"description": "Valid numeric entity character U+0009",
+"input": "&#x0009;",
+"output": [["Character", "\u0009"]]},
+
+{"description": "Valid numeric entity character U+000A",
+"input": "&#x000a;",
+"output": [["Character", "\u000A"]]},
+
+{"description": "Valid numeric entity character U+0020",
+"input": "&#x0020;",
+"output": [["Character", "\u0020"]]},
+
+{"description": "Valid numeric entity character U+0021",
+"input": "&#x0021;",
+"output": [["Character", "\u0021"]]},
+
+{"description": "Valid numeric entity character U+0022",
+"input": "&#x0022;",
+"output": [["Character", "\u0022"]]},
+
+{"description": "Valid numeric entity character U+0023",
+"input": "&#x0023;",
+"output": [["Character", "\u0023"]]},
+
+{"description": "Valid numeric entity character U+0024",
+"input": "&#x0024;",
+"output": [["Character", "\u0024"]]},
+
+{"description": "Valid numeric entity character U+0025",
+"input": "&#x0025;",
+"output": [["Character", "\u0025"]]},
+
+{"description": "Valid numeric entity character U+0026",
+"input": "&#x0026;",
+"output": [["Character", "\u0026"]]},
+
+{"description": "Valid numeric entity character U+0027",
+"input": "&#x0027;",
+"output": [["Character", "\u0027"]]},
+
+{"description": "Valid numeric entity character U+0028",
+"input": "&#x0028;",
+"output": [["Character", "\u0028"]]},
+
+{"description": "Valid numeric entity character U+0029",
+"input": "&#x0029;",
+"output": [["Character", "\u0029"]]},
+
+{"description": "Valid numeric entity character U+002A",
+"input": "&#x002a;",
+"output": [["Character", "\u002A"]]},
+
+{"description": "Valid numeric entity character U+002B",
+"input": "&#x002b;",
+"output": [["Character", "\u002B"]]},
+
+{"description": "Valid numeric entity character U+002C",
+"input": "&#x002c;",
+"output": [["Character", "\u002C"]]},
+
+{"description": "Valid numeric entity character U+002D",
+"input": "&#x002d;",
+"output": [["Character", "\u002D"]]},
+
+{"description": "Valid numeric entity character U+002E",
+"input": "&#x002e;",
+"output": [["Character", "\u002E"]]},
+
+{"description": "Valid numeric entity character U+002F",
+"input": "&#x002f;",
+"output": [["Character", "\u002F"]]},
+
+{"description": "Valid numeric entity character U+0030",
+"input": "&#x0030;",
+"output": [["Character", "\u0030"]]},
+
+{"description": "Valid numeric entity character U+0031",
+"input": "&#x0031;",
+"output": [["Character", "\u0031"]]},
+
+{"description": "Valid numeric entity character U+0032",
+"input": "&#x0032;",
+"output": [["Character", "\u0032"]]},
+
+{"description": "Valid numeric entity character U+0033",
+"input": "&#x0033;",
+"output": [["Character", "\u0033"]]},
+
+{"description": "Valid numeric entity character U+0034",
+"input": "&#x0034;",
+"output": [["Character", "\u0034"]]},
+
+{"description": "Valid numeric entity character U+0035",
+"input": "&#x0035;",
+"output": [["Character", "\u0035"]]},
+
+{"description": "Valid numeric entity character U+0036",
+"input": "&#x0036;",
+"output": [["Character", "\u0036"]]},
+
+{"description": "Valid numeric entity character U+0037",
+"input": "&#x0037;",
+"output": [["Character", "\u0037"]]},
+
+{"description": "Valid numeric entity character U+0038",
+"input": "&#x0038;",
+"output": [["Character", "\u0038"]]},
+
+{"description": "Valid numeric entity character U+0039",
+"input": "&#x0039;",
+"output": [["Character", "\u0039"]]},
+
+{"description": "Valid numeric entity character U+003A",
+"input": "&#x003a;",
+"output": [["Character", "\u003A"]]},
+
+{"description": "Valid numeric entity character U+003B",
+"input": "&#x003b;",
+"output": [["Character", "\u003B"]]},
+
+{"description": "Valid numeric entity character U+003C",
+"input": "&#x003c;",
+"output": [["Character", "\u003C"]]},
+
+{"description": "Valid numeric entity character U+003D",
+"input": "&#x003d;",
+"output": [["Character", "\u003D"]]},
+
+{"description": "Valid numeric entity character U+003E",
+"input": "&#x003e;",
+"output": [["Character", "\u003E"]]},
+
+{"description": "Valid numeric entity character U+003F",
+"input": "&#x003f;",
+"output": [["Character", "\u003F"]]},
+
+{"description": "Valid numeric entity character U+0040",
+"input": "&#x0040;",
+"output": [["Character", "\u0040"]]},
+
+{"description": "Valid numeric entity character U+0041",
+"input": "&#x0041;",
+"output": [["Character", "\u0041"]]},
+
+{"description": "Valid numeric entity character U+0042",
+"input": "&#x0042;",
+"output": [["Character", "\u0042"]]},
+
+{"description": "Valid numeric entity character U+0043",
+"input": "&#x0043;",
+"output": [["Character", "\u0043"]]},
+
+{"description": "Valid numeric entity character U+0044",
+"input": "&#x0044;",
+"output": [["Character", "\u0044"]]},
+
+{"description": "Valid numeric entity character U+0045",
+"input": "&#x0045;",
+"output": [["Character", "\u0045"]]},
+
+{"description": "Valid numeric entity character U+0046",
+"input": "&#x0046;",
+"output": [["Character", "\u0046"]]},
+
+{"description": "Valid numeric entity character U+0047",
+"input": "&#x0047;",
+"output": [["Character", "\u0047"]]},
+
+{"description": "Valid numeric entity character U+0048",
+"input": "&#x0048;",
+"output": [["Character", "\u0048"]]},
+
+{"description": "Valid numeric entity character U+0049",
+"input": "&#x0049;",
+"output": [["Character", "\u0049"]]},
+
+{"description": "Valid numeric entity character U+004A",
+"input": "&#x004a;",
+"output": [["Character", "\u004A"]]},
+
+{"description": "Valid numeric entity character U+004B",
+"input": "&#x004b;",
+"output": [["Character", "\u004B"]]},
+
+{"description": "Valid numeric entity character U+004C",
+"input": "&#x004c;",
+"output": [["Character", "\u004C"]]},
+
+{"description": "Valid numeric entity character U+004D",
+"input": "&#x004d;",
+"output": [["Character", "\u004D"]]},
+
+{"description": "Valid numeric entity character U+004E",
+"input": "&#x004e;",
+"output": [["Character", "\u004E"]]},
+
+{"description": "Valid numeric entity character U+004F",
+"input": "&#x004f;",
+"output": [["Character", "\u004F"]]},
+
+{"description": "Valid numeric entity character U+0050",
+"input": "&#x0050;",
+"output": [["Character", "\u0050"]]},
+
+{"description": "Valid numeric entity character U+0051",
+"input": "&#x0051;",
+"output": [["Character", "\u0051"]]},
+
+{"description": "Valid numeric entity character U+0052",
+"input": "&#x0052;",
+"output": [["Character", "\u0052"]]},
+
+{"description": "Valid numeric entity character U+0053",
+"input": "&#x0053;",
+"output": [["Character", "\u0053"]]},
+
+{"description": "Valid numeric entity character U+0054",
+"input": "&#x0054;",
+"output": [["Character", "\u0054"]]},
+
+{"description": "Valid numeric entity character U+0055",
+"input": "&#x0055;",
+"output": [["Character", "\u0055"]]},
+
+{"description": "Valid numeric entity character U+0056",
+"input": "&#x0056;",
+"output": [["Character", "\u0056"]]},
+
+{"description": "Valid numeric entity character U+0057",
+"input": "&#x0057;",
+"output": [["Character", "\u0057"]]},
+
+{"description": "Valid numeric entity character U+0058",
+"input": "&#x0058;",
+"output": [["Character", "\u0058"]]},
+
+{"description": "Valid numeric entity character U+0059",
+"input": "&#x0059;",
+"output": [["Character", "\u0059"]]},
+
+{"description": "Valid numeric entity character U+005A",
+"input": "&#x005a;",
+"output": [["Character", "\u005A"]]},
+
+{"description": "Valid numeric entity character U+005B",
+"input": "&#x005b;",
+"output": [["Character", "\u005B"]]},
+
+{"description": "Valid numeric entity character U+005C",
+"input": "&#x005c;",
+"output": [["Character", "\u005C"]]},
+
+{"description": "Valid numeric entity character U+005D",
+"input": "&#x005d;",
+"output": [["Character", "\u005D"]]},
+
+{"description": "Valid numeric entity character U+005E",
+"input": "&#x005e;",
+"output": [["Character", "\u005E"]]},
+
+{"description": "Valid numeric entity character U+005F",
+"input": "&#x005f;",
+"output": [["Character", "\u005F"]]},
+
+{"description": "Valid numeric entity character U+0060",
+"input": "&#x0060;",
+"output": [["Character", "\u0060"]]},
+
+{"description": "Valid numeric entity character U+0061",
+"input": "&#x0061;",
+"output": [["Character", "\u0061"]]},
+
+{"description": "Valid numeric entity character U+0062",
+"input": "&#x0062;",
+"output": [["Character", "\u0062"]]},
+
+{"description": "Valid numeric entity character U+0063",
+"input": "&#x0063;",
+"output": [["Character", "\u0063"]]},
+
+{"description": "Valid numeric entity character U+0064",
+"input": "&#x0064;",
+"output": [["Character", "\u0064"]]},
+
+{"description": "Valid numeric entity character U+0065",
+"input": "&#x0065;",
+"output": [["Character", "\u0065"]]},
+
+{"description": "Valid numeric entity character U+0066",
+"input": "&#x0066;",
+"output": [["Character", "\u0066"]]},
+
+{"description": "Valid numeric entity character U+0067",
+"input": "&#x0067;",
+"output": [["Character", "\u0067"]]},
+
+{"description": "Valid numeric entity character U+0068",
+"input": "&#x0068;",
+"output": [["Character", "\u0068"]]},
+
+{"description": "Valid numeric entity character U+0069",
+"input": "&#x0069;",
+"output": [["Character", "\u0069"]]},
+
+{"description": "Valid numeric entity character U+006A",
+"input": "&#x006a;",
+"output": [["Character", "\u006A"]]},
+
+{"description": "Valid numeric entity character U+006B",
+"input": "&#x006b;",
+"output": [["Character", "\u006B"]]},
+
+{"description": "Valid numeric entity character U+006C",
+"input": "&#x006c;",
+"output": [["Character", "\u006C"]]},
+
+{"description": "Valid numeric entity character U+006D",
+"input": "&#x006d;",
+"output": [["Character", "\u006D"]]},
+
+{"description": "Valid numeric entity character U+006E",
+"input": "&#x006e;",
+"output": [["Character", "\u006E"]]},
+
+{"description": "Valid numeric entity character U+006F",
+"input": "&#x006f;",
+"output": [["Character", "\u006F"]]},
+
+{"description": "Valid numeric entity character U+0070",
+"input": "&#x0070;",
+"output": [["Character", "\u0070"]]},
+
+{"description": "Valid numeric entity character U+0071",
+"input": "&#x0071;",
+"output": [["Character", "\u0071"]]},
+
+{"description": "Valid numeric entity character U+0072",
+"input": "&#x0072;",
+"output": [["Character", "\u0072"]]},
+
+{"description": "Valid numeric entity character U+0073",
+"input": "&#x0073;",
+"output": [["Character", "\u0073"]]},
+
+{"description": "Valid numeric entity character U+0074",
+"input": "&#x0074;",
+"output": [["Character", "\u0074"]]},
+
+{"description": "Valid numeric entity character U+0075",
+"input": "&#x0075;",
+"output": [["Character", "\u0075"]]},
+
+{"description": "Valid numeric entity character U+0076",
+"input": "&#x0076;",
+"output": [["Character", "\u0076"]]},
+
+{"description": "Valid numeric entity character U+0077",
+"input": "&#x0077;",
+"output": [["Character", "\u0077"]]},
+
+{"description": "Valid numeric entity character U+0078",
+"input": "&#x0078;",
+"output": [["Character", "\u0078"]]},
+
+{"description": "Valid numeric entity character U+0079",
+"input": "&#x0079;",
+"output": [["Character", "\u0079"]]},
+
+{"description": "Valid numeric entity character U+007A",
+"input": "&#x007a;",
+"output": [["Character", "\u007A"]]},
+
+{"description": "Valid numeric entity character U+007B",
+"input": "&#x007b;",
+"output": [["Character", "\u007B"]]},
+
+{"description": "Valid numeric entity character U+007C",
+"input": "&#x007c;",
+"output": [["Character", "\u007C"]]},
+
+{"description": "Valid numeric entity character U+007D",
+"input": "&#x007d;",
+"output": [["Character", "\u007D"]]},
+
+{"description": "Valid numeric entity character U+007E",
+"input": "&#x007e;",
+"output": [["Character", "\u007E"]]},
+
+{"description": "Valid numeric entity character U+00A0",
+"input": "&#x00a0;",
+"output": [["Character", "\u00A0"]]},
+
+{"description": "Valid numeric entity character U+00A1",
+"input": "&#x00a1;",
+"output": [["Character", "\u00A1"]]},
+
+{"description": "Valid numeric entity character U+00A2",
+"input": "&#x00a2;",
+"output": [["Character", "\u00A2"]]},
+
+{"description": "Valid numeric entity character U+00A3",
+"input": "&#x00a3;",
+"output": [["Character", "\u00A3"]]},
+
+{"description": "Valid numeric entity character U+00A4",
+"input": "&#x00a4;",
+"output": [["Character", "\u00A4"]]},
+
+{"description": "Valid numeric entity character U+00A5",
+"input": "&#x00a5;",
+"output": [["Character", "\u00A5"]]},
+
+{"description": "Valid numeric entity character U+00A6",
+"input": "&#x00a6;",
+"output": [["Character", "\u00A6"]]},
+
+{"description": "Valid numeric entity character U+00A7",
+"input": "&#x00a7;",
+"output": [["Character", "\u00A7"]]},
+
+{"description": "Valid numeric entity character U+00A8",
+"input": "&#x00a8;",
+"output": [["Character", "\u00A8"]]},
+
+{"description": "Valid numeric entity character U+00A9",
+"input": "&#x00a9;",
+"output": [["Character", "\u00A9"]]},
+
+{"description": "Valid numeric entity character U+00AA",
+"input": "&#x00aa;",
+"output": [["Character", "\u00AA"]]},
+
+{"description": "Valid numeric entity character U+00AB",
+"input": "&#x00ab;",
+"output": [["Character", "\u00AB"]]},
+
+{"description": "Valid numeric entity character U+00AC",
+"input": "&#x00ac;",
+"output": [["Character", "\u00AC"]]},
+
+{"description": "Valid numeric entity character U+00AD",
+"input": "&#x00ad;",
+"output": [["Character", "\u00AD"]]},
+
+{"description": "Valid numeric entity character U+00AE",
+"input": "&#x00ae;",
+"output": [["Character", "\u00AE"]]},
+
+{"description": "Valid numeric entity character U+00AF",
+"input": "&#x00af;",
+"output": [["Character", "\u00AF"]]},
+
+{"description": "Valid numeric entity character U+00B0",
+"input": "&#x00b0;",
+"output": [["Character", "\u00B0"]]},
+
+{"description": "Valid numeric entity character U+00B1",
+"input": "&#x00b1;",
+"output": [["Character", "\u00B1"]]},
+
+{"description": "Valid numeric entity character U+00B2",
+"input": "&#x00b2;",
+"output": [["Character", "\u00B2"]]},
+
+{"description": "Valid numeric entity character U+00B3",
+"input": "&#x00b3;",
+"output": [["Character", "\u00B3"]]},
+
+{"description": "Valid numeric entity character U+00B4",
+"input": "&#x00b4;",
+"output": [["Character", "\u00B4"]]},
+
+{"description": "Valid numeric entity character U+00B5",
+"input": "&#x00b5;",
+"output": [["Character", "\u00B5"]]},
+
+{"description": "Valid numeric entity character U+00B6",
+"input": "&#x00b6;",
+"output": [["Character", "\u00B6"]]},
+
+{"description": "Valid numeric entity character U+00B7",
+"input": "&#x00b7;",
+"output": [["Character", "\u00B7"]]},
+
+{"description": "Valid numeric entity character U+00B8",
+"input": "&#x00b8;",
+"output": [["Character", "\u00B8"]]},
+
+{"description": "Valid numeric entity character U+00B9",
+"input": "&#x00b9;",
+"output": [["Character", "\u00B9"]]},
+
+{"description": "Valid numeric entity character U+00BA",
+"input": "&#x00ba;",
+"output": [["Character", "\u00BA"]]},
+
+{"description": "Valid numeric entity character U+00BB",
+"input": "&#x00bb;",
+"output": [["Character", "\u00BB"]]},
+
+{"description": "Valid numeric entity character U+00BC",
+"input": "&#x00bc;",
+"output": [["Character", "\u00BC"]]},
+
+{"description": "Valid numeric entity character U+00BD",
+"input": "&#x00bd;",
+"output": [["Character", "\u00BD"]]},
+
+{"description": "Valid numeric entity character U+00BE",
+"input": "&#x00be;",
+"output": [["Character", "\u00BE"]]},
+
+{"description": "Valid numeric entity character U+00BF",
+"input": "&#x00bf;",
+"output": [["Character", "\u00BF"]]},
+
+{"description": "Valid numeric entity character U+00C0",
+"input": "&#x00c0;",
+"output": [["Character", "\u00C0"]]},
+
+{"description": "Valid numeric entity character U+00C1",
+"input": "&#x00c1;",
+"output": [["Character", "\u00C1"]]},
+
+{"description": "Valid numeric entity character U+00C2",
+"input": "&#x00c2;",
+"output": [["Character", "\u00C2"]]},
+
+{"description": "Valid numeric entity character U+00C3",
+"input": "&#x00c3;",
+"output": [["Character", "\u00C3"]]},
+
+{"description": "Valid numeric entity character U+00C4",
+"input": "&#x00c4;",
+"output": [["Character", "\u00C4"]]},
+
+{"description": "Valid numeric entity character U+00C5",
+"input": "&#x00c5;",
+"output": [["Character", "\u00C5"]]},
+
+{"description": "Valid numeric entity character U+00C6",
+"input": "&#x00c6;",
+"output": [["Character", "\u00C6"]]},
+
+{"description": "Valid numeric entity character U+00C7",
+"input": "&#x00c7;",
+"output": [["Character", "\u00C7"]]},
+
+{"description": "Valid numeric entity character U+00C8",
+"input": "&#x00c8;",
+"output": [["Character", "\u00C8"]]},
+
+{"description": "Valid numeric entity character U+00C9",
+"input": "&#x00c9;",
+"output": [["Character", "\u00C9"]]},
+
+{"description": "Valid numeric entity character U+00CA",
+"input": "&#x00ca;",
+"output": [["Character", "\u00CA"]]},
+
+{"description": "Valid numeric entity character U+00CB",
+"input": "&#x00cb;",
+"output": [["Character", "\u00CB"]]},
+
+{"description": "Valid numeric entity character U+00CC",
+"input": "&#x00cc;",
+"output": [["Character", "\u00CC"]]},
+
+{"description": "Valid numeric entity character U+00CD",
+"input": "&#x00cd;",
+"output": [["Character", "\u00CD"]]},
+
+{"description": "Valid numeric entity character U+00CE",
+"input": "&#x00ce;",
+"output": [["Character", "\u00CE"]]},
+
+{"description": "Valid numeric entity character U+00CF",
+"input": "&#x00cf;",
+"output": [["Character", "\u00CF"]]},
+
+{"description": "Valid numeric entity character U+00D0",
+"input": "&#x00d0;",
+"output": [["Character", "\u00D0"]]},
+
+{"description": "Valid numeric entity character U+00D1",
+"input": "&#x00d1;",
+"output": [["Character", "\u00D1"]]},
+
+{"description": "Valid numeric entity character U+00D2",
+"input": "&#x00d2;",
+"output": [["Character", "\u00D2"]]},
+
+{"description": "Valid numeric entity character U+00D3",
+"input": "&#x00d3;",
+"output": [["Character", "\u00D3"]]},
+
+{"description": "Valid numeric entity character U+00D4",
+"input": "&#x00d4;",
+"output": [["Character", "\u00D4"]]},
+
+{"description": "Valid numeric entity character U+00D5",
+"input": "&#x00d5;",
+"output": [["Character", "\u00D5"]]},
+
+{"description": "Valid numeric entity character U+00D6",
+"input": "&#x00d6;",
+"output": [["Character", "\u00D6"]]},
+
+{"description": "Valid numeric entity character U+00D7",
+"input": "&#x00d7;",
+"output": [["Character", "\u00D7"]]},
+
+{"description": "Valid numeric entity character U+00D8",
+"input": "&#x00d8;",
+"output": [["Character", "\u00D8"]]},
+
+{"description": "Valid numeric entity character U+00D9",
+"input": "&#x00d9;",
+"output": [["Character", "\u00D9"]]},
+
+{"description": "Valid numeric entity character U+00DA",
+"input": "&#x00da;",
+"output": [["Character", "\u00DA"]]},
+
+{"description": "Valid numeric entity character U+00DB",
+"input": "&#x00db;",
+"output": [["Character", "\u00DB"]]},
+
+{"description": "Valid numeric entity character U+00DC",
+"input": "&#x00dc;",
+"output": [["Character", "\u00DC"]]},
+
+{"description": "Valid numeric entity character U+00DD",
+"input": "&#x00dd;",
+"output": [["Character", "\u00DD"]]},
+
+{"description": "Valid numeric entity character U+00DE",
+"input": "&#x00de;",
+"output": [["Character", "\u00DE"]]},
+
+{"description": "Valid numeric entity character U+00DF",
+"input": "&#x00df;",
+"output": [["Character", "\u00DF"]]},
+
+{"description": "Valid numeric entity character U+00E0",
+"input": "&#x00e0;",
+"output": [["Character", "\u00E0"]]},
+
+{"description": "Valid numeric entity character U+00E1",
+"input": "&#x00e1;",
+"output": [["Character", "\u00E1"]]},
+
+{"description": "Valid numeric entity character U+00E2",
+"input": "&#x00e2;",
+"output": [["Character", "\u00E2"]]},
+
+{"description": "Valid numeric entity character U+00E3",
+"input": "&#x00e3;",
+"output": [["Character", "\u00E3"]]},
+
+{"description": "Valid numeric entity character U+00E4",
+"input": "&#x00e4;",
+"output": [["Character", "\u00E4"]]},
+
+{"description": "Valid numeric entity character U+00E5",
+"input": "&#x00e5;",
+"output": [["Character", "\u00E5"]]},
+
+{"description": "Valid numeric entity character U+00E6",
+"input": "&#x00e6;",
+"output": [["Character", "\u00E6"]]},
+
+{"description": "Valid numeric entity character U+00E7",
+"input": "&#x00e7;",
+"output": [["Character", "\u00E7"]]},
+
+{"description": "Valid numeric entity character U+00E8",
+"input": "&#x00e8;",
+"output": [["Character", "\u00E8"]]},
+
+{"description": "Valid numeric entity character U+00E9",
+"input": "&#x00e9;",
+"output": [["Character", "\u00E9"]]},
+
+{"description": "Valid numeric entity character U+00EA",
+"input": "&#x00ea;",
+"output": [["Character", "\u00EA"]]},
+
+{"description": "Valid numeric entity character U+00EB",
+"input": "&#x00eb;",
+"output": [["Character", "\u00EB"]]},
+
+{"description": "Valid numeric entity character U+00EC",
+"input": "&#x00ec;",
+"output": [["Character", "\u00EC"]]},
+
+{"description": "Valid numeric entity character U+00ED",
+"input": "&#x00ed;",
+"output": [["Character", "\u00ED"]]},
+
+{"description": "Valid numeric entity character U+00EE",
+"input": "&#x00ee;",
+"output": [["Character", "\u00EE"]]},
+
+{"description": "Valid numeric entity character U+00EF",
+"input": "&#x00ef;",
+"output": [["Character", "\u00EF"]]},
+
+{"description": "Valid numeric entity character U+00F0",
+"input": "&#x00f0;",
+"output": [["Character", "\u00F0"]]},
+
+{"description": "Valid numeric entity character U+00F1",
+"input": "&#x00f1;",
+"output": [["Character", "\u00F1"]]},
+
+{"description": "Valid numeric entity character U+00F2",
+"input": "&#x00f2;",
+"output": [["Character", "\u00F2"]]},
+
+{"description": "Valid numeric entity character U+00F3",
+"input": "&#x00f3;",
+"output": [["Character", "\u00F3"]]},
+
+{"description": "Valid numeric entity character U+00F4",
+"input": "&#x00f4;",
+"output": [["Character", "\u00F4"]]},
+
+{"description": "Valid numeric entity character U+00F5",
+"input": "&#x00f5;",
+"output": [["Character", "\u00F5"]]},
+
+{"description": "Valid numeric entity character U+00F6",
+"input": "&#x00f6;",
+"output": [["Character", "\u00F6"]]},
+
+{"description": "Valid numeric entity character U+00F7",
+"input": "&#x00f7;",
+"output": [["Character", "\u00F7"]]},
+
+{"description": "Valid numeric entity character U+00F8",
+"input": "&#x00f8;",
+"output": [["Character", "\u00F8"]]},
+
+{"description": "Valid numeric entity character U+00F9",
+"input": "&#x00f9;",
+"output": [["Character", "\u00F9"]]},
+
+{"description": "Valid numeric entity character U+00FA",
+"input": "&#x00fa;",
+"output": [["Character", "\u00FA"]]},
+
+{"description": "Valid numeric entity character U+00FB",
+"input": "&#x00fb;",
+"output": [["Character", "\u00FB"]]},
+
+{"description": "Valid numeric entity character U+00FC",
+"input": "&#x00fc;",
+"output": [["Character", "\u00FC"]]},
+
+{"description": "Valid numeric entity character U+00FD",
+"input": "&#x00fd;",
+"output": [["Character", "\u00FD"]]},
+
+{"description": "Valid numeric entity character U+00FE",
+"input": "&#x00fe;",
+"output": [["Character", "\u00FE"]]},
+
+{"description": "Valid numeric entity character U+00FF",
+"input": "&#x00ff;",
+"output": [["Character", "\u00FF"]]},
+
+{"description": "Valid numeric entity character U+D7FF",
+"input": "&#xd7ff;",
+"output": [["Character", "\uD7FF"]]},
+
+{"description": "Valid numeric entity character U+E000",
+"input": "&#xe000;",
+"output": [["Character", "\uE000"]]},
+
+{"description": "Valid numeric entity character U+FDCF",
+"input": "&#xfdcf;",
+"output": [["Character", "\uFDCF"]]},
+
+{"description": "Valid numeric entity character U+FDF0",
+"input": "&#xfdf0;",
+"output": [["Character", "\uFDF0"]]},
+
+{"description": "Valid numeric entity character U+FFFD",
+"input": "&#xfffd;",
+"output": [["Character", "\uFFFD"]]},
+
+{"description": "Valid numeric entity character U+10000",
+"input": "&#x10000;",
+"output": [["Character", "\uD800\uDC00"]]},
+
+{"description": "Valid numeric entity character U+1FFFD",
+"input": "&#x1fffd;",
+"output": [["Character", "\uD83F\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+20000",
+"input": "&#x20000;",
+"output": [["Character", "\uD840\uDC00"]]},
+
+{"description": "Valid numeric entity character U+2FFFD",
+"input": "&#x2fffd;",
+"output": [["Character", "\uD87F\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+30000",
+"input": "&#x30000;",
+"output": [["Character", "\uD880\uDC00"]]},
+
+{"description": "Valid numeric entity character U+3FFFD",
+"input": "&#x3fffd;",
+"output": [["Character", "\uD8BF\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+40000",
+"input": "&#x40000;",
+"output": [["Character", "\uD8C0\uDC00"]]},
+
+{"description": "Valid numeric entity character U+4FFFD",
+"input": "&#x4fffd;",
+"output": [["Character", "\uD8FF\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+50000",
+"input": "&#x50000;",
+"output": [["Character", "\uD900\uDC00"]]},
+
+{"description": "Valid numeric entity character U+5FFFD",
+"input": "&#x5fffd;",
+"output": [["Character", "\uD93F\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+60000",
+"input": "&#x60000;",
+"output": [["Character", "\uD940\uDC00"]]},
+
+{"description": "Valid numeric entity character U+6FFFD",
+"input": "&#x6fffd;",
+"output": [["Character", "\uD97F\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+70000",
+"input": "&#x70000;",
+"output": [["Character", "\uD980\uDC00"]]},
+
+{"description": "Valid numeric entity character U+7FFFD",
+"input": "&#x7fffd;",
+"output": [["Character", "\uD9BF\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+80000",
+"input": "&#x80000;",
+"output": [["Character", "\uD9C0\uDC00"]]},
+
+{"description": "Valid numeric entity character U+8FFFD",
+"input": "&#x8fffd;",
+"output": [["Character", "\uD9FF\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+90000",
+"input": "&#x90000;",
+"output": [["Character", "\uDA00\uDC00"]]},
+
+{"description": "Valid numeric entity character U+9FFFD",
+"input": "&#x9fffd;",
+"output": [["Character", "\uDA3F\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+A0000",
+"input": "&#xa0000;",
+"output": [["Character", "\uDA40\uDC00"]]},
+
+{"description": "Valid numeric entity character U+AFFFD",
+"input": "&#xafffd;",
+"output": [["Character", "\uDA7F\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+B0000",
+"input": "&#xb0000;",
+"output": [["Character", "\uDA80\uDC00"]]},
+
+{"description": "Valid numeric entity character U+BFFFD",
+"input": "&#xbfffd;",
+"output": [["Character", "\uDABF\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+C0000",
+"input": "&#xc0000;",
+"output": [["Character", "\uDAC0\uDC00"]]},
+
+{"description": "Valid numeric entity character U+CFFFD",
+"input": "&#xcfffd;",
+"output": [["Character", "\uDAFF\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+D0000",
+"input": "&#xd0000;",
+"output": [["Character", "\uDB00\uDC00"]]},
+
+{"description": "Valid numeric entity character U+DFFFD",
+"input": "&#xdfffd;",
+"output": [["Character", "\uDB3F\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+E0000",
+"input": "&#xe0000;",
+"output": [["Character", "\uDB40\uDC00"]]},
+
+{"description": "Valid numeric entity character U+EFFFD",
+"input": "&#xefffd;",
+"output": [["Character", "\uDB7F\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+F0000",
+"input": "&#xf0000;",
+"output": [["Character", "\uDB80\uDC00"]]},
+
+{"description": "Valid numeric entity character U+FFFFD",
+"input": "&#xffffd;",
+"output": [["Character", "\uDBBF\uDFFD"]]},
+
+{"description": "Valid numeric entity character U+100000",
+"input": "&#x100000;",
+"output": [["Character", "\uDBC0\uDC00"]]},
+
+{"description": "Valid numeric entity character U+10FFFD",
+"input": "&#x10fffd;",
+"output": [["Character", "\uDBFF\uDFFD"]]}
+
+]}
+
+
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/pendingSpecChanges.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/pendingSpecChanges.test
new file mode 100644
index 000000000..1b7dc3c72
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/pendingSpecChanges.test
@@ -0,0 +1,7 @@
+{"tests": [
+
+{"description":"<!---- >",
+"input":"<!---- >",
+"output":["ParseError", "ParseError", ["Comment","-- >"]]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test1.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test1.test
new file mode 100644
index 000000000..b97b2cbec
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test1.test
@@ -0,0 +1,196 @@
+{"tests": [
+
+{"description":"Correct Doctype lowercase",
+"input":"<!DOCTYPE html>",
+"output":[["DOCTYPE", "html", null, null, true]]},
+
+{"description":"Correct Doctype uppercase",
+"input":"<!DOCTYPE HTML>",
+"output":[["DOCTYPE", "html", null, null, true]]},
+
+{"description":"Correct Doctype mixed case",
+"input":"<!DOCTYPE HtMl>",
+"output":[["DOCTYPE", "html", null, null, true]]},
+
+{"description":"Correct Doctype case with EOF",
+"input":"<!DOCTYPE HtMl",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false]]},
+
+{"description":"Truncated doctype start",
+"input":"<!DOC>",
+"output":["ParseError", ["Comment", "DOC"]]},
+
+{"description":"Doctype in error",
+"input":"<!DOCTYPE foo>",
+"output":[["DOCTYPE", "foo", null, null, true]]},
+
+{"description":"Single Start Tag",
+"input":"<h>",
+"output":[["StartTag", "h", {}]]},
+
+{"description":"Empty end tag",
+"input":"</>",
+"output":["ParseError"]},
+
+{"description":"Empty start tag",
+"input":"<>",
+"output":["ParseError", ["Character", "<>"]]},
+
+{"description":"Start Tag w/attribute",
+"input":"<h a='b'>",
+"output":[["StartTag", "h", {"a":"b"}]]},
+
+{"description":"Start Tag w/attribute no quotes",
+"input":"<h a=b>",
+"output":[["StartTag", "h", {"a":"b"}]]},
+
+{"description":"Start/End Tag",
+"input":"<h></h>",
+"output":[["StartTag", "h", {}], ["EndTag", "h"]]},
+
+{"description":"Two unclosed start tags",
+"input":"<p>One<p>Two",
+"output":[["StartTag", "p", {}], ["Character", "One"], ["StartTag", "p", {}], ["Character", "Two"]]},
+
+{"description":"End Tag w/attribute",
+"input":"<h></h a='b'>",
+"output":[["StartTag", "h", {}], "ParseError", ["EndTag", "h"]]},
+
+{"description":"Multiple atts",
+"input":"<h a='b' c='d'>",
+"output":[["StartTag", "h", {"a":"b", "c":"d"}]]},
+
+{"description":"Multiple atts no space",
+"input":"<h a='b'c='d'>",
+"output":["ParseError", ["StartTag", "h", {"a":"b", "c":"d"}]]},
+
+{"description":"Repeated attr",
+ "input":"<h a='b' a='d'>",
+ "output":["ParseError", ["StartTag", "h", {"a":"b"}]]},
+
+{"description":"Simple comment",
+ "input":"<!--comment-->",
+ "output":[["Comment", "comment"]]},
+
+{"description":"Comment, Central dash no space",
+ "input":"<!----->",
+ "output":["ParseError", ["Comment", "-"]]},
+
+{"description":"Comment, two central dashes",
+"input":"<!-- --comment -->",
+"output":["ParseError", ["Comment", " --comment "]]},
+
+{"description":"Unfinished comment",
+"input":"<!--comment",
+"output":["ParseError", ["Comment", "comment"]]},
+
+{"description":"Start of a comment",
+"input":"<!-",
+"output":["ParseError", ["Comment", "-"]]},
+
+{"description":"Short comment",
+ "input":"<!-->",
+ "output":["ParseError", ["Comment", ""]]},
+
+{"description":"Short comment two",
+ "input":"<!--->",
+ "output":["ParseError", ["Comment", ""]]},
+
+{"description":"Short comment three",
+ "input":"<!---->",
+ "output":[["Comment", ""]]},
+
+
+{"description":"Ampersand EOF",
+"input":"&",
+"output":[["Character", "&"]]},
+
+{"description":"Ampersand ampersand EOF",
+"input":"&&",
+"output":[["Character", "&&"]]},
+
+{"description":"Ampersand space EOF",
+"input":"& ",
+"output":[["Character", "& "]]},
+
+{"description":"Unfinished entity",
+"input":"&f",
+"output":[["Character", "&f"]]},
+
+{"description":"Ampersand, number sign",
+"input":"&#",
+"output":["ParseError", ["Character", "&#"]]},
+
+{"description":"Unfinished numeric entity",
+"input":"&#x",
+"output":["ParseError", ["Character", "&#x"]]},
+
+{"description":"Entity with trailing semicolon (1)",
+"input":"I'm &not;it",
+"output":[["Character","I'm \u00ACit"]]},
+
+{"description":"Entity with trailing semicolon (2)",
+"input":"I'm &notin;",
+"output":[["Character","I'm \u2209"]]},
+
+{"description":"Entity without trailing semicolon (1)",
+"input":"I'm &notit",
+"output":[["Character","I'm "], "ParseError", ["Character", "\u00ACit"]]},
+
+{"description":"Entity without trailing semicolon (2)",
+"input":"I'm &notin",
+"output":[["Character","I'm "], "ParseError", ["Character", "\u00ACin"]]},
+
+{"description":"Partial entity match at end of file",
+"input":"I'm &no",
+"output":[["Character","I'm &no"]]},
+
+{"description":"Non-ASCII character reference name",
+"input":"&\u00AC;",
+"output":[["Character", "&\u00AC;"]]},
+
+{"description":"ASCII decimal entity",
+"input":"&#0036;",
+"output":[["Character","$"]]},
+
+{"description":"ASCII hexadecimal entity",
+"input":"&#x3f;",
+"output":[["Character","?"]]},
+
+{"description":"Hexadecimal entity in attribute",
+"input":"<h a='&#x3f;'></h>",
+"output":[["StartTag", "h", {"a":"?"}], ["EndTag", "h"]]},
+
+{"description":"Entity in attribute without semicolon ending in x",
+"input":"<h a='&notx'>",
+"output":[["StartTag", "h", {"a":"&notx"}]]},
+
+{"description":"Entity in attribute without semicolon ending in 1",
+"input":"<h a='&not1'>",
+"output":[["StartTag", "h", {"a":"&not1"}]]},
+
+{"description":"Entity in attribute without semicolon ending in i",
+"input":"<h a='&noti'>",
+"output":[["StartTag", "h", {"a":"&noti"}]]},
+
+{"description":"Entity in attribute without semicolon",
+"input":"<h a='&COPY'>",
+"output":["ParseError", ["StartTag", "h", {"a":"\u00A9"}]]},
+
+{"description":"Unquoted attribute ending in ampersand",
+"input":"<s o=& t>",
+"output":[["StartTag","s",{"o":"&","t":""}]]},
+
+{"description":"Unquoted attribute at end of tag with final character of &, with tag followed by characters",
+"input":"<a a=a&>foo",
+"output":[["StartTag", "a", {"a":"a&"}], ["Character", "foo"]]},
+
+{"description":"plaintext element",
+ "input":"<plaintext>foobar",
+ "output":[["StartTag","plaintext",{}], ["Character","foobar"]]},
+
+{"description":"Open angled bracket in unquoted attribute value state",
+ "input":"<a a=f<>",
+ "output":["ParseError", ["StartTag", "a", {"a":"f<"}]]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test2.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test2.test
new file mode 100644
index 000000000..87a8eba34
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test2.test
@@ -0,0 +1,179 @@
+{"tests": [
+
+{"description":"DOCTYPE without name",
+"input":"<!DOCTYPE>",
+"output":["ParseError", "ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"DOCTYPE without space before name",
+"input":"<!DOCTYPEhtml>",
+"output":["ParseError", ["DOCTYPE", "html", null, null, true]]},
+
+{"description":"Incorrect DOCTYPE without a space before name",
+"input":"<!DOCTYPEfoo>",
+"output":["ParseError", ["DOCTYPE", "foo", null, null, true]]},
+
+{"description":"DOCTYPE with publicId",
+"input":"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML Transitional 4.01//EN\">",
+"output":[["DOCTYPE", "html", "-//W3C//DTD HTML Transitional 4.01//EN", null, true]]},
+
+{"description":"DOCTYPE with EOF after PUBLIC",
+"input":"<!DOCTYPE html PUBLIC",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false]]},
+
+{"description":"DOCTYPE with EOF after PUBLIC '",
+"input":"<!DOCTYPE html PUBLIC '",
+"output":["ParseError", ["DOCTYPE", "html", "", null, false]]},
+
+{"description":"DOCTYPE with EOF after PUBLIC 'x",
+"input":"<!DOCTYPE html PUBLIC 'x",
+"output":["ParseError", ["DOCTYPE", "html", "x", null, false]]},
+
+{"description":"DOCTYPE with systemId",
+"input":"<!DOCTYPE html SYSTEM \"-//W3C//DTD HTML Transitional 4.01//EN\">",
+"output":[["DOCTYPE", "html", null, "-//W3C//DTD HTML Transitional 4.01//EN", true]]},
+
+{"description":"DOCTYPE with publicId and systemId",
+"input":"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML Transitional 4.01//EN\" \"-//W3C//DTD HTML Transitional 4.01//EN\">",
+"output":[["DOCTYPE", "html", "-//W3C//DTD HTML Transitional 4.01//EN", "-//W3C//DTD HTML Transitional 4.01//EN", true]]},
+
+{"description":"DOCTYPE with > in double-quoted publicId",
+"input":"<!DOCTYPE html PUBLIC \">x",
+"output":["ParseError", ["DOCTYPE", "html", "", null, false], ["Character", "x"]]},
+
+{"description":"DOCTYPE with > in single-quoted publicId",
+"input":"<!DOCTYPE html PUBLIC '>x",
+"output":["ParseError", ["DOCTYPE", "html", "", null, false], ["Character", "x"]]},
+
+{"description":"DOCTYPE with > in double-quoted systemId",
+"input":"<!DOCTYPE html PUBLIC \"foo\" \">x",
+"output":["ParseError", ["DOCTYPE", "html", "foo", "", false], ["Character", "x"]]},
+
+{"description":"DOCTYPE with > in single-quoted systemId",
+"input":"<!DOCTYPE html PUBLIC 'foo' '>x",
+"output":["ParseError", ["DOCTYPE", "html", "foo", "", false], ["Character", "x"]]},
+
+{"description":"Incomplete doctype",
+"input":"<!DOCTYPE html ",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false]]},
+
+{"description":"Numeric entity representing the NUL character",
+"input":"&#0000;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"Hexadecimal entity representing the NUL character",
+"input":"&#x0000;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"Numeric entity representing a codepoint after 1114111 (U+10FFFF)",
+"input":"&#2225222;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"Hexadecimal entity representing a codepoint after 1114111 (U+10FFFF)",
+"input":"&#x1010FFFF;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"Hexadecimal entity pair representing a surrogate pair",
+"input":"&#xD869;&#xDED6;",
+"output":["ParseError", ["Character", "\uFFFD"], "ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"Hexadecimal entity with mixed uppercase and lowercase",
+"input":"&#xaBcD;",
+"output":[["Character", "\uABCD"]]},
+
+{"description":"Entity without a name",
+"input":"&;",
+"output":[["Character", "&;"]]},
+
+{"description":"Unescaped ampersand in attribute value",
+"input":"<h a='&'>",
+"output":[["StartTag", "h", { "a":"&" }]]},
+
+{"description":"StartTag containing <",
+"input":"<a<b>",
+"output":[["StartTag", "a<b", { }]]},
+
+{"description":"Non-void element containing trailing /",
+"input":"<h/>",
+"output":[["StartTag","h",{},true]]},
+
+{"description":"Void element with permitted slash",
+"input":"<br/>",
+"output":[["StartTag","br",{},true]]},
+
+{"description":"Void element with permitted slash (with attribute)",
+"input":"<br foo='bar'/>",
+"output":[["StartTag","br",{"foo":"bar"},true]]},
+
+{"description":"StartTag containing /",
+"input":"<h/a='b'>",
+"output":["ParseError", ["StartTag", "h", { "a":"b" }]]},
+
+{"description":"Double-quoted attribute value",
+"input":"<h a=\"b\">",
+"output":[["StartTag", "h", { "a":"b" }]]},
+
+{"description":"Unescaped </",
+"input":"</",
+"output":["ParseError", ["Character", "</"]]},
+
+{"description":"Illegal end tag name",
+"input":"</1>",
+"output":["ParseError", ["Comment", "1"]]},
+
+{"description":"Simili processing instruction",
+"input":"<?namespace>",
+"output":["ParseError", ["Comment", "?namespace"]]},
+
+{"description":"A bogus comment stops at >, even if preceeded by two dashes",
+"input":"<?foo-->",
+"output":["ParseError", ["Comment", "?foo--"]]},
+
+{"description":"Unescaped <",
+"input":"foo < bar",
+"output":[["Character", "foo "], "ParseError", ["Character", "< bar"]]},
+
+{"description":"Null Byte Replacement",
+"input":"\u0000",
+"output":["ParseError", ["Character", "\u0000"]]},
+
+{"description":"Comment with dash",
+"input":"<!---x",
+"output":["ParseError", ["Comment", "-x"]]},
+
+{"description":"Entity + newline",
+"input":"\nx\n&gt;\n",
+"output":[["Character","\nx\n>\n"]]},
+
+{"description":"Start tag with no attributes but space before the greater-than sign",
+"input":"<h >",
+"output":[["StartTag", "h", {}]]},
+
+{"description":"Empty attribute followed by uppercase attribute",
+"input":"<h a B=''>",
+"output":[["StartTag", "h", {"a":"", "b":""}]]},
+
+{"description":"Double-quote after attribute name",
+"input":"<h a \">",
+"output":["ParseError", ["StartTag", "h", {"a":"", "\"":""}]]},
+
+{"description":"Single-quote after attribute name",
+"input":"<h a '>",
+"output":["ParseError", ["StartTag", "h", {"a":"", "'":""}]]},
+
+{"description":"Empty end tag with following characters",
+"input":"a</>bc",
+"output":[["Character", "a"], "ParseError", ["Character", "bc"]]},
+
+{"description":"Empty end tag with following tag",
+"input":"a</><b>c",
+"output":[["Character", "a"], "ParseError", ["StartTag", "b", {}], ["Character", "c"]]},
+
+{"description":"Empty end tag with following comment",
+"input":"a</><!--b-->c",
+"output":[["Character", "a"], "ParseError", ["Comment", "b"], ["Character", "c"]]},
+
+{"description":"Empty end tag with following end tag",
+"input":"a</></b>c",
+"output":[["Character", "a"], "ParseError", ["EndTag", "b"], ["Character", "c"]]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test3.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test3.test
new file mode 100644
index 000000000..8fc529a2b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test3.test
@@ -0,0 +1,6047 @@
+{"tests": [
+
+{"description":"",
+"input":"",
+"output":[]},
+
+{"description":"\\u0009",
+"input":"\u0009",
+"output":[["Character", "\u0009"]]},
+
+{"description":"\\u000A",
+"input":"\u000A",
+"output":[["Character", "\u000A"]]},
+
+{"description":"\\u000B",
+"input":"\u000B",
+"output":["ParseError", ["Character", "\u000B"]]},
+
+{"description":"\\u000C",
+"input":"\u000C",
+"output":[["Character", "\u000C"]]},
+
+{"description":" ",
+"input":" ",
+"output":[["Character", " "]]},
+
+{"description":"!",
+"input":"!",
+"output":[["Character", "!"]]},
+
+{"description":"\"",
+"input":"\"",
+"output":[["Character", "\""]]},
+
+{"description":"%",
+"input":"%",
+"output":[["Character", "%"]]},
+
+{"description":"&",
+"input":"&",
+"output":[["Character", "&"]]},
+
+{"description":"'",
+"input":"'",
+"output":[["Character", "'"]]},
+
+{"description":",",
+"input":",",
+"output":[["Character", ","]]},
+
+{"description":"-",
+"input":"-",
+"output":[["Character", "-"]]},
+
+{"description":".",
+"input":".",
+"output":[["Character", "."]]},
+
+{"description":"/",
+"input":"/",
+"output":[["Character", "/"]]},
+
+{"description":"0",
+"input":"0",
+"output":[["Character", "0"]]},
+
+{"description":"1",
+"input":"1",
+"output":[["Character", "1"]]},
+
+{"description":"9",
+"input":"9",
+"output":[["Character", "9"]]},
+
+{"description":";",
+"input":";",
+"output":[["Character", ";"]]},
+
+{"description":"<",
+"input":"<",
+"output":["ParseError", ["Character", "<"]]},
+
+{"description":"<\\u0000",
+"input":"<\u0000",
+"output":["ParseError", ["Character", "<"], "ParseError", ["Character", "\u0000"]]},
+
+{"description":"<\\u0009",
+"input":"<\u0009",
+"output":["ParseError", ["Character", "<\u0009"]]},
+
+{"description":"<\\u000A",
+"input":"<\u000A",
+"output":["ParseError", ["Character", "<\u000A"]]},
+
+{"description":"<\\u000B",
+"input":"<\u000B",
+"output":["ParseError", "ParseError", ["Character", "<\u000B"]]},
+
+{"description":"<\\u000C",
+"input":"<\u000C",
+"output":["ParseError", ["Character", "<\u000C"]]},
+
+{"description":"< ",
+"input":"< ",
+"output":["ParseError", ["Character", "< "]]},
+
+{"description":"<!",
+"input":"<!",
+"output":["ParseError", ["Comment", ""]]},
+
+{"description":"<!\\u0000",
+"input":"<!\u0000",
+"output":["ParseError", ["Comment", "\uFFFD"]]},
+
+{"description":"<!\\u0009",
+"input":"<!\u0009",
+"output":["ParseError", ["Comment", "\u0009"]]},
+
+{"description":"<!\\u000A",
+"input":"<!\u000A",
+"output":["ParseError", ["Comment", "\u000A"]]},
+
+{"description":"<!\\u000B",
+"input":"<!\u000B",
+"output":["ParseError", "ParseError", ["Comment", "\u000B"]]},
+
+{"description":"<!\\u000C",
+"input":"<!\u000C",
+"output":["ParseError", ["Comment", "\u000C"]]},
+
+{"description":"<! ",
+"input":"<! ",
+"output":["ParseError", ["Comment", " "]]},
+
+{"description":"<!!",
+"input":"<!!",
+"output":["ParseError", ["Comment", "!"]]},
+
+{"description":"<!\"",
+"input":"<!\"",
+"output":["ParseError", ["Comment", "\""]]},
+
+{"description":"<!&",
+"input":"<!&",
+"output":["ParseError", ["Comment", "&"]]},
+
+{"description":"<!'",
+"input":"<!'",
+"output":["ParseError", ["Comment", "'"]]},
+
+{"description":"<!-",
+"input":"<!-",
+"output":["ParseError", ["Comment", "-"]]},
+
+{"description":"<!--",
+"input":"<!--",
+"output":["ParseError", ["Comment", ""]]},
+
+{"description":"<!--\\u0000",
+"input":"<!--\u0000",
+"output":["ParseError", "ParseError", ["Comment", "\uFFFD"]]},
+
+{"description":"<!--\\u0009",
+"input":"<!--\u0009",
+"output":["ParseError", ["Comment", "\u0009"]]},
+
+{"description":"<!--\\u000A",
+"input":"<!--\u000A",
+"output":["ParseError", ["Comment", "\u000A"]]},
+
+{"description":"<!--\\u000B",
+"input":"<!--\u000B",
+"output":["ParseError", "ParseError", ["Comment", "\u000B"]]},
+
+{"description":"<!--\\u000C",
+"input":"<!--\u000C",
+"output":["ParseError", ["Comment", "\u000C"]]},
+
+{"description":"<!-- ",
+"input":"<!-- ",
+"output":["ParseError", ["Comment", " "]]},
+
+{"description":"<!-- \\u0000",
+"input":"<!-- \u0000",
+"output":["ParseError", "ParseError", ["Comment", " \uFFFD"]]},
+
+{"description":"<!-- \\u0009",
+"input":"<!-- \u0009",
+"output":["ParseError", ["Comment", " \u0009"]]},
+
+{"description":"<!-- \\u000A",
+"input":"<!-- \u000A",
+"output":["ParseError", ["Comment", " \u000A"]]},
+
+{"description":"<!-- \\u000B",
+"input":"<!-- \u000B",
+"output":["ParseError", "ParseError", ["Comment", " \u000B"]]},
+
+{"description":"<!-- \\u000C",
+"input":"<!-- \u000C",
+"output":["ParseError", ["Comment", " \u000C"]]},
+
+{"description":"<!-- ",
+"input":"<!-- ",
+"output":["ParseError", ["Comment", " "]]},
+
+{"description":"<!-- !",
+"input":"<!-- !",
+"output":["ParseError", ["Comment", " !"]]},
+
+{"description":"<!-- \"",
+"input":"<!-- \"",
+"output":["ParseError", ["Comment", " \""]]},
+
+{"description":"<!-- &",
+"input":"<!-- &",
+"output":["ParseError", ["Comment", " &"]]},
+
+{"description":"<!-- '",
+"input":"<!-- '",
+"output":["ParseError", ["Comment", " '"]]},
+
+{"description":"<!-- ,",
+"input":"<!-- ,",
+"output":["ParseError", ["Comment", " ,"]]},
+
+{"description":"<!-- -",
+"input":"<!-- -",
+"output":["ParseError", ["Comment", " "]]},
+
+{"description":"<!-- -\\u0000",
+"input":"<!-- -\u0000",
+"output":["ParseError", "ParseError", ["Comment", " -\uFFFD"]]},
+
+{"description":"<!-- -\\u0009",
+"input":"<!-- -\u0009",
+"output":["ParseError", ["Comment", " -\u0009"]]},
+
+{"description":"<!-- -\\u000A",
+"input":"<!-- -\u000A",
+"output":["ParseError", ["Comment", " -\u000A"]]},
+
+{"description":"<!-- -\\u000B",
+"input":"<!-- -\u000B",
+"output":["ParseError", "ParseError", ["Comment", " -\u000B"]]},
+
+{"description":"<!-- -\\u000C",
+"input":"<!-- -\u000C",
+"output":["ParseError", ["Comment", " -\u000C"]]},
+
+{"description":"<!-- - ",
+"input":"<!-- - ",
+"output":["ParseError", ["Comment", " - "]]},
+
+{"description":"<!-- -!",
+"input":"<!-- -!",
+"output":["ParseError", ["Comment", " -!"]]},
+
+{"description":"<!-- -\"",
+"input":"<!-- -\"",
+"output":["ParseError", ["Comment", " -\""]]},
+
+{"description":"<!-- -&",
+"input":"<!-- -&",
+"output":["ParseError", ["Comment", " -&"]]},
+
+{"description":"<!-- -'",
+"input":"<!-- -'",
+"output":["ParseError", ["Comment", " -'"]]},
+
+{"description":"<!-- -,",
+"input":"<!-- -,",
+"output":["ParseError", ["Comment", " -,"]]},
+
+{"description":"<!-- --",
+"input":"<!-- --",
+"output":["ParseError", ["Comment", " "]]},
+
+{"description":"<!-- -.",
+"input":"<!-- -.",
+"output":["ParseError", ["Comment", " -."]]},
+
+{"description":"<!-- -/",
+"input":"<!-- -/",
+"output":["ParseError", ["Comment", " -/"]]},
+
+{"description":"<!-- -0",
+"input":"<!-- -0",
+"output":["ParseError", ["Comment", " -0"]]},
+
+{"description":"<!-- -1",
+"input":"<!-- -1",
+"output":["ParseError", ["Comment", " -1"]]},
+
+{"description":"<!-- -9",
+"input":"<!-- -9",
+"output":["ParseError", ["Comment", " -9"]]},
+
+{"description":"<!-- -<",
+"input":"<!-- -<",
+"output":["ParseError", ["Comment", " -<"]]},
+
+{"description":"<!-- -=",
+"input":"<!-- -=",
+"output":["ParseError", ["Comment", " -="]]},
+
+{"description":"<!-- ->",
+"input":"<!-- ->",
+"output":["ParseError", ["Comment", " ->"]]},
+
+{"description":"<!-- -?",
+"input":"<!-- -?",
+"output":["ParseError", ["Comment", " -?"]]},
+
+{"description":"<!-- -@",
+"input":"<!-- -@",
+"output":["ParseError", ["Comment", " -@"]]},
+
+{"description":"<!-- -A",
+"input":"<!-- -A",
+"output":["ParseError", ["Comment", " -A"]]},
+
+{"description":"<!-- -B",
+"input":"<!-- -B",
+"output":["ParseError", ["Comment", " -B"]]},
+
+{"description":"<!-- -Y",
+"input":"<!-- -Y",
+"output":["ParseError", ["Comment", " -Y"]]},
+
+{"description":"<!-- -Z",
+"input":"<!-- -Z",
+"output":["ParseError", ["Comment", " -Z"]]},
+
+{"description":"<!-- -`",
+"input":"<!-- -`",
+"output":["ParseError", ["Comment", " -`"]]},
+
+{"description":"<!-- -a",
+"input":"<!-- -a",
+"output":["ParseError", ["Comment", " -a"]]},
+
+{"description":"<!-- -b",
+"input":"<!-- -b",
+"output":["ParseError", ["Comment", " -b"]]},
+
+{"description":"<!-- -y",
+"input":"<!-- -y",
+"output":["ParseError", ["Comment", " -y"]]},
+
+{"description":"<!-- -z",
+"input":"<!-- -z",
+"output":["ParseError", ["Comment", " -z"]]},
+
+{"description":"<!-- -{",
+"input":"<!-- -{",
+"output":["ParseError", ["Comment", " -{"]]},
+
+{"description":"<!-- -\\uDBC0\\uDC00",
+"input":"<!-- -\uDBC0\uDC00",
+"output":["ParseError", ["Comment", " -\uDBC0\uDC00"]]},
+
+{"description":"<!-- .",
+"input":"<!-- .",
+"output":["ParseError", ["Comment", " ."]]},
+
+{"description":"<!-- /",
+"input":"<!-- /",
+"output":["ParseError", ["Comment", " /"]]},
+
+{"description":"<!-- 0",
+"input":"<!-- 0",
+"output":["ParseError", ["Comment", " 0"]]},
+
+{"description":"<!-- 1",
+"input":"<!-- 1",
+"output":["ParseError", ["Comment", " 1"]]},
+
+{"description":"<!-- 9",
+"input":"<!-- 9",
+"output":["ParseError", ["Comment", " 9"]]},
+
+{"description":"<!-- <",
+"input":"<!-- <",
+"output":["ParseError", ["Comment", " <"]]},
+
+{"description":"<!-- =",
+"input":"<!-- =",
+"output":["ParseError", ["Comment", " ="]]},
+
+{"description":"<!-- >",
+"input":"<!-- >",
+"output":["ParseError", ["Comment", " >"]]},
+
+{"description":"<!-- ?",
+"input":"<!-- ?",
+"output":["ParseError", ["Comment", " ?"]]},
+
+{"description":"<!-- @",
+"input":"<!-- @",
+"output":["ParseError", ["Comment", " @"]]},
+
+{"description":"<!-- A",
+"input":"<!-- A",
+"output":["ParseError", ["Comment", " A"]]},
+
+{"description":"<!-- B",
+"input":"<!-- B",
+"output":["ParseError", ["Comment", " B"]]},
+
+{"description":"<!-- Y",
+"input":"<!-- Y",
+"output":["ParseError", ["Comment", " Y"]]},
+
+{"description":"<!-- Z",
+"input":"<!-- Z",
+"output":["ParseError", ["Comment", " Z"]]},
+
+{"description":"<!-- `",
+"input":"<!-- `",
+"output":["ParseError", ["Comment", " `"]]},
+
+{"description":"<!-- a",
+"input":"<!-- a",
+"output":["ParseError", ["Comment", " a"]]},
+
+{"description":"<!-- b",
+"input":"<!-- b",
+"output":["ParseError", ["Comment", " b"]]},
+
+{"description":"<!-- y",
+"input":"<!-- y",
+"output":["ParseError", ["Comment", " y"]]},
+
+{"description":"<!-- z",
+"input":"<!-- z",
+"output":["ParseError", ["Comment", " z"]]},
+
+{"description":"<!-- {",
+"input":"<!-- {",
+"output":["ParseError", ["Comment", " {"]]},
+
+{"description":"<!-- \\uDBC0\\uDC00",
+"input":"<!-- \uDBC0\uDC00",
+"output":["ParseError", ["Comment", " \uDBC0\uDC00"]]},
+
+{"description":"<!--!",
+"input":"<!--!",
+"output":["ParseError", ["Comment", "!"]]},
+
+{"description":"<!--\"",
+"input":"<!--\"",
+"output":["ParseError", ["Comment", "\""]]},
+
+{"description":"<!--&",
+"input":"<!--&",
+"output":["ParseError", ["Comment", "&"]]},
+
+{"description":"<!--'",
+"input":"<!--'",
+"output":["ParseError", ["Comment", "'"]]},
+
+{"description":"<!--,",
+"input":"<!--,",
+"output":["ParseError", ["Comment", ","]]},
+
+{"description":"<!---",
+"input":"<!---",
+"output":["ParseError", ["Comment", ""]]},
+
+{"description":"<!---\\u0000",
+"input":"<!---\u0000",
+"output":["ParseError", "ParseError", ["Comment", "-\uFFFD"]]},
+
+{"description":"<!---\\u0009",
+"input":"<!---\u0009",
+"output":["ParseError", ["Comment", "-\u0009"]]},
+
+{"description":"<!---\\u000A",
+"input":"<!---\u000A",
+"output":["ParseError", ["Comment", "-\u000A"]]},
+
+{"description":"<!---\\u000B",
+"input":"<!---\u000B",
+"output":["ParseError", "ParseError", ["Comment", "-\u000B"]]},
+
+{"description":"<!---\\u000C",
+"input":"<!---\u000C",
+"output":["ParseError", ["Comment", "-\u000C"]]},
+
+{"description":"<!--- ",
+"input":"<!--- ",
+"output":["ParseError", ["Comment", "- "]]},
+
+{"description":"<!---!",
+"input":"<!---!",
+"output":["ParseError", ["Comment", "-!"]]},
+
+{"description":"<!---\"",
+"input":"<!---\"",
+"output":["ParseError", ["Comment", "-\""]]},
+
+{"description":"<!---&",
+"input":"<!---&",
+"output":["ParseError", ["Comment", "-&"]]},
+
+{"description":"<!---'",
+"input":"<!---'",
+"output":["ParseError", ["Comment", "-'"]]},
+
+{"description":"<!---,",
+"input":"<!---,",
+"output":["ParseError", ["Comment", "-,"]]},
+
+{"description":"<!----",
+"input":"<!----",
+"output":["ParseError", ["Comment", ""]]},
+
+{"description":"<!----\\u0000",
+"input":"<!----\u0000",
+"output":["ParseError", "ParseError", ["Comment", "--\uFFFD"]]},
+
+{"description":"<!----\\u0009",
+"input":"<!----\u0009",
+"output":["ParseError", "ParseError", ["Comment", "--\u0009"]]},
+
+{"description":"<!----\\u000A",
+"input":"<!----\u000A",
+"output":["ParseError", "ParseError", ["Comment", "--\u000A"]]},
+
+{"description":"<!----\\u000B",
+"input":"<!----\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["Comment", "--\u000B"]]},
+
+{"description":"<!----\\u000C",
+"input":"<!----\u000C",
+"output":["ParseError", "ParseError", ["Comment", "--\u000C"]]},
+
+{"description":"<!---- ",
+"input":"<!---- ",
+"output":["ParseError", "ParseError", ["Comment", "-- "]]},
+
+{"description":"<!---- -",
+"input":"<!---- -",
+"output":["ParseError", "ParseError", ["Comment", "-- "]]},
+
+{"description":"<!---- --",
+"input":"<!---- --",
+"output":["ParseError", "ParseError", ["Comment", "-- "]]},
+
+{"description":"<!---- -->",
+"input":"<!---- -->",
+"output":["ParseError", ["Comment", "-- "]]},
+
+{"description":"<!---- -->",
+"input":"<!---- -->",
+"output":["ParseError", ["Comment", "-- "]]},
+
+{"description":"<!---- a-->",
+"input":"<!---- a-->",
+"output":["ParseError", ["Comment", "-- a"]]},
+
+{"description":"<!----!",
+"input":"<!----!",
+"output":["ParseError", "ParseError", ["Comment", ""]]},
+
+{"description":"<!----!>",
+"input":"<!----!>",
+"output":["ParseError", ["Comment", ""]]},
+
+{"description":"<!----!a",
+"input":"<!----!a",
+"output":["ParseError", "ParseError", ["Comment", "--!a"]]},
+
+{"description":"<!----!a-",
+"input":"<!----!a-",
+"output":["ParseError", "ParseError", ["Comment", "--!a"]]},
+
+{"description":"<!----!a--",
+"input":"<!----!a--",
+"output":["ParseError", "ParseError", ["Comment", "--!a"]]},
+
+{"description":"<!----!a-->",
+"input":"<!----!a-->",
+"output":["ParseError", ["Comment", "--!a"]]},
+
+{"description":"<!----!-",
+"input":"<!----!-",
+"output":["ParseError", "ParseError", ["Comment", "--!"]]},
+
+{"description":"<!----!--",
+"input":"<!----!--",
+"output":["ParseError", "ParseError", ["Comment", "--!"]]},
+
+{"description":"<!----!-->",
+"input":"<!----!-->",
+"output":["ParseError", ["Comment", "--!"]]},
+
+{"description":"<!----\"",
+"input":"<!----\"",
+"output":["ParseError", "ParseError", ["Comment", "--\""]]},
+
+{"description":"<!----&",
+"input":"<!----&",
+"output":["ParseError", "ParseError", ["Comment", "--&"]]},
+
+{"description":"<!----'",
+"input":"<!----'",
+"output":["ParseError", "ParseError", ["Comment", "--'"]]},
+
+{"description":"<!----,",
+"input":"<!----,",
+"output":["ParseError", "ParseError", ["Comment", "--,"]]},
+
+{"description":"<!-----",
+"input":"<!-----",
+"output":["ParseError", "ParseError", ["Comment", "-"]]},
+
+{"description":"<!----.",
+"input":"<!----.",
+"output":["ParseError", "ParseError", ["Comment", "--."]]},
+
+{"description":"<!----/",
+"input":"<!----/",
+"output":["ParseError", "ParseError", ["Comment", "--/"]]},
+
+{"description":"<!----0",
+"input":"<!----0",
+"output":["ParseError", "ParseError", ["Comment", "--0"]]},
+
+{"description":"<!----1",
+"input":"<!----1",
+"output":["ParseError", "ParseError", ["Comment", "--1"]]},
+
+{"description":"<!----9",
+"input":"<!----9",
+"output":["ParseError", "ParseError", ["Comment", "--9"]]},
+
+{"description":"<!----<",
+"input":"<!----<",
+"output":["ParseError", "ParseError", ["Comment", "--<"]]},
+
+{"description":"<!----=",
+"input":"<!----=",
+"output":["ParseError", "ParseError", ["Comment", "--="]]},
+
+{"description":"<!---->",
+"input":"<!---->",
+"output":[["Comment", ""]]},
+
+{"description":"<!----?",
+"input":"<!----?",
+"output":["ParseError", "ParseError", ["Comment", "--?"]]},
+
+{"description":"<!----@",
+"input":"<!----@",
+"output":["ParseError", "ParseError", ["Comment", "--@"]]},
+
+{"description":"<!----A",
+"input":"<!----A",
+"output":["ParseError", "ParseError", ["Comment", "--A"]]},
+
+{"description":"<!----B",
+"input":"<!----B",
+"output":["ParseError", "ParseError", ["Comment", "--B"]]},
+
+{"description":"<!----Y",
+"input":"<!----Y",
+"output":["ParseError", "ParseError", ["Comment", "--Y"]]},
+
+{"description":"<!----Z",
+"input":"<!----Z",
+"output":["ParseError", "ParseError", ["Comment", "--Z"]]},
+
+{"description":"<!----`",
+"input":"<!----`",
+"output":["ParseError", "ParseError", ["Comment", "--`"]]},
+
+{"description":"<!----a",
+"input":"<!----a",
+"output":["ParseError", "ParseError", ["Comment", "--a"]]},
+
+{"description":"<!----b",
+"input":"<!----b",
+"output":["ParseError", "ParseError", ["Comment", "--b"]]},
+
+{"description":"<!----y",
+"input":"<!----y",
+"output":["ParseError", "ParseError", ["Comment", "--y"]]},
+
+{"description":"<!----z",
+"input":"<!----z",
+"output":["ParseError", "ParseError", ["Comment", "--z"]]},
+
+{"description":"<!----{",
+"input":"<!----{",
+"output":["ParseError", "ParseError", ["Comment", "--{"]]},
+
+{"description":"<!----\\uDBC0\\uDC00",
+"input":"<!----\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["Comment", "--\uDBC0\uDC00"]]},
+
+{"description":"<!---.",
+"input":"<!---.",
+"output":["ParseError", ["Comment", "-."]]},
+
+{"description":"<!---/",
+"input":"<!---/",
+"output":["ParseError", ["Comment", "-/"]]},
+
+{"description":"<!---0",
+"input":"<!---0",
+"output":["ParseError", ["Comment", "-0"]]},
+
+{"description":"<!---1",
+"input":"<!---1",
+"output":["ParseError", ["Comment", "-1"]]},
+
+{"description":"<!---9",
+"input":"<!---9",
+"output":["ParseError", ["Comment", "-9"]]},
+
+{"description":"<!---<",
+"input":"<!---<",
+"output":["ParseError", ["Comment", "-<"]]},
+
+{"description":"<!---=",
+"input":"<!---=",
+"output":["ParseError", ["Comment", "-="]]},
+
+{"description":"<!--->",
+"input":"<!--->",
+"output":["ParseError", ["Comment", ""]]},
+
+{"description":"<!---?",
+"input":"<!---?",
+"output":["ParseError", ["Comment", "-?"]]},
+
+{"description":"<!---@",
+"input":"<!---@",
+"output":["ParseError", ["Comment", "-@"]]},
+
+{"description":"<!---A",
+"input":"<!---A",
+"output":["ParseError", ["Comment", "-A"]]},
+
+{"description":"<!---B",
+"input":"<!---B",
+"output":["ParseError", ["Comment", "-B"]]},
+
+{"description":"<!---Y",
+"input":"<!---Y",
+"output":["ParseError", ["Comment", "-Y"]]},
+
+{"description":"<!---Z",
+"input":"<!---Z",
+"output":["ParseError", ["Comment", "-Z"]]},
+
+{"description":"<!---`",
+"input":"<!---`",
+"output":["ParseError", ["Comment", "-`"]]},
+
+{"description":"<!---a",
+"input":"<!---a",
+"output":["ParseError", ["Comment", "-a"]]},
+
+{"description":"<!---b",
+"input":"<!---b",
+"output":["ParseError", ["Comment", "-b"]]},
+
+{"description":"<!---y",
+"input":"<!---y",
+"output":["ParseError", ["Comment", "-y"]]},
+
+{"description":"<!---z",
+"input":"<!---z",
+"output":["ParseError", ["Comment", "-z"]]},
+
+{"description":"<!---{",
+"input":"<!---{",
+"output":["ParseError", ["Comment", "-{"]]},
+
+{"description":"<!---\\uDBC0\\uDC00",
+"input":"<!---\uDBC0\uDC00",
+"output":["ParseError", ["Comment", "-\uDBC0\uDC00"]]},
+
+{"description":"<!--.",
+"input":"<!--.",
+"output":["ParseError", ["Comment", "."]]},
+
+{"description":"<!--/",
+"input":"<!--/",
+"output":["ParseError", ["Comment", "/"]]},
+
+{"description":"<!--0",
+"input":"<!--0",
+"output":["ParseError", ["Comment", "0"]]},
+
+{"description":"<!--1",
+"input":"<!--1",
+"output":["ParseError", ["Comment", "1"]]},
+
+{"description":"<!--9",
+"input":"<!--9",
+"output":["ParseError", ["Comment", "9"]]},
+
+{"description":"<!--<",
+"input":"<!--<",
+"output":["ParseError", ["Comment", "<"]]},
+
+{"description":"<!--=",
+"input":"<!--=",
+"output":["ParseError", ["Comment", "="]]},
+
+{"description":"<!-->",
+"input":"<!-->",
+"output":["ParseError", ["Comment", ""]]},
+
+{"description":"<!--?",
+"input":"<!--?",
+"output":["ParseError", ["Comment", "?"]]},
+
+{"description":"<!--@",
+"input":"<!--@",
+"output":["ParseError", ["Comment", "@"]]},
+
+{"description":"<!--A",
+"input":"<!--A",
+"output":["ParseError", ["Comment", "A"]]},
+
+{"description":"<!--B",
+"input":"<!--B",
+"output":["ParseError", ["Comment", "B"]]},
+
+{"description":"<!--Y",
+"input":"<!--Y",
+"output":["ParseError", ["Comment", "Y"]]},
+
+{"description":"<!--Z",
+"input":"<!--Z",
+"output":["ParseError", ["Comment", "Z"]]},
+
+{"description":"<!--`",
+"input":"<!--`",
+"output":["ParseError", ["Comment", "`"]]},
+
+{"description":"<!--a",
+"input":"<!--a",
+"output":["ParseError", ["Comment", "a"]]},
+
+{"description":"<!--b",
+"input":"<!--b",
+"output":["ParseError", ["Comment", "b"]]},
+
+{"description":"<!--y",
+"input":"<!--y",
+"output":["ParseError", ["Comment", "y"]]},
+
+{"description":"<!--z",
+"input":"<!--z",
+"output":["ParseError", ["Comment", "z"]]},
+
+{"description":"<!--{",
+"input":"<!--{",
+"output":["ParseError", ["Comment", "{"]]},
+
+{"description":"<!--\\uDBC0\\uDC00",
+"input":"<!--\uDBC0\uDC00",
+"output":["ParseError", ["Comment", "\uDBC0\uDC00"]]},
+
+{"description":"<!/",
+"input":"<!/",
+"output":["ParseError", ["Comment", "/"]]},
+
+{"description":"<!0",
+"input":"<!0",
+"output":["ParseError", ["Comment", "0"]]},
+
+{"description":"<!1",
+"input":"<!1",
+"output":["ParseError", ["Comment", "1"]]},
+
+{"description":"<!9",
+"input":"<!9",
+"output":["ParseError", ["Comment", "9"]]},
+
+{"description":"<!<",
+"input":"<!<",
+"output":["ParseError", ["Comment", "<"]]},
+
+{"description":"<!=",
+"input":"<!=",
+"output":["ParseError", ["Comment", "="]]},
+
+{"description":"<!>",
+"input":"<!>",
+"output":["ParseError", ["Comment", ""]]},
+
+{"description":"<!?",
+"input":"<!?",
+"output":["ParseError", ["Comment", "?"]]},
+
+{"description":"<!@",
+"input":"<!@",
+"output":["ParseError", ["Comment", "@"]]},
+
+{"description":"<!A",
+"input":"<!A",
+"output":["ParseError", ["Comment", "A"]]},
+
+{"description":"<!B",
+"input":"<!B",
+"output":["ParseError", ["Comment", "B"]]},
+
+{"description":"<!DOCTYPE",
+"input":"<!DOCTYPE",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE\\u0000",
+"input":"<!DOCTYPE\u0000",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "\uFFFD", null, null, false]]},
+
+{"description":"<!DOCTYPE\\u0008",
+"input":"<!DOCTYPE\u0008",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "\u0008", null, null, false]]},
+
+{"description":"<!DOCTYPE\\u0009",
+"input":"<!DOCTYPE\u0009",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE\\u000A",
+"input":"<!DOCTYPE\u000A",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE\\u000B",
+"input":"<!DOCTYPE\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "\u000B", null, null, false]]},
+
+{"description":"<!DOCTYPE\\u000C",
+"input":"<!DOCTYPE\u000C",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE\\u000D",
+"input":"<!DOCTYPE\u000D",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE\\u001F",
+"input":"<!DOCTYPE\u001F",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "\u001F", null, null, false]]},
+
+{"description":"<!DOCTYPE ",
+"input":"<!DOCTYPE ",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE \\u0000",
+"input":"<!DOCTYPE \u0000",
+"output":["ParseError", "ParseError", ["DOCTYPE", "\uFFFD", null, null, false]]},
+
+{"description":"<!DOCTYPE \\u0008",
+"input":"<!DOCTYPE \u0008",
+"output":["ParseError", "ParseError", ["DOCTYPE", "\u0008", null, null, false]]},
+
+{"description":"<!DOCTYPE \\u0009",
+"input":"<!DOCTYPE \u0009",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE \\u000A",
+"input":"<!DOCTYPE \u000A",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE \\u000B",
+"input":"<!DOCTYPE \u000B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "\u000B", null, null, false]]},
+
+{"description":"<!DOCTYPE \\u000C",
+"input":"<!DOCTYPE \u000C",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE \\u000D",
+"input":"<!DOCTYPE \u000D",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE \\u001F",
+"input":"<!DOCTYPE \u001F",
+"output":["ParseError", "ParseError", ["DOCTYPE", "\u001F", null, null, false]]},
+
+{"description":"<!DOCTYPE ",
+"input":"<!DOCTYPE ",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE !",
+"input":"<!DOCTYPE !",
+"output":["ParseError", ["DOCTYPE", "!", null, null, false]]},
+
+{"description":"<!DOCTYPE \"",
+"input":"<!DOCTYPE \"",
+"output":["ParseError", ["DOCTYPE", "\"", null, null, false]]},
+
+{"description":"<!DOCTYPE &",
+"input":"<!DOCTYPE &",
+"output":["ParseError", ["DOCTYPE", "&", null, null, false]]},
+
+{"description":"<!DOCTYPE '",
+"input":"<!DOCTYPE '",
+"output":["ParseError", ["DOCTYPE", "'", null, null, false]]},
+
+{"description":"<!DOCTYPE -",
+"input":"<!DOCTYPE -",
+"output":["ParseError", ["DOCTYPE", "-", null, null, false]]},
+
+{"description":"<!DOCTYPE /",
+"input":"<!DOCTYPE /",
+"output":["ParseError", ["DOCTYPE", "/", null, null, false]]},
+
+{"description":"<!DOCTYPE 0",
+"input":"<!DOCTYPE 0",
+"output":["ParseError", ["DOCTYPE", "0", null, null, false]]},
+
+{"description":"<!DOCTYPE 1",
+"input":"<!DOCTYPE 1",
+"output":["ParseError", ["DOCTYPE", "1", null, null, false]]},
+
+{"description":"<!DOCTYPE 9",
+"input":"<!DOCTYPE 9",
+"output":["ParseError", ["DOCTYPE", "9", null, null, false]]},
+
+{"description":"<!DOCTYPE <",
+"input":"<!DOCTYPE <",
+"output":["ParseError", ["DOCTYPE", "<", null, null, false]]},
+
+{"description":"<!DOCTYPE =",
+"input":"<!DOCTYPE =",
+"output":["ParseError", ["DOCTYPE", "=", null, null, false]]},
+
+{"description":"<!DOCTYPE >",
+"input":"<!DOCTYPE >",
+"output":["ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE ?",
+"input":"<!DOCTYPE ?",
+"output":["ParseError", ["DOCTYPE", "?", null, null, false]]},
+
+{"description":"<!DOCTYPE @",
+"input":"<!DOCTYPE @",
+"output":["ParseError", ["DOCTYPE", "@", null, null, false]]},
+
+{"description":"<!DOCTYPE A",
+"input":"<!DOCTYPE A",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE B",
+"input":"<!DOCTYPE B",
+"output":["ParseError", ["DOCTYPE", "b", null, null, false]]},
+
+{"description":"<!DOCTYPE Y",
+"input":"<!DOCTYPE Y",
+"output":["ParseError", ["DOCTYPE", "y", null, null, false]]},
+
+{"description":"<!DOCTYPE Z",
+"input":"<!DOCTYPE Z",
+"output":["ParseError", ["DOCTYPE", "z", null, null, false]]},
+
+{"description":"<!DOCTYPE [",
+"input":"<!DOCTYPE [",
+"output":["ParseError", ["DOCTYPE", "[", null, null, false]]},
+
+{"description":"<!DOCTYPE `",
+"input":"<!DOCTYPE `",
+"output":["ParseError", ["DOCTYPE", "`", null, null, false]]},
+
+{"description":"<!DOCTYPE a",
+"input":"<!DOCTYPE a",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\u0000",
+"input":"<!DOCTYPE a\u0000",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a\uFFFD", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\u0008",
+"input":"<!DOCTYPE a\u0008",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a\u0008", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\u0009",
+"input":"<!DOCTYPE a\u0009",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\u000A",
+"input":"<!DOCTYPE a\u000A",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\u000B",
+"input":"<!DOCTYPE a\u000B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a\u000B", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\u000C",
+"input":"<!DOCTYPE a\u000C",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\u000D",
+"input":"<!DOCTYPE a\u000D",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\u001F",
+"input":"<!DOCTYPE a\u001F",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a\u001F", null, null, false]]},
+
+{"description":"<!DOCTYPE a ",
+"input":"<!DOCTYPE a ",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\u0000",
+"input":"<!DOCTYPE a \u0000",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\u0008",
+"input":"<!DOCTYPE a \u0008",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\u0009",
+"input":"<!DOCTYPE a \u0009",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\u000A",
+"input":"<!DOCTYPE a \u000A",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\u000B",
+"input":"<!DOCTYPE a \u000B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\u000C",
+"input":"<!DOCTYPE a \u000C",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\u000D",
+"input":"<!DOCTYPE a \u000D",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\u001F",
+"input":"<!DOCTYPE a \u001F",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a ",
+"input":"<!DOCTYPE a ",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a !",
+"input":"<!DOCTYPE a !",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \"",
+"input":"<!DOCTYPE a \"",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a &",
+"input":"<!DOCTYPE a &",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a '",
+"input":"<!DOCTYPE a '",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a -",
+"input":"<!DOCTYPE a -",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a /",
+"input":"<!DOCTYPE a /",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a 0",
+"input":"<!DOCTYPE a 0",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a 1",
+"input":"<!DOCTYPE a 1",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a 9",
+"input":"<!DOCTYPE a 9",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a <",
+"input":"<!DOCTYPE a <",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a =",
+"input":"<!DOCTYPE a =",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a >",
+"input":"<!DOCTYPE a >",
+"output":[["DOCTYPE", "a", null, null, true]]},
+
+{"description":"<!DOCTYPE a ?",
+"input":"<!DOCTYPE a ?",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a @",
+"input":"<!DOCTYPE a @",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a A",
+"input":"<!DOCTYPE a A",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a B",
+"input":"<!DOCTYPE a B",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC",
+"input":"<!DOCTYPE a PUBLIC",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\u0000",
+"input":"<!DOCTYPE a PUBLIC\u0000",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\u0008",
+"input":"<!DOCTYPE a PUBLIC\u0008",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\u0009",
+"input":"<!DOCTYPE a PUBLIC\u0009",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\u000A",
+"input":"<!DOCTYPE a PUBLIC\u000A",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\u000B",
+"input":"<!DOCTYPE a PUBLIC\u000B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\u000C",
+"input":"<!DOCTYPE a PUBLIC\u000C",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\u000D",
+"input":"<!DOCTYPE a PUBLIC\u000D",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\u001F",
+"input":"<!DOCTYPE a PUBLIC\u001F",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC ",
+"input":"<!DOCTYPE a PUBLIC ",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC!",
+"input":"<!DOCTYPE a PUBLIC!",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"",
+"input":"<!DOCTYPE a PUBLIC\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"\\u0000",
+"input":"<!DOCTYPE a PUBLIC\"\u0000",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\uFFFD", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"\\u0009",
+"input":"<!DOCTYPE a PUBLIC\"\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\u0009", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"\\u000A",
+"input":"<!DOCTYPE a PUBLIC\"\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\u000A", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"\\u000B",
+"input":"<!DOCTYPE a PUBLIC\"\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u000B", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"\\u000C",
+"input":"<!DOCTYPE a PUBLIC\"\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\u000C", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\" ",
+"input":"<!DOCTYPE a PUBLIC\" ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", " ", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"!",
+"input":"<!DOCTYPE a PUBLIC\"!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "!", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"\"",
+"input":"<!DOCTYPE a PUBLIC\"\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"#",
+"input":"<!DOCTYPE a PUBLIC\"#",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "#", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"&",
+"input":"<!DOCTYPE a PUBLIC\"&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "&", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"'",
+"input":"<!DOCTYPE a PUBLIC\"'",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "'", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"-",
+"input":"<!DOCTYPE a PUBLIC\"-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "-", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"/",
+"input":"<!DOCTYPE a PUBLIC\"/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "/", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"0",
+"input":"<!DOCTYPE a PUBLIC\"0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "0", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"1",
+"input":"<!DOCTYPE a PUBLIC\"1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "1", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"9",
+"input":"<!DOCTYPE a PUBLIC\"9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "9", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"<",
+"input":"<!DOCTYPE a PUBLIC\"<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "<", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"=",
+"input":"<!DOCTYPE a PUBLIC\"=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "=", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\">",
+"input":"<!DOCTYPE a PUBLIC\">",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"?",
+"input":"<!DOCTYPE a PUBLIC\"?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "?", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"@",
+"input":"<!DOCTYPE a PUBLIC\"@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "@", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"A",
+"input":"<!DOCTYPE a PUBLIC\"A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "A", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"B",
+"input":"<!DOCTYPE a PUBLIC\"B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "B", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"Y",
+"input":"<!DOCTYPE a PUBLIC\"Y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "Y", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"Z",
+"input":"<!DOCTYPE a PUBLIC\"Z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "Z", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"`",
+"input":"<!DOCTYPE a PUBLIC\"`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "`", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"a",
+"input":"<!DOCTYPE a PUBLIC\"a",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "a", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"b",
+"input":"<!DOCTYPE a PUBLIC\"b",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "b", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"y",
+"input":"<!DOCTYPE a PUBLIC\"y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "y", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"z",
+"input":"<!DOCTYPE a PUBLIC\"z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "z", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"{",
+"input":"<!DOCTYPE a PUBLIC\"{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "{", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\"\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a PUBLIC\"\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\uDBC0\uDC00", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC#",
+"input":"<!DOCTYPE a PUBLIC#",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC&",
+"input":"<!DOCTYPE a PUBLIC&",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'",
+"input":"<!DOCTYPE a PUBLIC'",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'\\u0000",
+"input":"<!DOCTYPE a PUBLIC'\u0000",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\uFFFD", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'\\u0009",
+"input":"<!DOCTYPE a PUBLIC'\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\u0009", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'\\u000A",
+"input":"<!DOCTYPE a PUBLIC'\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\u000A", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'\\u000B",
+"input":"<!DOCTYPE a PUBLIC'\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u000B", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'\\u000C",
+"input":"<!DOCTYPE a PUBLIC'\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\u000C", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC' ",
+"input":"<!DOCTYPE a PUBLIC' ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", " ", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'!",
+"input":"<!DOCTYPE a PUBLIC'!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "!", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'\"",
+"input":"<!DOCTYPE a PUBLIC'\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\"", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'&",
+"input":"<!DOCTYPE a PUBLIC'&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "&", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''",
+"input":"<!DOCTYPE a PUBLIC''",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\u0000",
+"input":"<!DOCTYPE a PUBLIC''\u0000",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\u0008",
+"input":"<!DOCTYPE a PUBLIC''\u0008",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\u0009",
+"input":"<!DOCTYPE a PUBLIC''\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\u000A",
+"input":"<!DOCTYPE a PUBLIC''\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\u000B",
+"input":"<!DOCTYPE a PUBLIC''\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\u000C",
+"input":"<!DOCTYPE a PUBLIC''\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\u000D",
+"input":"<!DOCTYPE a PUBLIC''\u000D",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\u001F",
+"input":"<!DOCTYPE a PUBLIC''\u001F",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'' ",
+"input":"<!DOCTYPE a PUBLIC'' ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''!",
+"input":"<!DOCTYPE a PUBLIC''!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\"",
+"input":"<!DOCTYPE a PUBLIC''\"",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", "", false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''#",
+"input":"<!DOCTYPE a PUBLIC''#",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''&",
+"input":"<!DOCTYPE a PUBLIC''&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'''",
+"input":"<!DOCTYPE a PUBLIC'''",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", "", false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''(",
+"input":"<!DOCTYPE a PUBLIC''(",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''-",
+"input":"<!DOCTYPE a PUBLIC''-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''/",
+"input":"<!DOCTYPE a PUBLIC''/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''0",
+"input":"<!DOCTYPE a PUBLIC''0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''1",
+"input":"<!DOCTYPE a PUBLIC''1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''9",
+"input":"<!DOCTYPE a PUBLIC''9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''<",
+"input":"<!DOCTYPE a PUBLIC''<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''=",
+"input":"<!DOCTYPE a PUBLIC''=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''>",
+"input":"<!DOCTYPE a PUBLIC''>",
+"output":["ParseError", ["DOCTYPE", "a", "", null, true]]},
+
+{"description":"<!DOCTYPE a PUBLIC''?",
+"input":"<!DOCTYPE a PUBLIC''?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''@",
+"input":"<!DOCTYPE a PUBLIC''@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''A",
+"input":"<!DOCTYPE a PUBLIC''A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''B",
+"input":"<!DOCTYPE a PUBLIC''B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''Y",
+"input":"<!DOCTYPE a PUBLIC''Y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''Z",
+"input":"<!DOCTYPE a PUBLIC''Z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''`",
+"input":"<!DOCTYPE a PUBLIC''`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''a",
+"input":"<!DOCTYPE a PUBLIC''a",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''b",
+"input":"<!DOCTYPE a PUBLIC''b",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''y",
+"input":"<!DOCTYPE a PUBLIC''y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''z",
+"input":"<!DOCTYPE a PUBLIC''z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''{",
+"input":"<!DOCTYPE a PUBLIC''{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC''\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a PUBLIC''\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'(",
+"input":"<!DOCTYPE a PUBLIC'(",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "(", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'-",
+"input":"<!DOCTYPE a PUBLIC'-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "-", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'/",
+"input":"<!DOCTYPE a PUBLIC'/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "/", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'0",
+"input":"<!DOCTYPE a PUBLIC'0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "0", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'1",
+"input":"<!DOCTYPE a PUBLIC'1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "1", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'9",
+"input":"<!DOCTYPE a PUBLIC'9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "9", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'<",
+"input":"<!DOCTYPE a PUBLIC'<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "<", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'=",
+"input":"<!DOCTYPE a PUBLIC'=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "=", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'>",
+"input":"<!DOCTYPE a PUBLIC'>",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'?",
+"input":"<!DOCTYPE a PUBLIC'?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "?", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'@",
+"input":"<!DOCTYPE a PUBLIC'@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "@", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'A",
+"input":"<!DOCTYPE a PUBLIC'A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "A", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'B",
+"input":"<!DOCTYPE a PUBLIC'B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "B", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'Y",
+"input":"<!DOCTYPE a PUBLIC'Y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "Y", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'Z",
+"input":"<!DOCTYPE a PUBLIC'Z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "Z", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'`",
+"input":"<!DOCTYPE a PUBLIC'`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "`", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'a",
+"input":"<!DOCTYPE a PUBLIC'a",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "a", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'b",
+"input":"<!DOCTYPE a PUBLIC'b",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "b", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'y",
+"input":"<!DOCTYPE a PUBLIC'y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "y", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'z",
+"input":"<!DOCTYPE a PUBLIC'z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "z", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'{",
+"input":"<!DOCTYPE a PUBLIC'{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "{", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC'\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a PUBLIC'\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "\uDBC0\uDC00", null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC(",
+"input":"<!DOCTYPE a PUBLIC(",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC-",
+"input":"<!DOCTYPE a PUBLIC-",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC/",
+"input":"<!DOCTYPE a PUBLIC/",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC0",
+"input":"<!DOCTYPE a PUBLIC0",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC1",
+"input":"<!DOCTYPE a PUBLIC1",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC9",
+"input":"<!DOCTYPE a PUBLIC9",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC<",
+"input":"<!DOCTYPE a PUBLIC<",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC=",
+"input":"<!DOCTYPE a PUBLIC=",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC>",
+"input":"<!DOCTYPE a PUBLIC>",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC?",
+"input":"<!DOCTYPE a PUBLIC?",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC@",
+"input":"<!DOCTYPE a PUBLIC@",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLICA",
+"input":"<!DOCTYPE a PUBLICA",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLICB",
+"input":"<!DOCTYPE a PUBLICB",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLICY",
+"input":"<!DOCTYPE a PUBLICY",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLICZ",
+"input":"<!DOCTYPE a PUBLICZ",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC`",
+"input":"<!DOCTYPE a PUBLIC`",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLICa",
+"input":"<!DOCTYPE a PUBLICa",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLICb",
+"input":"<!DOCTYPE a PUBLICb",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLICy",
+"input":"<!DOCTYPE a PUBLICy",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLICz",
+"input":"<!DOCTYPE a PUBLICz",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC{",
+"input":"<!DOCTYPE a PUBLIC{",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a PUBLIC\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a PUBLIC\uDBC0\uDC00",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM",
+"input":"<!DOCTYPE a SYSTEM",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\u0000",
+"input":"<!DOCTYPE a SYSTEM\u0000",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\u0008",
+"input":"<!DOCTYPE a SYSTEM\u0008",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\u0009",
+"input":"<!DOCTYPE a SYSTEM\u0009",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\u000A",
+"input":"<!DOCTYPE a SYSTEM\u000A",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\u000B",
+"input":"<!DOCTYPE a SYSTEM\u000B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\u000C",
+"input":"<!DOCTYPE a SYSTEM\u000C",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\u000D",
+"input":"<!DOCTYPE a SYSTEM\u000D",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\u001F",
+"input":"<!DOCTYPE a SYSTEM\u001F",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM ",
+"input":"<!DOCTYPE a SYSTEM ",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM!",
+"input":"<!DOCTYPE a SYSTEM!",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"",
+"input":"<!DOCTYPE a SYSTEM\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"\\u0000",
+"input":"<!DOCTYPE a SYSTEM\"\u0000",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\uFFFD", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"\\u0009",
+"input":"<!DOCTYPE a SYSTEM\"\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\u0009", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"\\u000A",
+"input":"<!DOCTYPE a SYSTEM\"\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000A", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"\\u000B",
+"input":"<!DOCTYPE a SYSTEM\"\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000B", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"\\u000C",
+"input":"<!DOCTYPE a SYSTEM\"\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000C", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\" ",
+"input":"<!DOCTYPE a SYSTEM\" ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, " ", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"!",
+"input":"<!DOCTYPE a SYSTEM\"!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "!", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"\"",
+"input":"<!DOCTYPE a SYSTEM\"\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"#",
+"input":"<!DOCTYPE a SYSTEM\"#",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "#", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"&",
+"input":"<!DOCTYPE a SYSTEM\"&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "&", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"'",
+"input":"<!DOCTYPE a SYSTEM\"'",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "'", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"-",
+"input":"<!DOCTYPE a SYSTEM\"-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "-", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"/",
+"input":"<!DOCTYPE a SYSTEM\"/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "/", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"0",
+"input":"<!DOCTYPE a SYSTEM\"0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "0", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"1",
+"input":"<!DOCTYPE a SYSTEM\"1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "1", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"9",
+"input":"<!DOCTYPE a SYSTEM\"9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "9", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"<",
+"input":"<!DOCTYPE a SYSTEM\"<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "<", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"=",
+"input":"<!DOCTYPE a SYSTEM\"=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "=", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\">",
+"input":"<!DOCTYPE a SYSTEM\">",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"?",
+"input":"<!DOCTYPE a SYSTEM\"?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "?", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"@",
+"input":"<!DOCTYPE a SYSTEM\"@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "@", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"A",
+"input":"<!DOCTYPE a SYSTEM\"A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "A", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"B",
+"input":"<!DOCTYPE a SYSTEM\"B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "B", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"Y",
+"input":"<!DOCTYPE a SYSTEM\"Y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "Y", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"Z",
+"input":"<!DOCTYPE a SYSTEM\"Z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "Z", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"`",
+"input":"<!DOCTYPE a SYSTEM\"`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "`", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"a",
+"input":"<!DOCTYPE a SYSTEM\"a",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "a", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"b",
+"input":"<!DOCTYPE a SYSTEM\"b",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "b", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"y",
+"input":"<!DOCTYPE a SYSTEM\"y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "y", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"z",
+"input":"<!DOCTYPE a SYSTEM\"z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "z", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"{",
+"input":"<!DOCTYPE a SYSTEM\"{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "{", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\"\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a SYSTEM\"\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\uDBC0\uDC00", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM#",
+"input":"<!DOCTYPE a SYSTEM#",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM&",
+"input":"<!DOCTYPE a SYSTEM&",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'",
+"input":"<!DOCTYPE a SYSTEM'",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'\\u0000",
+"input":"<!DOCTYPE a SYSTEM'\u0000",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\uFFFD", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'\\u0009",
+"input":"<!DOCTYPE a SYSTEM'\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\u0009", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'\\u000A",
+"input":"<!DOCTYPE a SYSTEM'\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000A", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'\\u000B",
+"input":"<!DOCTYPE a SYSTEM'\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000B", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'\\u000C",
+"input":"<!DOCTYPE a SYSTEM'\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000C", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM' ",
+"input":"<!DOCTYPE a SYSTEM' ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, " ", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'!",
+"input":"<!DOCTYPE a SYSTEM'!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "!", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'\"",
+"input":"<!DOCTYPE a SYSTEM'\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\"", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'&",
+"input":"<!DOCTYPE a SYSTEM'&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "&", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM''",
+"input":"<!DOCTYPE a SYSTEM''",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\u0000",
+"input":"<!DOCTYPE a SYSTEM''\u0000",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\u0008",
+"input":"<!DOCTYPE a SYSTEM''\u0008",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\u0009",
+"input":"<!DOCTYPE a SYSTEM''\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\u000A",
+"input":"<!DOCTYPE a SYSTEM''\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\u000B",
+"input":"<!DOCTYPE a SYSTEM''\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\u000C",
+"input":"<!DOCTYPE a SYSTEM''\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\u000D",
+"input":"<!DOCTYPE a SYSTEM''\u000D",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\u001F",
+"input":"<!DOCTYPE a SYSTEM''\u001F",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM'' ",
+"input":"<!DOCTYPE a SYSTEM'' ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM''!",
+"input":"<!DOCTYPE a SYSTEM''!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\"",
+"input":"<!DOCTYPE a SYSTEM''\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''&",
+"input":"<!DOCTYPE a SYSTEM''&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM'''",
+"input":"<!DOCTYPE a SYSTEM'''",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''-",
+"input":"<!DOCTYPE a SYSTEM''-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''/",
+"input":"<!DOCTYPE a SYSTEM''/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''0",
+"input":"<!DOCTYPE a SYSTEM''0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''1",
+"input":"<!DOCTYPE a SYSTEM''1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''9",
+"input":"<!DOCTYPE a SYSTEM''9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''<",
+"input":"<!DOCTYPE a SYSTEM''<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''=",
+"input":"<!DOCTYPE a SYSTEM''=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''>",
+"input":"<!DOCTYPE a SYSTEM''>",
+"output":["ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''?",
+"input":"<!DOCTYPE a SYSTEM''?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''@",
+"input":"<!DOCTYPE a SYSTEM''@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''A",
+"input":"<!DOCTYPE a SYSTEM''A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''B",
+"input":"<!DOCTYPE a SYSTEM''B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''Y",
+"input":"<!DOCTYPE a SYSTEM''Y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''Z",
+"input":"<!DOCTYPE a SYSTEM''Z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''`",
+"input":"<!DOCTYPE a SYSTEM''`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''a",
+"input":"<!DOCTYPE a SYSTEM''a",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''b",
+"input":"<!DOCTYPE a SYSTEM''b",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''y",
+"input":"<!DOCTYPE a SYSTEM''y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''z",
+"input":"<!DOCTYPE a SYSTEM''z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''{",
+"input":"<!DOCTYPE a SYSTEM''{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM''\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a SYSTEM''\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPE a SYSTEM'(",
+"input":"<!DOCTYPE a SYSTEM'(",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "(", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'-",
+"input":"<!DOCTYPE a SYSTEM'-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "-", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'/",
+"input":"<!DOCTYPE a SYSTEM'/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "/", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'0",
+"input":"<!DOCTYPE a SYSTEM'0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "0", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'1",
+"input":"<!DOCTYPE a SYSTEM'1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "1", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'9",
+"input":"<!DOCTYPE a SYSTEM'9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "9", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'<",
+"input":"<!DOCTYPE a SYSTEM'<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "<", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'=",
+"input":"<!DOCTYPE a SYSTEM'=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "=", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'>",
+"input":"<!DOCTYPE a SYSTEM'>",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'?",
+"input":"<!DOCTYPE a SYSTEM'?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "?", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'@",
+"input":"<!DOCTYPE a SYSTEM'@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "@", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'A",
+"input":"<!DOCTYPE a SYSTEM'A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "A", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'B",
+"input":"<!DOCTYPE a SYSTEM'B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "B", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'Y",
+"input":"<!DOCTYPE a SYSTEM'Y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "Y", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'Z",
+"input":"<!DOCTYPE a SYSTEM'Z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "Z", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'`",
+"input":"<!DOCTYPE a SYSTEM'`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "`", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'a",
+"input":"<!DOCTYPE a SYSTEM'a",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "a", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'b",
+"input":"<!DOCTYPE a SYSTEM'b",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "b", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'y",
+"input":"<!DOCTYPE a SYSTEM'y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "y", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'z",
+"input":"<!DOCTYPE a SYSTEM'z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "z", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'{",
+"input":"<!DOCTYPE a SYSTEM'{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "{", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM'\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a SYSTEM'\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "\uDBC0\uDC00", false]]},
+
+{"description":"<!DOCTYPE a SYSTEM(",
+"input":"<!DOCTYPE a SYSTEM(",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM-",
+"input":"<!DOCTYPE a SYSTEM-",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM/",
+"input":"<!DOCTYPE a SYSTEM/",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM0",
+"input":"<!DOCTYPE a SYSTEM0",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM1",
+"input":"<!DOCTYPE a SYSTEM1",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM9",
+"input":"<!DOCTYPE a SYSTEM9",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM<",
+"input":"<!DOCTYPE a SYSTEM<",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM=",
+"input":"<!DOCTYPE a SYSTEM=",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM>",
+"input":"<!DOCTYPE a SYSTEM>",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM?",
+"input":"<!DOCTYPE a SYSTEM?",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM@",
+"input":"<!DOCTYPE a SYSTEM@",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEMA",
+"input":"<!DOCTYPE a SYSTEMA",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEMB",
+"input":"<!DOCTYPE a SYSTEMB",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEMY",
+"input":"<!DOCTYPE a SYSTEMY",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEMZ",
+"input":"<!DOCTYPE a SYSTEMZ",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM`",
+"input":"<!DOCTYPE a SYSTEM`",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEMa",
+"input":"<!DOCTYPE a SYSTEMa",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEMb",
+"input":"<!DOCTYPE a SYSTEMb",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEMy",
+"input":"<!DOCTYPE a SYSTEMy",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEMz",
+"input":"<!DOCTYPE a SYSTEMz",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM{",
+"input":"<!DOCTYPE a SYSTEM{",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a SYSTEM\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a SYSTEM\uDBC0\uDC00",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a Y",
+"input":"<!DOCTYPE a Y",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a Z",
+"input":"<!DOCTYPE a Z",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a `",
+"input":"<!DOCTYPE a `",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a",
+"input":"<!DOCTYPE a a",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a\\u0000",
+"input":"<!DOCTYPE a a\u0000",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a\\u0009",
+"input":"<!DOCTYPE a a\u0009",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a\\u000A",
+"input":"<!DOCTYPE a a\u000A",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a\\u000B",
+"input":"<!DOCTYPE a a\u000B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a\\u000C",
+"input":"<!DOCTYPE a a\u000C",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a ",
+"input":"<!DOCTYPE a a ",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a!",
+"input":"<!DOCTYPE a a!",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a\"",
+"input":"<!DOCTYPE a a\"",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a&",
+"input":"<!DOCTYPE a a&",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a'",
+"input":"<!DOCTYPE a a'",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a-",
+"input":"<!DOCTYPE a a-",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a/",
+"input":"<!DOCTYPE a a/",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a0",
+"input":"<!DOCTYPE a a0",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a1",
+"input":"<!DOCTYPE a a1",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a9",
+"input":"<!DOCTYPE a a9",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a<",
+"input":"<!DOCTYPE a a<",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a=",
+"input":"<!DOCTYPE a a=",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a>",
+"input":"<!DOCTYPE a a>",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a?",
+"input":"<!DOCTYPE a a?",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a@",
+"input":"<!DOCTYPE a a@",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a aA",
+"input":"<!DOCTYPE a aA",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a aB",
+"input":"<!DOCTYPE a aB",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a aY",
+"input":"<!DOCTYPE a aY",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a aZ",
+"input":"<!DOCTYPE a aZ",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a`",
+"input":"<!DOCTYPE a a`",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a aa",
+"input":"<!DOCTYPE a aa",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a ab",
+"input":"<!DOCTYPE a ab",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a ay",
+"input":"<!DOCTYPE a ay",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a az",
+"input":"<!DOCTYPE a az",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a{",
+"input":"<!DOCTYPE a a{",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a a\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a a\uDBC0\uDC00",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a b",
+"input":"<!DOCTYPE a b",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a y",
+"input":"<!DOCTYPE a y",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a z",
+"input":"<!DOCTYPE a z",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a {",
+"input":"<!DOCTYPE a {",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a \\uDBC0\\uDC00",
+"input":"<!DOCTYPE a \uDBC0\uDC00",
+"output":["ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPE a!",
+"input":"<!DOCTYPE a!",
+"output":["ParseError", ["DOCTYPE", "a!", null, null, false]]},
+
+{"description":"<!DOCTYPE a\"",
+"input":"<!DOCTYPE a\"",
+"output":["ParseError", ["DOCTYPE", "a\"", null, null, false]]},
+
+{"description":"<!DOCTYPE a&",
+"input":"<!DOCTYPE a&",
+"output":["ParseError", ["DOCTYPE", "a&", null, null, false]]},
+
+{"description":"<!DOCTYPE a'",
+"input":"<!DOCTYPE a'",
+"output":["ParseError", ["DOCTYPE", "a'", null, null, false]]},
+
+{"description":"<!DOCTYPE a-",
+"input":"<!DOCTYPE a-",
+"output":["ParseError", ["DOCTYPE", "a-", null, null, false]]},
+
+{"description":"<!DOCTYPE a/",
+"input":"<!DOCTYPE a/",
+"output":["ParseError", ["DOCTYPE", "a/", null, null, false]]},
+
+{"description":"<!DOCTYPE a0",
+"input":"<!DOCTYPE a0",
+"output":["ParseError", ["DOCTYPE", "a0", null, null, false]]},
+
+{"description":"<!DOCTYPE a1",
+"input":"<!DOCTYPE a1",
+"output":["ParseError", ["DOCTYPE", "a1", null, null, false]]},
+
+{"description":"<!DOCTYPE a9",
+"input":"<!DOCTYPE a9",
+"output":["ParseError", ["DOCTYPE", "a9", null, null, false]]},
+
+{"description":"<!DOCTYPE a<",
+"input":"<!DOCTYPE a<",
+"output":["ParseError", ["DOCTYPE", "a<", null, null, false]]},
+
+{"description":"<!DOCTYPE a=",
+"input":"<!DOCTYPE a=",
+"output":["ParseError", ["DOCTYPE", "a=", null, null, false]]},
+
+{"description":"<!DOCTYPE a>",
+"input":"<!DOCTYPE a>",
+"output":[["DOCTYPE", "a", null, null, true]]},
+
+{"description":"<!DOCTYPE a?",
+"input":"<!DOCTYPE a?",
+"output":["ParseError", ["DOCTYPE", "a?", null, null, false]]},
+
+{"description":"<!DOCTYPE a@",
+"input":"<!DOCTYPE a@",
+"output":["ParseError", ["DOCTYPE", "a@", null, null, false]]},
+
+{"description":"<!DOCTYPE aA",
+"input":"<!DOCTYPE aA",
+"output":["ParseError", ["DOCTYPE", "aa", null, null, false]]},
+
+{"description":"<!DOCTYPE aB",
+"input":"<!DOCTYPE aB",
+"output":["ParseError", ["DOCTYPE", "ab", null, null, false]]},
+
+{"description":"<!DOCTYPE aY",
+"input":"<!DOCTYPE aY",
+"output":["ParseError", ["DOCTYPE", "ay", null, null, false]]},
+
+{"description":"<!DOCTYPE aZ",
+"input":"<!DOCTYPE aZ",
+"output":["ParseError", ["DOCTYPE", "az", null, null, false]]},
+
+{"description":"<!DOCTYPE a[",
+"input":"<!DOCTYPE a[",
+"output":["ParseError", ["DOCTYPE", "a[", null, null, false]]},
+
+{"description":"<!DOCTYPE a`",
+"input":"<!DOCTYPE a`",
+"output":["ParseError", ["DOCTYPE", "a`", null, null, false]]},
+
+{"description":"<!DOCTYPE aa",
+"input":"<!DOCTYPE aa",
+"output":["ParseError", ["DOCTYPE", "aa", null, null, false]]},
+
+{"description":"<!DOCTYPE ab",
+"input":"<!DOCTYPE ab",
+"output":["ParseError", ["DOCTYPE", "ab", null, null, false]]},
+
+{"description":"<!DOCTYPE ay",
+"input":"<!DOCTYPE ay",
+"output":["ParseError", ["DOCTYPE", "ay", null, null, false]]},
+
+{"description":"<!DOCTYPE az",
+"input":"<!DOCTYPE az",
+"output":["ParseError", ["DOCTYPE", "az", null, null, false]]},
+
+{"description":"<!DOCTYPE a{",
+"input":"<!DOCTYPE a{",
+"output":["ParseError", ["DOCTYPE", "a{", null, null, false]]},
+
+{"description":"<!DOCTYPE a\\uDBC0\\uDC00",
+"input":"<!DOCTYPE a\uDBC0\uDC00",
+"output":["ParseError", ["DOCTYPE", "a\uDBC0\uDC00", null, null, false]]},
+
+{"description":"<!DOCTYPE b",
+"input":"<!DOCTYPE b",
+"output":["ParseError", ["DOCTYPE", "b", null, null, false]]},
+
+{"description":"<!DOCTYPE y",
+"input":"<!DOCTYPE y",
+"output":["ParseError", ["DOCTYPE", "y", null, null, false]]},
+
+{"description":"<!DOCTYPE z",
+"input":"<!DOCTYPE z",
+"output":["ParseError", ["DOCTYPE", "z", null, null, false]]},
+
+{"description":"<!DOCTYPE {",
+"input":"<!DOCTYPE {",
+"output":["ParseError", ["DOCTYPE", "{", null, null, false]]},
+
+{"description":"<!DOCTYPE \\uDBC0\\uDC00",
+"input":"<!DOCTYPE \uDBC0\uDC00",
+"output":["ParseError", ["DOCTYPE", "\uDBC0\uDC00", null, null, false]]},
+
+{"description":"<!DOCTYPE!",
+"input":"<!DOCTYPE!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "!", null, null, false]]},
+
+{"description":"<!DOCTYPE\"",
+"input":"<!DOCTYPE\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "\"", null, null, false]]},
+
+{"description":"<!DOCTYPE&",
+"input":"<!DOCTYPE&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "&", null, null, false]]},
+
+{"description":"<!DOCTYPE'",
+"input":"<!DOCTYPE'",
+"output":["ParseError", "ParseError", ["DOCTYPE", "'", null, null, false]]},
+
+{"description":"<!DOCTYPE-",
+"input":"<!DOCTYPE-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "-", null, null, false]]},
+
+{"description":"<!DOCTYPE/",
+"input":"<!DOCTYPE/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "/", null, null, false]]},
+
+{"description":"<!DOCTYPE0",
+"input":"<!DOCTYPE0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "0", null, null, false]]},
+
+{"description":"<!DOCTYPE1",
+"input":"<!DOCTYPE1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "1", null, null, false]]},
+
+{"description":"<!DOCTYPE9",
+"input":"<!DOCTYPE9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "9", null, null, false]]},
+
+{"description":"<!DOCTYPE<",
+"input":"<!DOCTYPE<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "<", null, null, false]]},
+
+{"description":"<!DOCTYPE=",
+"input":"<!DOCTYPE=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "=", null, null, false]]},
+
+{"description":"<!DOCTYPE>",
+"input":"<!DOCTYPE>",
+"output":["ParseError", "ParseError", ["DOCTYPE", null, null, null, false]]},
+
+{"description":"<!DOCTYPE?",
+"input":"<!DOCTYPE?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "?", null, null, false]]},
+
+{"description":"<!DOCTYPE@",
+"input":"<!DOCTYPE@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "@", null, null, false]]},
+
+{"description":"<!DOCTYPEA",
+"input":"<!DOCTYPEA",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEB",
+"input":"<!DOCTYPEB",
+"output":["ParseError", "ParseError", ["DOCTYPE", "b", null, null, false]]},
+
+{"description":"<!DOCTYPEY",
+"input":"<!DOCTYPEY",
+"output":["ParseError", "ParseError", ["DOCTYPE", "y", null, null, false]]},
+
+{"description":"<!DOCTYPEZ",
+"input":"<!DOCTYPEZ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "z", null, null, false]]},
+
+{"description":"<!DOCTYPE`",
+"input":"<!DOCTYPE`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "`", null, null, false]]},
+
+{"description":"<!DOCTYPEa",
+"input":"<!DOCTYPEa",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\u0000",
+"input":"<!DOCTYPEa\u0000",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a\uFFFD", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\u0008",
+"input":"<!DOCTYPEa\u0008",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a\u0008", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\u0009",
+"input":"<!DOCTYPEa\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\u000A",
+"input":"<!DOCTYPEa\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\u000B",
+"input":"<!DOCTYPEa\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a\u000B", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\u000C",
+"input":"<!DOCTYPEa\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\u000D",
+"input":"<!DOCTYPEa\u000D",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\u001F",
+"input":"<!DOCTYPEa\u001F",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a\u001F", null, null, false]]},
+
+{"description":"<!DOCTYPEa ",
+"input":"<!DOCTYPEa ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\u0000",
+"input":"<!DOCTYPEa \u0000",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\u0008",
+"input":"<!DOCTYPEa \u0008",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\u0009",
+"input":"<!DOCTYPEa \u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\u000A",
+"input":"<!DOCTYPEa \u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\u000B",
+"input":"<!DOCTYPEa \u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\u000C",
+"input":"<!DOCTYPEa \u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\u000D",
+"input":"<!DOCTYPEa \u000D",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\u001F",
+"input":"<!DOCTYPEa \u001F",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa ",
+"input":"<!DOCTYPEa ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa !",
+"input":"<!DOCTYPEa !",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \"",
+"input":"<!DOCTYPEa \"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa &",
+"input":"<!DOCTYPEa &",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa '",
+"input":"<!DOCTYPEa '",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa -",
+"input":"<!DOCTYPEa -",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa /",
+"input":"<!DOCTYPEa /",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa 0",
+"input":"<!DOCTYPEa 0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa 1",
+"input":"<!DOCTYPEa 1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa 9",
+"input":"<!DOCTYPEa 9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa <",
+"input":"<!DOCTYPEa <",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa =",
+"input":"<!DOCTYPEa =",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa >",
+"input":"<!DOCTYPEa >",
+"output":["ParseError", ["DOCTYPE", "a", null, null, true]]},
+
+{"description":"<!DOCTYPEa ?",
+"input":"<!DOCTYPEa ?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa @",
+"input":"<!DOCTYPEa @",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa A",
+"input":"<!DOCTYPEa A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa B",
+"input":"<!DOCTYPEa B",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC",
+"input":"<!DOCTYPEa PUBLIC",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\u0000",
+"input":"<!DOCTYPEa PUBLIC\u0000",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\u0008",
+"input":"<!DOCTYPEa PUBLIC\u0008",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\u0009",
+"input":"<!DOCTYPEa PUBLIC\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\u000A",
+"input":"<!DOCTYPEa PUBLIC\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\u000B",
+"input":"<!DOCTYPEa PUBLIC\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\u000C",
+"input":"<!DOCTYPEa PUBLIC\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\u000D",
+"input":"<!DOCTYPEa PUBLIC\u000D",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\u001F",
+"input":"<!DOCTYPEa PUBLIC\u001F",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC ",
+"input":"<!DOCTYPEa PUBLIC ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC!",
+"input":"<!DOCTYPEa PUBLIC!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"",
+"input":"<!DOCTYPEa PUBLIC\"",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"\\u0000",
+"input":"<!DOCTYPEa PUBLIC\"\u0000",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\uFFFD", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"\\u0009",
+"input":"<!DOCTYPEa PUBLIC\"\u0009",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u0009", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"\\u000A",
+"input":"<!DOCTYPEa PUBLIC\"\u000A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u000A", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"\\u000B",
+"input":"<!DOCTYPEa PUBLIC\"\u000B",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u000B", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"\\u000C",
+"input":"<!DOCTYPEa PUBLIC\"\u000C",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u000C", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\" ",
+"input":"<!DOCTYPEa PUBLIC\" ",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", " ", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"!",
+"input":"<!DOCTYPEa PUBLIC\"!",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "!", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"\"",
+"input":"<!DOCTYPEa PUBLIC\"\"",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"#",
+"input":"<!DOCTYPEa PUBLIC\"#",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "#", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"&",
+"input":"<!DOCTYPEa PUBLIC\"&",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "&", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"'",
+"input":"<!DOCTYPEa PUBLIC\"'",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "'", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"-",
+"input":"<!DOCTYPEa PUBLIC\"-",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "-", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"/",
+"input":"<!DOCTYPEa PUBLIC\"/",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "/", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"0",
+"input":"<!DOCTYPEa PUBLIC\"0",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "0", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"1",
+"input":"<!DOCTYPEa PUBLIC\"1",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "1", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"9",
+"input":"<!DOCTYPEa PUBLIC\"9",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "9", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"<",
+"input":"<!DOCTYPEa PUBLIC\"<",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "<", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"=",
+"input":"<!DOCTYPEa PUBLIC\"=",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "=", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\">",
+"input":"<!DOCTYPEa PUBLIC\">",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"?",
+"input":"<!DOCTYPEa PUBLIC\"?",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "?", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"@",
+"input":"<!DOCTYPEa PUBLIC\"@",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "@", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"A",
+"input":"<!DOCTYPEa PUBLIC\"A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "A", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"B",
+"input":"<!DOCTYPEa PUBLIC\"B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "B", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"Y",
+"input":"<!DOCTYPEa PUBLIC\"Y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "Y", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"Z",
+"input":"<!DOCTYPEa PUBLIC\"Z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "Z", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"`",
+"input":"<!DOCTYPEa PUBLIC\"`",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "`", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"a",
+"input":"<!DOCTYPEa PUBLIC\"a",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "a", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"b",
+"input":"<!DOCTYPEa PUBLIC\"b",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "b", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"y",
+"input":"<!DOCTYPEa PUBLIC\"y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "y", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"z",
+"input":"<!DOCTYPEa PUBLIC\"z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "z", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"{",
+"input":"<!DOCTYPEa PUBLIC\"{",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "{", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\"\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa PUBLIC\"\uDBC0\uDC00",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\uDBC0\uDC00", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC#",
+"input":"<!DOCTYPEa PUBLIC#",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC&",
+"input":"<!DOCTYPEa PUBLIC&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'",
+"input":"<!DOCTYPEa PUBLIC'",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'\\u0000",
+"input":"<!DOCTYPEa PUBLIC'\u0000",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\uFFFD", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'\\u0009",
+"input":"<!DOCTYPEa PUBLIC'\u0009",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u0009", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'\\u000A",
+"input":"<!DOCTYPEa PUBLIC'\u000A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u000A", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'\\u000B",
+"input":"<!DOCTYPEa PUBLIC'\u000B",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u000B", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'\\u000C",
+"input":"<!DOCTYPEa PUBLIC'\u000C",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\u000C", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC' ",
+"input":"<!DOCTYPEa PUBLIC' ",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", " ", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'!",
+"input":"<!DOCTYPEa PUBLIC'!",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "!", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'\"",
+"input":"<!DOCTYPEa PUBLIC'\"",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\"", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'&",
+"input":"<!DOCTYPEa PUBLIC'&",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "&", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''",
+"input":"<!DOCTYPEa PUBLIC''",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\u0000",
+"input":"<!DOCTYPEa PUBLIC''\u0000",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\u0008",
+"input":"<!DOCTYPEa PUBLIC''\u0008",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\u0009",
+"input":"<!DOCTYPEa PUBLIC''\u0009",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\u000A",
+"input":"<!DOCTYPEa PUBLIC''\u000A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\u000B",
+"input":"<!DOCTYPEa PUBLIC''\u000B",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\u000C",
+"input":"<!DOCTYPEa PUBLIC''\u000C",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\u000D",
+"input":"<!DOCTYPEa PUBLIC''\u000D",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\u001F",
+"input":"<!DOCTYPEa PUBLIC''\u001F",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'' ",
+"input":"<!DOCTYPEa PUBLIC'' ",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''!",
+"input":"<!DOCTYPEa PUBLIC''!",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\"",
+"input":"<!DOCTYPEa PUBLIC''\"",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", "", false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''#",
+"input":"<!DOCTYPEa PUBLIC''#",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''&",
+"input":"<!DOCTYPEa PUBLIC''&",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'''",
+"input":"<!DOCTYPEa PUBLIC'''",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", "", false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''(",
+"input":"<!DOCTYPEa PUBLIC''(",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''-",
+"input":"<!DOCTYPEa PUBLIC''-",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''/",
+"input":"<!DOCTYPEa PUBLIC''/",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''0",
+"input":"<!DOCTYPEa PUBLIC''0",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''1",
+"input":"<!DOCTYPEa PUBLIC''1",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''9",
+"input":"<!DOCTYPEa PUBLIC''9",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''<",
+"input":"<!DOCTYPEa PUBLIC''<",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''=",
+"input":"<!DOCTYPEa PUBLIC''=",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''>",
+"input":"<!DOCTYPEa PUBLIC''>",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", "", null, true]]},
+
+{"description":"<!DOCTYPEa PUBLIC''?",
+"input":"<!DOCTYPEa PUBLIC''?",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''@",
+"input":"<!DOCTYPEa PUBLIC''@",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''A",
+"input":"<!DOCTYPEa PUBLIC''A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''B",
+"input":"<!DOCTYPEa PUBLIC''B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''Y",
+"input":"<!DOCTYPEa PUBLIC''Y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''Z",
+"input":"<!DOCTYPEa PUBLIC''Z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''`",
+"input":"<!DOCTYPEa PUBLIC''`",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''a",
+"input":"<!DOCTYPEa PUBLIC''a",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''b",
+"input":"<!DOCTYPEa PUBLIC''b",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''y",
+"input":"<!DOCTYPEa PUBLIC''y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''z",
+"input":"<!DOCTYPEa PUBLIC''z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''{",
+"input":"<!DOCTYPEa PUBLIC''{",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC''\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa PUBLIC''\uDBC0\uDC00",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'(",
+"input":"<!DOCTYPEa PUBLIC'(",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "(", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'-",
+"input":"<!DOCTYPEa PUBLIC'-",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "-", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'/",
+"input":"<!DOCTYPEa PUBLIC'/",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "/", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'0",
+"input":"<!DOCTYPEa PUBLIC'0",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "0", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'1",
+"input":"<!DOCTYPEa PUBLIC'1",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "1", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'9",
+"input":"<!DOCTYPEa PUBLIC'9",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "9", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'<",
+"input":"<!DOCTYPEa PUBLIC'<",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "<", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'=",
+"input":"<!DOCTYPEa PUBLIC'=",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "=", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'>",
+"input":"<!DOCTYPEa PUBLIC'>",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'?",
+"input":"<!DOCTYPEa PUBLIC'?",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "?", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'@",
+"input":"<!DOCTYPEa PUBLIC'@",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "@", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'A",
+"input":"<!DOCTYPEa PUBLIC'A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "A", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'B",
+"input":"<!DOCTYPEa PUBLIC'B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "B", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'Y",
+"input":"<!DOCTYPEa PUBLIC'Y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "Y", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'Z",
+"input":"<!DOCTYPEa PUBLIC'Z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "Z", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'`",
+"input":"<!DOCTYPEa PUBLIC'`",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "`", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'a",
+"input":"<!DOCTYPEa PUBLIC'a",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "a", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'b",
+"input":"<!DOCTYPEa PUBLIC'b",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "b", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'y",
+"input":"<!DOCTYPEa PUBLIC'y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "y", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'z",
+"input":"<!DOCTYPEa PUBLIC'z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "z", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'{",
+"input":"<!DOCTYPEa PUBLIC'{",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "{", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC'\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa PUBLIC'\uDBC0\uDC00",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", "\uDBC0\uDC00", null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC(",
+"input":"<!DOCTYPEa PUBLIC(",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC-",
+"input":"<!DOCTYPEa PUBLIC-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC/",
+"input":"<!DOCTYPEa PUBLIC/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC0",
+"input":"<!DOCTYPEa PUBLIC0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC1",
+"input":"<!DOCTYPEa PUBLIC1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC9",
+"input":"<!DOCTYPEa PUBLIC9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC<",
+"input":"<!DOCTYPEa PUBLIC<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC=",
+"input":"<!DOCTYPEa PUBLIC=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC>",
+"input":"<!DOCTYPEa PUBLIC>",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC?",
+"input":"<!DOCTYPEa PUBLIC?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC@",
+"input":"<!DOCTYPEa PUBLIC@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLICA",
+"input":"<!DOCTYPEa PUBLICA",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLICB",
+"input":"<!DOCTYPEa PUBLICB",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLICY",
+"input":"<!DOCTYPEa PUBLICY",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLICZ",
+"input":"<!DOCTYPEa PUBLICZ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC`",
+"input":"<!DOCTYPEa PUBLIC`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLICa",
+"input":"<!DOCTYPEa PUBLICa",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLICb",
+"input":"<!DOCTYPEa PUBLICb",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLICy",
+"input":"<!DOCTYPEa PUBLICy",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLICz",
+"input":"<!DOCTYPEa PUBLICz",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC{",
+"input":"<!DOCTYPEa PUBLIC{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa PUBLIC\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa PUBLIC\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM",
+"input":"<!DOCTYPEa SYSTEM",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\u0000",
+"input":"<!DOCTYPEa SYSTEM\u0000",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\u0008",
+"input":"<!DOCTYPEa SYSTEM\u0008",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\u0009",
+"input":"<!DOCTYPEa SYSTEM\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\u000A",
+"input":"<!DOCTYPEa SYSTEM\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\u000B",
+"input":"<!DOCTYPEa SYSTEM\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\u000C",
+"input":"<!DOCTYPEa SYSTEM\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\u000D",
+"input":"<!DOCTYPEa SYSTEM\u000D",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\u001F",
+"input":"<!DOCTYPEa SYSTEM\u001F",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM ",
+"input":"<!DOCTYPEa SYSTEM ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM!",
+"input":"<!DOCTYPEa SYSTEM!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"",
+"input":"<!DOCTYPEa SYSTEM\"",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"\\u0000",
+"input":"<!DOCTYPEa SYSTEM\"\u0000",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\uFFFD", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"\\u0009",
+"input":"<!DOCTYPEa SYSTEM\"\u0009",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u0009", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"\\u000A",
+"input":"<!DOCTYPEa SYSTEM\"\u000A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000A", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"\\u000B",
+"input":"<!DOCTYPEa SYSTEM\"\u000B",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000B", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"\\u000C",
+"input":"<!DOCTYPEa SYSTEM\"\u000C",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000C", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\" ",
+"input":"<!DOCTYPEa SYSTEM\" ",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, " ", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"!",
+"input":"<!DOCTYPEa SYSTEM\"!",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "!", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"\"",
+"input":"<!DOCTYPEa SYSTEM\"\"",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"#",
+"input":"<!DOCTYPEa SYSTEM\"#",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "#", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"&",
+"input":"<!DOCTYPEa SYSTEM\"&",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "&", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"'",
+"input":"<!DOCTYPEa SYSTEM\"'",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "'", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"-",
+"input":"<!DOCTYPEa SYSTEM\"-",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "-", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"/",
+"input":"<!DOCTYPEa SYSTEM\"/",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "/", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"0",
+"input":"<!DOCTYPEa SYSTEM\"0",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "0", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"1",
+"input":"<!DOCTYPEa SYSTEM\"1",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "1", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"9",
+"input":"<!DOCTYPEa SYSTEM\"9",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "9", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"<",
+"input":"<!DOCTYPEa SYSTEM\"<",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "<", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"=",
+"input":"<!DOCTYPEa SYSTEM\"=",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "=", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\">",
+"input":"<!DOCTYPEa SYSTEM\">",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"?",
+"input":"<!DOCTYPEa SYSTEM\"?",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "?", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"@",
+"input":"<!DOCTYPEa SYSTEM\"@",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "@", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"A",
+"input":"<!DOCTYPEa SYSTEM\"A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "A", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"B",
+"input":"<!DOCTYPEa SYSTEM\"B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "B", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"Y",
+"input":"<!DOCTYPEa SYSTEM\"Y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "Y", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"Z",
+"input":"<!DOCTYPEa SYSTEM\"Z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "Z", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"`",
+"input":"<!DOCTYPEa SYSTEM\"`",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "`", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"a",
+"input":"<!DOCTYPEa SYSTEM\"a",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "a", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"b",
+"input":"<!DOCTYPEa SYSTEM\"b",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "b", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"y",
+"input":"<!DOCTYPEa SYSTEM\"y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "y", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"z",
+"input":"<!DOCTYPEa SYSTEM\"z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "z", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"{",
+"input":"<!DOCTYPEa SYSTEM\"{",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "{", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\"\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa SYSTEM\"\uDBC0\uDC00",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\uDBC0\uDC00", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM#",
+"input":"<!DOCTYPEa SYSTEM#",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM&",
+"input":"<!DOCTYPEa SYSTEM&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'",
+"input":"<!DOCTYPEa SYSTEM'",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'\\u0000",
+"input":"<!DOCTYPEa SYSTEM'\u0000",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\uFFFD", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'\\u0009",
+"input":"<!DOCTYPEa SYSTEM'\u0009",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u0009", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'\\u000A",
+"input":"<!DOCTYPEa SYSTEM'\u000A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000A", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'\\u000B",
+"input":"<!DOCTYPEa SYSTEM'\u000B",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000B", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'\\u000C",
+"input":"<!DOCTYPEa SYSTEM'\u000C",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\u000C", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM' ",
+"input":"<!DOCTYPEa SYSTEM' ",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, " ", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'!",
+"input":"<!DOCTYPEa SYSTEM'!",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "!", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'\"",
+"input":"<!DOCTYPEa SYSTEM'\"",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\"", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'&",
+"input":"<!DOCTYPEa SYSTEM'&",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "&", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM''",
+"input":"<!DOCTYPEa SYSTEM''",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\u0000",
+"input":"<!DOCTYPEa SYSTEM''\u0000",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\u0008",
+"input":"<!DOCTYPEa SYSTEM''\u0008",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\u0009",
+"input":"<!DOCTYPEa SYSTEM''\u0009",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\u000A",
+"input":"<!DOCTYPEa SYSTEM''\u000A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\u000B",
+"input":"<!DOCTYPEa SYSTEM''\u000B",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\u000C",
+"input":"<!DOCTYPEa SYSTEM''\u000C",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\u000D",
+"input":"<!DOCTYPEa SYSTEM''\u000D",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\u001F",
+"input":"<!DOCTYPEa SYSTEM''\u001F",
+"output":["ParseError", "ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM'' ",
+"input":"<!DOCTYPEa SYSTEM'' ",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM''!",
+"input":"<!DOCTYPEa SYSTEM''!",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\"",
+"input":"<!DOCTYPEa SYSTEM''\"",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''&",
+"input":"<!DOCTYPEa SYSTEM''&",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM'''",
+"input":"<!DOCTYPEa SYSTEM'''",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''-",
+"input":"<!DOCTYPEa SYSTEM''-",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''/",
+"input":"<!DOCTYPEa SYSTEM''/",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''0",
+"input":"<!DOCTYPEa SYSTEM''0",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''1",
+"input":"<!DOCTYPEa SYSTEM''1",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''9",
+"input":"<!DOCTYPEa SYSTEM''9",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''<",
+"input":"<!DOCTYPEa SYSTEM''<",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''=",
+"input":"<!DOCTYPEa SYSTEM''=",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''>",
+"input":"<!DOCTYPEa SYSTEM''>",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''?",
+"input":"<!DOCTYPEa SYSTEM''?",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''@",
+"input":"<!DOCTYPEa SYSTEM''@",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''A",
+"input":"<!DOCTYPEa SYSTEM''A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''B",
+"input":"<!DOCTYPEa SYSTEM''B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''Y",
+"input":"<!DOCTYPEa SYSTEM''Y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''Z",
+"input":"<!DOCTYPEa SYSTEM''Z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''`",
+"input":"<!DOCTYPEa SYSTEM''`",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''a",
+"input":"<!DOCTYPEa SYSTEM''a",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''b",
+"input":"<!DOCTYPEa SYSTEM''b",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''y",
+"input":"<!DOCTYPEa SYSTEM''y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''z",
+"input":"<!DOCTYPEa SYSTEM''z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''{",
+"input":"<!DOCTYPEa SYSTEM''{",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM''\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa SYSTEM''\uDBC0\uDC00",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", true]]},
+
+{"description":"<!DOCTYPEa SYSTEM'(",
+"input":"<!DOCTYPEa SYSTEM'(",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "(", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'-",
+"input":"<!DOCTYPEa SYSTEM'-",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "-", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'/",
+"input":"<!DOCTYPEa SYSTEM'/",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "/", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'0",
+"input":"<!DOCTYPEa SYSTEM'0",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "0", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'1",
+"input":"<!DOCTYPEa SYSTEM'1",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "1", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'9",
+"input":"<!DOCTYPEa SYSTEM'9",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "9", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'<",
+"input":"<!DOCTYPEa SYSTEM'<",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "<", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'=",
+"input":"<!DOCTYPEa SYSTEM'=",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "=", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'>",
+"input":"<!DOCTYPEa SYSTEM'>",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'?",
+"input":"<!DOCTYPEa SYSTEM'?",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "?", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'@",
+"input":"<!DOCTYPEa SYSTEM'@",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "@", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'A",
+"input":"<!DOCTYPEa SYSTEM'A",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "A", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'B",
+"input":"<!DOCTYPEa SYSTEM'B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "B", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'Y",
+"input":"<!DOCTYPEa SYSTEM'Y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "Y", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'Z",
+"input":"<!DOCTYPEa SYSTEM'Z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "Z", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'`",
+"input":"<!DOCTYPEa SYSTEM'`",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "`", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'a",
+"input":"<!DOCTYPEa SYSTEM'a",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "a", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'b",
+"input":"<!DOCTYPEa SYSTEM'b",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "b", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'y",
+"input":"<!DOCTYPEa SYSTEM'y",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "y", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'z",
+"input":"<!DOCTYPEa SYSTEM'z",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "z", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'{",
+"input":"<!DOCTYPEa SYSTEM'{",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "{", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM'\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa SYSTEM'\uDBC0\uDC00",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, "\uDBC0\uDC00", false]]},
+
+{"description":"<!DOCTYPEa SYSTEM(",
+"input":"<!DOCTYPEa SYSTEM(",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM-",
+"input":"<!DOCTYPEa SYSTEM-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM/",
+"input":"<!DOCTYPEa SYSTEM/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM0",
+"input":"<!DOCTYPEa SYSTEM0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM1",
+"input":"<!DOCTYPEa SYSTEM1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM9",
+"input":"<!DOCTYPEa SYSTEM9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM<",
+"input":"<!DOCTYPEa SYSTEM<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM=",
+"input":"<!DOCTYPEa SYSTEM=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM>",
+"input":"<!DOCTYPEa SYSTEM>",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM?",
+"input":"<!DOCTYPEa SYSTEM?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM@",
+"input":"<!DOCTYPEa SYSTEM@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEMA",
+"input":"<!DOCTYPEa SYSTEMA",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEMB",
+"input":"<!DOCTYPEa SYSTEMB",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEMY",
+"input":"<!DOCTYPEa SYSTEMY",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEMZ",
+"input":"<!DOCTYPEa SYSTEMZ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM`",
+"input":"<!DOCTYPEa SYSTEM`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEMa",
+"input":"<!DOCTYPEa SYSTEMa",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEMb",
+"input":"<!DOCTYPEa SYSTEMb",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEMy",
+"input":"<!DOCTYPEa SYSTEMy",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEMz",
+"input":"<!DOCTYPEa SYSTEMz",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM{",
+"input":"<!DOCTYPEa SYSTEM{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa SYSTEM\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa SYSTEM\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa Y",
+"input":"<!DOCTYPEa Y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa Z",
+"input":"<!DOCTYPEa Z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa `",
+"input":"<!DOCTYPEa `",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a",
+"input":"<!DOCTYPEa a",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a\\u0000",
+"input":"<!DOCTYPEa a\u0000",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a\\u0009",
+"input":"<!DOCTYPEa a\u0009",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a\\u000A",
+"input":"<!DOCTYPEa a\u000A",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a\\u000B",
+"input":"<!DOCTYPEa a\u000B",
+"output":["ParseError", "ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a\\u000C",
+"input":"<!DOCTYPEa a\u000C",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a ",
+"input":"<!DOCTYPEa a ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a!",
+"input":"<!DOCTYPEa a!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a\"",
+"input":"<!DOCTYPEa a\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a&",
+"input":"<!DOCTYPEa a&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a'",
+"input":"<!DOCTYPEa a'",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a-",
+"input":"<!DOCTYPEa a-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a/",
+"input":"<!DOCTYPEa a/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a0",
+"input":"<!DOCTYPEa a0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a1",
+"input":"<!DOCTYPEa a1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a9",
+"input":"<!DOCTYPEa a9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a<",
+"input":"<!DOCTYPEa a<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a=",
+"input":"<!DOCTYPEa a=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a>",
+"input":"<!DOCTYPEa a>",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a?",
+"input":"<!DOCTYPEa a?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a@",
+"input":"<!DOCTYPEa a@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa aA",
+"input":"<!DOCTYPEa aA",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa aB",
+"input":"<!DOCTYPEa aB",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa aY",
+"input":"<!DOCTYPEa aY",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa aZ",
+"input":"<!DOCTYPEa aZ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a`",
+"input":"<!DOCTYPEa a`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa aa",
+"input":"<!DOCTYPEa aa",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa ab",
+"input":"<!DOCTYPEa ab",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa ay",
+"input":"<!DOCTYPEa ay",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa az",
+"input":"<!DOCTYPEa az",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a{",
+"input":"<!DOCTYPEa a{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa a\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa a\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa b",
+"input":"<!DOCTYPEa b",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa y",
+"input":"<!DOCTYPEa y",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa z",
+"input":"<!DOCTYPEa z",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa {",
+"input":"<!DOCTYPEa {",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa \\uDBC0\\uDC00",
+"input":"<!DOCTYPEa \uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a", null, null, false]]},
+
+{"description":"<!DOCTYPEa!",
+"input":"<!DOCTYPEa!",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a!", null, null, false]]},
+
+{"description":"<!DOCTYPEa\"",
+"input":"<!DOCTYPEa\"",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a\"", null, null, false]]},
+
+{"description":"<!DOCTYPEa&",
+"input":"<!DOCTYPEa&",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a&", null, null, false]]},
+
+{"description":"<!DOCTYPEa'",
+"input":"<!DOCTYPEa'",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a'", null, null, false]]},
+
+{"description":"<!DOCTYPEa-",
+"input":"<!DOCTYPEa-",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a-", null, null, false]]},
+
+{"description":"<!DOCTYPEa/",
+"input":"<!DOCTYPEa/",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a/", null, null, false]]},
+
+{"description":"<!DOCTYPEa0",
+"input":"<!DOCTYPEa0",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a0", null, null, false]]},
+
+{"description":"<!DOCTYPEa1",
+"input":"<!DOCTYPEa1",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a1", null, null, false]]},
+
+{"description":"<!DOCTYPEa9",
+"input":"<!DOCTYPEa9",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a9", null, null, false]]},
+
+{"description":"<!DOCTYPEa<",
+"input":"<!DOCTYPEa<",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a<", null, null, false]]},
+
+{"description":"<!DOCTYPEa=",
+"input":"<!DOCTYPEa=",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a=", null, null, false]]},
+
+{"description":"<!DOCTYPEa>",
+"input":"<!DOCTYPEa>",
+"output":["ParseError", ["DOCTYPE", "a", null, null, true]]},
+
+{"description":"<!DOCTYPEa?",
+"input":"<!DOCTYPEa?",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a?", null, null, false]]},
+
+{"description":"<!DOCTYPEa@",
+"input":"<!DOCTYPEa@",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a@", null, null, false]]},
+
+{"description":"<!DOCTYPEaA",
+"input":"<!DOCTYPEaA",
+"output":["ParseError", "ParseError", ["DOCTYPE", "aa", null, null, false]]},
+
+{"description":"<!DOCTYPEaB",
+"input":"<!DOCTYPEaB",
+"output":["ParseError", "ParseError", ["DOCTYPE", "ab", null, null, false]]},
+
+{"description":"<!DOCTYPEaY",
+"input":"<!DOCTYPEaY",
+"output":["ParseError", "ParseError", ["DOCTYPE", "ay", null, null, false]]},
+
+{"description":"<!DOCTYPEaZ",
+"input":"<!DOCTYPEaZ",
+"output":["ParseError", "ParseError", ["DOCTYPE", "az", null, null, false]]},
+
+{"description":"<!DOCTYPEa[",
+"input":"<!DOCTYPEa[",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a[", null, null, false]]},
+
+{"description":"<!DOCTYPEa`",
+"input":"<!DOCTYPEa`",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a`", null, null, false]]},
+
+{"description":"<!DOCTYPEaa",
+"input":"<!DOCTYPEaa",
+"output":["ParseError", "ParseError", ["DOCTYPE", "aa", null, null, false]]},
+
+{"description":"<!DOCTYPEab",
+"input":"<!DOCTYPEab",
+"output":["ParseError", "ParseError", ["DOCTYPE", "ab", null, null, false]]},
+
+{"description":"<!DOCTYPEay",
+"input":"<!DOCTYPEay",
+"output":["ParseError", "ParseError", ["DOCTYPE", "ay", null, null, false]]},
+
+{"description":"<!DOCTYPEaz",
+"input":"<!DOCTYPEaz",
+"output":["ParseError", "ParseError", ["DOCTYPE", "az", null, null, false]]},
+
+{"description":"<!DOCTYPEa{",
+"input":"<!DOCTYPEa{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a{", null, null, false]]},
+
+{"description":"<!DOCTYPEa\\uDBC0\\uDC00",
+"input":"<!DOCTYPEa\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "a\uDBC0\uDC00", null, null, false]]},
+
+{"description":"<!DOCTYPEb",
+"input":"<!DOCTYPEb",
+"output":["ParseError", "ParseError", ["DOCTYPE", "b", null, null, false]]},
+
+{"description":"<!DOCTYPEy",
+"input":"<!DOCTYPEy",
+"output":["ParseError", "ParseError", ["DOCTYPE", "y", null, null, false]]},
+
+{"description":"<!DOCTYPEz",
+"input":"<!DOCTYPEz",
+"output":["ParseError", "ParseError", ["DOCTYPE", "z", null, null, false]]},
+
+{"description":"<!DOCTYPE{",
+"input":"<!DOCTYPE{",
+"output":["ParseError", "ParseError", ["DOCTYPE", "{", null, null, false]]},
+
+{"description":"<!DOCTYPE\\uDBC0\\uDC00",
+"input":"<!DOCTYPE\uDBC0\uDC00",
+"output":["ParseError", "ParseError", ["DOCTYPE", "\uDBC0\uDC00", null, null, false]]},
+
+{"description":"<!Y",
+"input":"<!Y",
+"output":["ParseError", ["Comment", "Y"]]},
+
+{"description":"<!Z",
+"input":"<!Z",
+"output":["ParseError", ["Comment", "Z"]]},
+
+{"description":"<!`",
+"input":"<!`",
+"output":["ParseError", ["Comment", "`"]]},
+
+{"description":"<!a",
+"input":"<!a",
+"output":["ParseError", ["Comment", "a"]]},
+
+{"description":"<!b",
+"input":"<!b",
+"output":["ParseError", ["Comment", "b"]]},
+
+{"description":"<!y",
+"input":"<!y",
+"output":["ParseError", ["Comment", "y"]]},
+
+{"description":"<!z",
+"input":"<!z",
+"output":["ParseError", ["Comment", "z"]]},
+
+{"description":"<!{",
+"input":"<!{",
+"output":["ParseError", ["Comment", "{"]]},
+
+{"description":"<!\\uDBC0\\uDC00",
+"input":"<!\uDBC0\uDC00",
+"output":["ParseError", ["Comment", "\uDBC0\uDC00"]]},
+
+{"description":"<\"",
+"input":"<\"",
+"output":["ParseError", ["Character", "<\""]]},
+
+{"description":"<&",
+"input":"<&",
+"output":["ParseError", ["Character", "<&"]]},
+
+{"description":"<'",
+"input":"<'",
+"output":["ParseError", ["Character", "<'"]]},
+
+{"description":"<-",
+"input":"<-",
+"output":["ParseError", ["Character", "<-"]]},
+
+{"description":"<.",
+"input":"<.",
+"output":["ParseError", ["Character", "<."]]},
+
+{"description":"</",
+"input":"</",
+"output":["ParseError", ["Character", "</"]]},
+
+{"description":"</\\u0000",
+"input":"</\u0000",
+"output":["ParseError", ["Comment", "\uFFFD"]]},
+
+{"description":"</\\u0009",
+"input":"</\u0009",
+"output":["ParseError", ["Comment", "\u0009"]]},
+
+{"description":"</\\u000A",
+"input":"</\u000A",
+"output":["ParseError", ["Comment", "\u000A"]]},
+
+{"description":"</\\u000B",
+"input":"</\u000B",
+"output":["ParseError", "ParseError", ["Comment", "\u000B"]]},
+
+{"description":"</\\u000C",
+"input":"</\u000C",
+"output":["ParseError", ["Comment", "\u000C"]]},
+
+{"description":"</ ",
+"input":"</ ",
+"output":["ParseError", ["Comment", " "]]},
+
+{"description":"</!",
+"input":"</!",
+"output":["ParseError", ["Comment", "!"]]},
+
+{"description":"</\"",
+"input":"</\"",
+"output":["ParseError", ["Comment", "\""]]},
+
+{"description":"</&",
+"input":"</&",
+"output":["ParseError", ["Comment", "&"]]},
+
+{"description":"</'",
+"input":"</'",
+"output":["ParseError", ["Comment", "'"]]},
+
+{"description":"</-",
+"input":"</-",
+"output":["ParseError", ["Comment", "-"]]},
+
+{"description":"<//",
+"input":"<//",
+"output":["ParseError", ["Comment", "/"]]},
+
+{"description":"</0",
+"input":"</0",
+"output":["ParseError", ["Comment", "0"]]},
+
+{"description":"</1",
+"input":"</1",
+"output":["ParseError", ["Comment", "1"]]},
+
+{"description":"</9",
+"input":"</9",
+"output":["ParseError", ["Comment", "9"]]},
+
+{"description":"</<",
+"input":"</<",
+"output":["ParseError", ["Comment", "<"]]},
+
+{"description":"</=",
+"input":"</=",
+"output":["ParseError", ["Comment", "="]]},
+
+{"description":"</>",
+"input":"</>",
+"output":["ParseError"]},
+
+{"description":"</?",
+"input":"</?",
+"output":["ParseError", ["Comment", "?"]]},
+
+{"description":"</@",
+"input":"</@",
+"output":["ParseError", ["Comment", "@"]]},
+
+{"description":"</A>",
+"input":"</A>",
+"output":[["EndTag", "a"]]},
+
+{"description":"</B>",
+"input":"</B>",
+"output":[["EndTag", "b"]]},
+
+{"description":"</Y>",
+"input":"</Y>",
+"output":[["EndTag", "y"]]},
+
+{"description":"</Z>",
+"input":"</Z>",
+"output":[["EndTag", "z"]]},
+
+{"description":"</[",
+"input":"</[",
+"output":["ParseError", ["Comment", "["]]},
+
+{"description":"</`",
+"input":"</`",
+"output":["ParseError", ["Comment", "`"]]},
+
+{"description":"</a>",
+"input":"</a>",
+"output":[["EndTag", "a"]]},
+
+{"description":"</b>",
+"input":"</b>",
+"output":[["EndTag", "b"]]},
+
+{"description":"</y>",
+"input":"</y>",
+"output":[["EndTag", "y"]]},
+
+{"description":"</z>",
+"input":"</z>",
+"output":[["EndTag", "z"]]},
+
+{"description":"</{",
+"input":"</{",
+"output":["ParseError", ["Comment", "{"]]},
+
+{"description":"</\\uDBC0\\uDC00",
+"input":"</\uDBC0\uDC00",
+"output":["ParseError", ["Comment", "\uDBC0\uDC00"]]},
+
+{"description":"<0",
+"input":"<0",
+"output":["ParseError", ["Character", "<0"]]},
+
+{"description":"<1",
+"input":"<1",
+"output":["ParseError", ["Character", "<1"]]},
+
+{"description":"<9",
+"input":"<9",
+"output":["ParseError", ["Character", "<9"]]},
+
+{"description":"<<",
+"input":"<<",
+"output":["ParseError", ["Character", "<"], "ParseError", ["Character", "<"]]},
+
+{"description":"<=",
+"input":"<=",
+"output":["ParseError", ["Character", "<="]]},
+
+{"description":"<>",
+"input":"<>",
+"output":["ParseError", ["Character", "<>"]]},
+
+{"description":"<?",
+"input":"<?",
+"output":["ParseError", ["Comment", "?"]]},
+
+{"description":"<?\\u0000",
+"input":"<?\u0000",
+"output":["ParseError", ["Comment", "?\uFFFD"]]},
+
+{"description":"<?\\u0009",
+"input":"<?\u0009",
+"output":["ParseError", ["Comment", "?\u0009"]]},
+
+{"description":"<?\\u000A",
+"input":"<?\u000A",
+"output":["ParseError", ["Comment", "?\u000A"]]},
+
+{"description":"<?\\u000B",
+"input":"<?\u000B",
+"output":["ParseError", "ParseError", ["Comment", "?\u000B"]]},
+
+{"description":"<?\\u000C",
+"input":"<?\u000C",
+"output":["ParseError", ["Comment", "?\u000C"]]},
+
+{"description":"<? ",
+"input":"<? ",
+"output":["ParseError", ["Comment", "? "]]},
+
+{"description":"<?!",
+"input":"<?!",
+"output":["ParseError", ["Comment", "?!"]]},
+
+{"description":"<?\"",
+"input":"<?\"",
+"output":["ParseError", ["Comment", "?\""]]},
+
+{"description":"<?&",
+"input":"<?&",
+"output":["ParseError", ["Comment", "?&"]]},
+
+{"description":"<?'",
+"input":"<?'",
+"output":["ParseError", ["Comment", "?'"]]},
+
+{"description":"<?-",
+"input":"<?-",
+"output":["ParseError", ["Comment", "?-"]]},
+
+{"description":"<?/",
+"input":"<?/",
+"output":["ParseError", ["Comment", "?/"]]},
+
+{"description":"<?0",
+"input":"<?0",
+"output":["ParseError", ["Comment", "?0"]]},
+
+{"description":"<?1",
+"input":"<?1",
+"output":["ParseError", ["Comment", "?1"]]},
+
+{"description":"<?9",
+"input":"<?9",
+"output":["ParseError", ["Comment", "?9"]]},
+
+{"description":"<?<",
+"input":"<?<",
+"output":["ParseError", ["Comment", "?<"]]},
+
+{"description":"<?=",
+"input":"<?=",
+"output":["ParseError", ["Comment", "?="]]},
+
+{"description":"<?>",
+"input":"<?>",
+"output":["ParseError", ["Comment", "?"]]},
+
+{"description":"<??",
+"input":"<??",
+"output":["ParseError", ["Comment", "??"]]},
+
+{"description":"<?@",
+"input":"<?@",
+"output":["ParseError", ["Comment", "?@"]]},
+
+{"description":"<?A",
+"input":"<?A",
+"output":["ParseError", ["Comment", "?A"]]},
+
+{"description":"<?B",
+"input":"<?B",
+"output":["ParseError", ["Comment", "?B"]]},
+
+{"description":"<?Y",
+"input":"<?Y",
+"output":["ParseError", ["Comment", "?Y"]]},
+
+{"description":"<?Z",
+"input":"<?Z",
+"output":["ParseError", ["Comment", "?Z"]]},
+
+{"description":"<?`",
+"input":"<?`",
+"output":["ParseError", ["Comment", "?`"]]},
+
+{"description":"<?a",
+"input":"<?a",
+"output":["ParseError", ["Comment", "?a"]]},
+
+{"description":"<?b",
+"input":"<?b",
+"output":["ParseError", ["Comment", "?b"]]},
+
+{"description":"<?y",
+"input":"<?y",
+"output":["ParseError", ["Comment", "?y"]]},
+
+{"description":"<?z",
+"input":"<?z",
+"output":["ParseError", ["Comment", "?z"]]},
+
+{"description":"<?{",
+"input":"<?{",
+"output":["ParseError", ["Comment", "?{"]]},
+
+{"description":"<?\\uDBC0\\uDC00",
+"input":"<?\uDBC0\uDC00",
+"output":["ParseError", ["Comment", "?\uDBC0\uDC00"]]},
+
+{"description":"<@",
+"input":"<@",
+"output":["ParseError", ["Character", "<@"]]},
+
+{"description":"<A>",
+"input":"<A>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<B>",
+"input":"<B>",
+"output":[["StartTag", "b", {}]]},
+
+{"description":"<Y>",
+"input":"<Y>",
+"output":[["StartTag", "y", {}]]},
+
+{"description":"<Z>",
+"input":"<Z>",
+"output":[["StartTag", "z", {}]]},
+
+{"description":"<[",
+"input":"<[",
+"output":["ParseError", ["Character", "<["]]},
+
+{"description":"<`",
+"input":"<`",
+"output":["ParseError", ["Character", "<`"]]},
+
+{"description":"<a>",
+"input":"<a>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a\\u0000>",
+"input":"<a\u0000>",
+"output":["ParseError", ["StartTag", "a\uFFFD", {}]]},
+
+{"description":"<a\\u0008>",
+"input":"<a\u0008>",
+"output":["ParseError", ["StartTag", "a\u0008", {}]]},
+
+{"description":"<a\\u0009>",
+"input":"<a\u0009>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a\\u000A>",
+"input":"<a\u000A>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a\\u000B>",
+"input":"<a\u000B>",
+"output":["ParseError", ["StartTag", "a\u000B", {}]]},
+
+{"description":"<a\\u000C>",
+"input":"<a\u000C>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a\\u000D>",
+"input":"<a\u000D>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a\\u001F>",
+"input":"<a\u001F>",
+"output":["ParseError", ["StartTag", "a\u001F", {}]]},
+
+{"description":"<a >",
+"input":"<a >",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a \\u0000>",
+"input":"<a \u0000>",
+"output":["ParseError", ["StartTag", "a", {"\uFFFD":""}]]},
+
+{"description":"<a \\u0008>",
+"input":"<a \u0008>",
+"output":["ParseError", ["StartTag", "a", {"\u0008":""}]]},
+
+{"description":"<a \\u0009>",
+"input":"<a \u0009>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a \\u000A>",
+"input":"<a \u000A>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a \\u000B>",
+"input":"<a \u000B>",
+"output":["ParseError", ["StartTag", "a", {"\u000B":""}]]},
+
+{"description":"<a \\u000C>",
+"input":"<a \u000C>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a \\u000D>",
+"input":"<a \u000D>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a \\u001F>",
+"input":"<a \u001F>",
+"output":["ParseError", ["StartTag", "a", {"\u001F":""}]]},
+
+{"description":"<a >",
+"input":"<a >",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a !>",
+"input":"<a !>",
+"output":[["StartTag", "a", {"!":""}]]},
+
+{"description":"<a \">",
+"input":"<a \">",
+"output":["ParseError", ["StartTag", "a", {"\"":""}]]},
+
+{"description":"<a #>",
+"input":"<a #>",
+"output":[["StartTag", "a", {"#":""}]]},
+
+{"description":"<a &>",
+"input":"<a &>",
+"output":[["StartTag", "a", {"&":""}]]},
+
+{"description":"<a '>",
+"input":"<a '>",
+"output":["ParseError", ["StartTag", "a", {"'":""}]]},
+
+{"description":"<a (>",
+"input":"<a (>",
+"output":[["StartTag", "a", {"(":""}]]},
+
+{"description":"<a ->",
+"input":"<a ->",
+"output":[["StartTag", "a", {"-":""}]]},
+
+{"description":"<a .>",
+"input":"<a .>",
+"output":[["StartTag", "a", {".":""}]]},
+
+{"description":"<a />",
+"input":"<a />",
+"output":[["StartTag", "a", {}, true]]},
+
+{"description":"<a 0>",
+"input":"<a 0>",
+"output":[["StartTag", "a", {"0":""}]]},
+
+{"description":"<a 1>",
+"input":"<a 1>",
+"output":[["StartTag", "a", {"1":""}]]},
+
+{"description":"<a 9>",
+"input":"<a 9>",
+"output":[["StartTag", "a", {"9":""}]]},
+
+{"description":"<a <>",
+"input":"<a <>",
+"output":["ParseError", ["StartTag", "a", {"<":""}]]},
+
+{"description":"<a =>",
+"input":"<a =>",
+"output":["ParseError", ["StartTag", "a", {"=":""}]]},
+
+{"description":"<a >",
+"input":"<a >",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a ?>",
+"input":"<a ?>",
+"output":[["StartTag", "a", {"?":""}]]},
+
+{"description":"<a @>",
+"input":"<a @>",
+"output":[["StartTag", "a", {"@":""}]]},
+
+{"description":"<a A>",
+"input":"<a A>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a B>",
+"input":"<a B>",
+"output":[["StartTag", "a", {"b":""}]]},
+
+{"description":"<a Y>",
+"input":"<a Y>",
+"output":[["StartTag", "a", {"y":""}]]},
+
+{"description":"<a Z>",
+"input":"<a Z>",
+"output":[["StartTag", "a", {"z":""}]]},
+
+{"description":"<a [>",
+"input":"<a [>",
+"output":[["StartTag", "a", {"[":""}]]},
+
+{"description":"<a `>",
+"input":"<a `>",
+"output":[["StartTag", "a", {"`":""}]]},
+
+{"description":"<a a>",
+"input":"<a a>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a\\u0000>",
+"input":"<a a\u0000>",
+"output":["ParseError", ["StartTag", "a", {"a\uFFFD":""}]]},
+
+{"description":"<a a\\u0008>",
+"input":"<a a\u0008>",
+"output":["ParseError", ["StartTag", "a", {"a\u0008":""}]]},
+
+{"description":"<a a\\u0009>",
+"input":"<a a\u0009>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a\\u000A>",
+"input":"<a a\u000A>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a\\u000B>",
+"input":"<a a\u000B>",
+"output":["ParseError", ["StartTag", "a", {"a\u000B":""}]]},
+
+{"description":"<a a\\u000C>",
+"input":"<a a\u000C>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a\\u000D>",
+"input":"<a a\u000D>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a\\u001F>",
+"input":"<a a\u001F>",
+"output":["ParseError", ["StartTag", "a", {"a\u001F":""}]]},
+
+{"description":"<a a >",
+"input":"<a a >",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a \\u0000>",
+"input":"<a a \u0000>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "\uFFFD":""}]]},
+
+{"description":"<a a \\u0008>",
+"input":"<a a \u0008>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "\u0008":""}]]},
+
+{"description":"<a a \\u0009>",
+"input":"<a a \u0009>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a \\u000A>",
+"input":"<a a \u000A>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a \\u000B>",
+"input":"<a a \u000B>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "\u000B":""}]]},
+
+{"description":"<a a \\u000C>",
+"input":"<a a \u000C>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a \\u000D>",
+"input":"<a a \u000D>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a \\u001F>",
+"input":"<a a \u001F>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "\u001F":""}]]},
+
+{"description":"<a a >",
+"input":"<a a >",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a !>",
+"input":"<a a !>",
+"output":[["StartTag", "a", {"a":"", "!":""}]]},
+
+{"description":"<a a \">",
+"input":"<a a \">",
+"output":["ParseError", ["StartTag", "a", {"a":"", "\"":""}]]},
+
+{"description":"<a a #>",
+"input":"<a a #>",
+"output":[["StartTag", "a", {"a":"", "#":""}]]},
+
+{"description":"<a a &>",
+"input":"<a a &>",
+"output":[["StartTag", "a", {"a":"", "&":""}]]},
+
+{"description":"<a a '>",
+"input":"<a a '>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "'":""}]]},
+
+{"description":"<a a (>",
+"input":"<a a (>",
+"output":[["StartTag", "a", {"a":"", "(":""}]]},
+
+{"description":"<a a ->",
+"input":"<a a ->",
+"output":[["StartTag", "a", {"a":"", "-":""}]]},
+
+{"description":"<a a .>",
+"input":"<a a .>",
+"output":[["StartTag", "a", {"a":"", ".":""}]]},
+
+{"description":"<a a />",
+"input":"<a a />",
+"output":[["StartTag", "a", {"a":""}, true]]},
+
+{"description":"<a a 0>",
+"input":"<a a 0>",
+"output":[["StartTag", "a", {"a":"", "0":""}]]},
+
+{"description":"<a a 1>",
+"input":"<a a 1>",
+"output":[["StartTag", "a", {"a":"", "1":""}]]},
+
+{"description":"<a a 9>",
+"input":"<a a 9>",
+"output":[["StartTag", "a", {"a":"", "9":""}]]},
+
+{"description":"<a a <>",
+"input":"<a a <>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "<":""}]]},
+
+{"description":"<a a =>",
+"input":"<a a =>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a >",
+"input":"<a a >",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a ?>",
+"input":"<a a ?>",
+"output":[["StartTag", "a", {"a":"", "?":""}]]},
+
+{"description":"<a a @>",
+"input":"<a a @>",
+"output":[["StartTag", "a", {"a":"", "@":""}]]},
+
+{"description":"<a a A>",
+"input":"<a a A>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a B>",
+"input":"<a a B>",
+"output":[["StartTag", "a", {"a":"", "b":""}]]},
+
+{"description":"<a a Y>",
+"input":"<a a Y>",
+"output":[["StartTag", "a", {"a":"", "y":""}]]},
+
+{"description":"<a a Z>",
+"input":"<a a Z>",
+"output":[["StartTag", "a", {"a":"", "z":""}]]},
+
+{"description":"<a a [>",
+"input":"<a a [>",
+"output":[["StartTag", "a", {"a":"", "[":""}]]},
+
+{"description":"<a a `>",
+"input":"<a a `>",
+"output":[["StartTag", "a", {"a":"", "`":""}]]},
+
+{"description":"<a a a>",
+"input":"<a a a>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a b>",
+"input":"<a a b>",
+"output":[["StartTag", "a", {"a":"", "b":""}]]},
+
+{"description":"<a a y>",
+"input":"<a a y>",
+"output":[["StartTag", "a", {"a":"", "y":""}]]},
+
+{"description":"<a a z>",
+"input":"<a a z>",
+"output":[["StartTag", "a", {"a":"", "z":""}]]},
+
+{"description":"<a a {>",
+"input":"<a a {>",
+"output":[["StartTag", "a", {"a":"", "{":""}]]},
+
+{"description":"<a a \\uDBC0\\uDC00>",
+"input":"<a a \uDBC0\uDC00>",
+"output":[["StartTag", "a", {"a":"", "\uDBC0\uDC00":""}]]},
+
+{"description":"<a a!>",
+"input":"<a a!>",
+"output":[["StartTag", "a", {"a!":""}]]},
+
+{"description":"<a a\">",
+"input":"<a a\">",
+"output":["ParseError", ["StartTag", "a", {"a\"":""}]]},
+
+{"description":"<a a#>",
+"input":"<a a#>",
+"output":[["StartTag", "a", {"a#":""}]]},
+
+{"description":"<a a&>",
+"input":"<a a&>",
+"output":[["StartTag", "a", {"a&":""}]]},
+
+{"description":"<a a'>",
+"input":"<a a'>",
+"output":["ParseError", ["StartTag", "a", {"a'":""}]]},
+
+{"description":"<a a(>",
+"input":"<a a(>",
+"output":[["StartTag", "a", {"a(":""}]]},
+
+{"description":"<a a->",
+"input":"<a a->",
+"output":[["StartTag", "a", {"a-":""}]]},
+
+{"description":"<a a.>",
+"input":"<a a.>",
+"output":[["StartTag", "a", {"a.":""}]]},
+
+{"description":"<a a/>",
+"input":"<a a/>",
+"output":[["StartTag", "a", {"a":""}, true]]},
+
+{"description":"<a a0>",
+"input":"<a a0>",
+"output":[["StartTag", "a", {"a0":""}]]},
+
+{"description":"<a a1>",
+"input":"<a a1>",
+"output":[["StartTag", "a", {"a1":""}]]},
+
+{"description":"<a a9>",
+"input":"<a a9>",
+"output":[["StartTag", "a", {"a9":""}]]},
+
+{"description":"<a a<>",
+"input":"<a a<>",
+"output":["ParseError", ["StartTag", "a", {"a<":""}]]},
+
+{"description":"<a a=>",
+"input":"<a a=>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=\\u0000>",
+"input":"<a a=\u0000>",
+"output":["ParseError", ["StartTag", "a", {"a":"\uFFFD"}]]},
+
+{"description":"<a a=\\u0008>",
+"input":"<a a=\u0008>",
+"output":["ParseError", ["StartTag", "a", {"a":"\u0008"}]]},
+
+{"description":"<a a=\\u0009>",
+"input":"<a a=\u0009>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=\\u000A>",
+"input":"<a a=\u000A>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=\\u000B>",
+"input":"<a a=\u000B>",
+"output":["ParseError", ["StartTag", "a", {"a":"\u000B"}]]},
+
+{"description":"<a a=\\u000C>",
+"input":"<a a=\u000C>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=\\u000D>",
+"input":"<a a=\u000D>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=\\u001F>",
+"input":"<a a=\u001F>",
+"output":["ParseError", ["StartTag", "a", {"a":"\u001F"}]]},
+
+{"description":"<a a= >",
+"input":"<a a= >",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=!>",
+"input":"<a a=!>",
+"output":[["StartTag", "a", {"a":"!"}]]},
+
+{"description":"<a a=\"\">",
+"input":"<a a=\"\">",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=\"\\u0000\">",
+"input":"<a a=\"\u0000\">",
+"output":["ParseError", ["StartTag", "a", {"a":"\uFFFD"}]]},
+
+{"description":"<a a=\"\\u0009\">",
+"input":"<a a=\"\u0009\">",
+"output":[["StartTag", "a", {"a":"\u0009"}]]},
+
+{"description":"<a a=\"\\u000A\">",
+"input":"<a a=\"\u000A\">",
+"output":[["StartTag", "a", {"a":"\u000A"}]]},
+
+{"description":"<a a=\"\\u000B\">",
+"input":"<a a=\"\u000B\">",
+"output":["ParseError", ["StartTag", "a", {"a":"\u000B"}]]},
+
+{"description":"<a a=\"\\u000C\">",
+"input":"<a a=\"\u000C\">",
+"output":[["StartTag", "a", {"a":"\u000C"}]]},
+
+{"description":"<a a=\" \">",
+"input":"<a a=\" \">",
+"output":[["StartTag", "a", {"a":" "}]]},
+
+{"description":"<a a=\"!\">",
+"input":"<a a=\"!\">",
+"output":[["StartTag", "a", {"a":"!"}]]},
+
+{"description":"<a a=\"\">",
+"input":"<a a=\"\">",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=\"#\">",
+"input":"<a a=\"#\">",
+"output":[["StartTag", "a", {"a":"#"}]]},
+
+{"description":"<a a=\"%\">",
+"input":"<a a=\"%\">",
+"output":[["StartTag", "a", {"a":"%"}]]},
+
+{"description":"<a a=\"&\">",
+"input":"<a a=\"&\">",
+"output":[["StartTag", "a", {"a":"&"}]]},
+
+{"description":"<a a=\"'\">",
+"input":"<a a=\"'\">",
+"output":[["StartTag", "a", {"a":"'"}]]},
+
+{"description":"<a a=\"-\">",
+"input":"<a a=\"-\">",
+"output":[["StartTag", "a", {"a":"-"}]]},
+
+{"description":"<a a=\"/\">",
+"input":"<a a=\"/\">",
+"output":[["StartTag", "a", {"a":"/"}]]},
+
+{"description":"<a a=\"0\">",
+"input":"<a a=\"0\">",
+"output":[["StartTag", "a", {"a":"0"}]]},
+
+{"description":"<a a=\"1\">",
+"input":"<a a=\"1\">",
+"output":[["StartTag", "a", {"a":"1"}]]},
+
+{"description":"<a a=\"9\">",
+"input":"<a a=\"9\">",
+"output":[["StartTag", "a", {"a":"9"}]]},
+
+{"description":"<a a=\"<\">",
+"input":"<a a=\"<\">",
+"output":[["StartTag", "a", {"a":"<"}]]},
+
+{"description":"<a a=\"=\">",
+"input":"<a a=\"=\">",
+"output":[["StartTag", "a", {"a":"="}]]},
+
+{"description":"<a a=\">\">",
+"input":"<a a=\">\">",
+"output":[["StartTag", "a", {"a":">"}]]},
+
+{"description":"<a a=\"?\">",
+"input":"<a a=\"?\">",
+"output":[["StartTag", "a", {"a":"?"}]]},
+
+{"description":"<a a=\"@\">",
+"input":"<a a=\"@\">",
+"output":[["StartTag", "a", {"a":"@"}]]},
+
+{"description":"<a a=\"A\">",
+"input":"<a a=\"A\">",
+"output":[["StartTag", "a", {"a":"A"}]]},
+
+{"description":"<a a=\"B\">",
+"input":"<a a=\"B\">",
+"output":[["StartTag", "a", {"a":"B"}]]},
+
+{"description":"<a a=\"Y\">",
+"input":"<a a=\"Y\">",
+"output":[["StartTag", "a", {"a":"Y"}]]},
+
+{"description":"<a a=\"Z\">",
+"input":"<a a=\"Z\">",
+"output":[["StartTag", "a", {"a":"Z"}]]},
+
+{"description":"<a a=\"`\">",
+"input":"<a a=\"`\">",
+"output":[["StartTag", "a", {"a":"`"}]]},
+
+{"description":"<a a=\"a\">",
+"input":"<a a=\"a\">",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a=\"b\">",
+"input":"<a a=\"b\">",
+"output":[["StartTag", "a", {"a":"b"}]]},
+
+{"description":"<a a=\"y\">",
+"input":"<a a=\"y\">",
+"output":[["StartTag", "a", {"a":"y"}]]},
+
+{"description":"<a a=\"z\">",
+"input":"<a a=\"z\">",
+"output":[["StartTag", "a", {"a":"z"}]]},
+
+{"description":"<a a=\"{\">",
+"input":"<a a=\"{\">",
+"output":[["StartTag", "a", {"a":"{"}]]},
+
+{"description":"<a a=\"\\uDBC0\\uDC00\">",
+"input":"<a a=\"\uDBC0\uDC00\">",
+"output":[["StartTag", "a", {"a":"\uDBC0\uDC00"}]]},
+
+{"description":"<a a=#>",
+"input":"<a a=#>",
+"output":[["StartTag", "a", {"a":"#"}]]},
+
+{"description":"<a a=%>",
+"input":"<a a=%>",
+"output":[["StartTag", "a", {"a":"%"}]]},
+
+{"description":"<a a=&>",
+"input":"<a a=&>",
+"output":[["StartTag", "a", {"a":"&"}]]},
+
+{"description":"<a a=''>",
+"input":"<a a=''>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a='\\u0000'>",
+"input":"<a a='\u0000'>",
+"output":["ParseError", ["StartTag", "a", {"a":"\uFFFD"}]]},
+
+{"description":"<a a='\\u0009'>",
+"input":"<a a='\u0009'>",
+"output":[["StartTag", "a", {"a":"\u0009"}]]},
+
+{"description":"<a a='\\u000A'>",
+"input":"<a a='\u000A'>",
+"output":[["StartTag", "a", {"a":"\u000A"}]]},
+
+{"description":"<a a='\\u000B'>",
+"input":"<a a='\u000B'>",
+"output":["ParseError", ["StartTag", "a", {"a":"\u000B"}]]},
+
+{"description":"<a a='\\u000C'>",
+"input":"<a a='\u000C'>",
+"output":[["StartTag", "a", {"a":"\u000C"}]]},
+
+{"description":"<a a=' '>",
+"input":"<a a=' '>",
+"output":[["StartTag", "a", {"a":" "}]]},
+
+{"description":"<a a='!'>",
+"input":"<a a='!'>",
+"output":[["StartTag", "a", {"a":"!"}]]},
+
+{"description":"<a a='\"'>",
+"input":"<a a='\"'>",
+"output":[["StartTag", "a", {"a":"\""}]]},
+
+{"description":"<a a='%'>",
+"input":"<a a='%'>",
+"output":[["StartTag", "a", {"a":"%"}]]},
+
+{"description":"<a a='&'>",
+"input":"<a a='&'>",
+"output":[["StartTag", "a", {"a":"&"}]]},
+
+{"description":"<a a=''>",
+"input":"<a a=''>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''\\u0000>",
+"input":"<a a=''\u0000>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":"", "\uFFFD":""}]]},
+
+{"description":"<a a=''\\u0008>",
+"input":"<a a=''\u0008>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":"", "\u0008":""}]]},
+
+{"description":"<a a=''\\u0009>",
+"input":"<a a=''\u0009>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''\\u000A>",
+"input":"<a a=''\u000A>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''\\u000B>",
+"input":"<a a=''\u000B>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":"", "\u000B":""}]]},
+
+{"description":"<a a=''\\u000C>",
+"input":"<a a=''\u000C>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''\\u000D>",
+"input":"<a a=''\u000D>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''\\u001F>",
+"input":"<a a=''\u001F>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":"", "\u001F":""}]]},
+
+{"description":"<a a='' >",
+"input":"<a a='' >",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''!>",
+"input":"<a a=''!>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "!":""}]]},
+
+{"description":"<a a=''\">",
+"input":"<a a=''\">",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":"", "\"":""}]]},
+
+{"description":"<a a=''&>",
+"input":"<a a=''&>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "&":""}]]},
+
+{"description":"<a a='''>",
+"input":"<a a='''>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":"", "'":""}]]},
+
+{"description":"<a a=''->",
+"input":"<a a=''->",
+"output":["ParseError", ["StartTag", "a", {"a":"", "-":""}]]},
+
+{"description":"<a a=''.>",
+"input":"<a a=''.>",
+"output":["ParseError", ["StartTag", "a", {"a":"", ".":""}]]},
+
+{"description":"<a a=''/>",
+"input":"<a a=''/>",
+"output":[["StartTag", "a", {"a":""}, true]]},
+
+{"description":"<a a=''0>",
+"input":"<a a=''0>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "0":""}]]},
+
+{"description":"<a a=''1>",
+"input":"<a a=''1>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "1":""}]]},
+
+{"description":"<a a=''9>",
+"input":"<a a=''9>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "9":""}]]},
+
+{"description":"<a a=''<>",
+"input":"<a a=''<>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":"", "<":""}]]},
+
+{"description":"<a a=''=>",
+"input":"<a a=''=>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":"", "=":""}]]},
+
+{"description":"<a a=''>",
+"input":"<a a=''>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''?>",
+"input":"<a a=''?>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "?":""}]]},
+
+{"description":"<a a=''@>",
+"input":"<a a=''@>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "@":""}]]},
+
+{"description":"<a a=''A>",
+"input":"<a a=''A>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''B>",
+"input":"<a a=''B>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "b":""}]]},
+
+{"description":"<a a=''Y>",
+"input":"<a a=''Y>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "y":""}]]},
+
+{"description":"<a a=''Z>",
+"input":"<a a=''Z>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "z":""}]]},
+
+{"description":"<a a=''`>",
+"input":"<a a=''`>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "`":""}]]},
+
+{"description":"<a a=''a>",
+"input":"<a a=''a>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=''b>",
+"input":"<a a=''b>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "b":""}]]},
+
+{"description":"<a a=''y>",
+"input":"<a a=''y>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "y":""}]]},
+
+{"description":"<a a=''z>",
+"input":"<a a=''z>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "z":""}]]},
+
+{"description":"<a a=''{>",
+"input":"<a a=''{>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "{":""}]]},
+
+{"description":"<a a=''\\uDBC0\\uDC00>",
+"input":"<a a=''\uDBC0\uDC00>",
+"output":["ParseError", ["StartTag", "a", {"a":"", "\uDBC0\uDC00":""}]]},
+
+{"description":"<a a='('>",
+"input":"<a a='('>",
+"output":[["StartTag", "a", {"a":"("}]]},
+
+{"description":"<a a='-'>",
+"input":"<a a='-'>",
+"output":[["StartTag", "a", {"a":"-"}]]},
+
+{"description":"<a a='/'>",
+"input":"<a a='/'>",
+"output":[["StartTag", "a", {"a":"/"}]]},
+
+{"description":"<a a='0'>",
+"input":"<a a='0'>",
+"output":[["StartTag", "a", {"a":"0"}]]},
+
+{"description":"<a a='1'>",
+"input":"<a a='1'>",
+"output":[["StartTag", "a", {"a":"1"}]]},
+
+{"description":"<a a='9'>",
+"input":"<a a='9'>",
+"output":[["StartTag", "a", {"a":"9"}]]},
+
+{"description":"<a a='<'>",
+"input":"<a a='<'>",
+"output":[["StartTag", "a", {"a":"<"}]]},
+
+{"description":"<a a='='>",
+"input":"<a a='='>",
+"output":[["StartTag", "a", {"a":"="}]]},
+
+{"description":"<a a='>'>",
+"input":"<a a='>'>",
+"output":[["StartTag", "a", {"a":">"}]]},
+
+{"description":"<a a='?'>",
+"input":"<a a='?'>",
+"output":[["StartTag", "a", {"a":"?"}]]},
+
+{"description":"<a a='@'>",
+"input":"<a a='@'>",
+"output":[["StartTag", "a", {"a":"@"}]]},
+
+{"description":"<a a='A'>",
+"input":"<a a='A'>",
+"output":[["StartTag", "a", {"a":"A"}]]},
+
+{"description":"<a a='B'>",
+"input":"<a a='B'>",
+"output":[["StartTag", "a", {"a":"B"}]]},
+
+{"description":"<a a='Y'>",
+"input":"<a a='Y'>",
+"output":[["StartTag", "a", {"a":"Y"}]]},
+
+{"description":"<a a='Z'>",
+"input":"<a a='Z'>",
+"output":[["StartTag", "a", {"a":"Z"}]]},
+
+{"description":"<a a='`'>",
+"input":"<a a='`'>",
+"output":[["StartTag", "a", {"a":"`"}]]},
+
+{"description":"<a a='a'>",
+"input":"<a a='a'>",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a='b'>",
+"input":"<a a='b'>",
+"output":[["StartTag", "a", {"a":"b"}]]},
+
+{"description":"<a a='y'>",
+"input":"<a a='y'>",
+"output":[["StartTag", "a", {"a":"y"}]]},
+
+{"description":"<a a='z'>",
+"input":"<a a='z'>",
+"output":[["StartTag", "a", {"a":"z"}]]},
+
+{"description":"<a a='{'>",
+"input":"<a a='{'>",
+"output":[["StartTag", "a", {"a":"{"}]]},
+
+{"description":"<a a='\\uDBC0\\uDC00'>",
+"input":"<a a='\uDBC0\uDC00'>",
+"output":[["StartTag", "a", {"a":"\uDBC0\uDC00"}]]},
+
+{"description":"<a a=(>",
+"input":"<a a=(>",
+"output":[["StartTag", "a", {"a":"("}]]},
+
+{"description":"<a a=->",
+"input":"<a a=->",
+"output":[["StartTag", "a", {"a":"-"}]]},
+
+{"description":"<a a=/>",
+"input":"<a a=/>",
+"output":[["StartTag", "a", {"a":"/"}]]},
+
+{"description":"<a a=0>",
+"input":"<a a=0>",
+"output":[["StartTag", "a", {"a":"0"}]]},
+
+{"description":"<a a=1>",
+"input":"<a a=1>",
+"output":[["StartTag", "a", {"a":"1"}]]},
+
+{"description":"<a a=9>",
+"input":"<a a=9>",
+"output":[["StartTag", "a", {"a":"9"}]]},
+
+{"description":"<a a=<>",
+"input":"<a a=<>",
+"output":["ParseError", ["StartTag", "a", {"a":"<"}]]},
+
+{"description":"<a a==>",
+"input":"<a a==>",
+"output":["ParseError", ["StartTag", "a", {"a":"="}]]},
+
+{"description":"<a a=>",
+"input":"<a a=>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a=?>",
+"input":"<a a=?>",
+"output":[["StartTag", "a", {"a":"?"}]]},
+
+{"description":"<a a=@>",
+"input":"<a a=@>",
+"output":[["StartTag", "a", {"a":"@"}]]},
+
+{"description":"<a a=A>",
+"input":"<a a=A>",
+"output":[["StartTag", "a", {"a":"A"}]]},
+
+{"description":"<a a=B>",
+"input":"<a a=B>",
+"output":[["StartTag", "a", {"a":"B"}]]},
+
+{"description":"<a a=Y>",
+"input":"<a a=Y>",
+"output":[["StartTag", "a", {"a":"Y"}]]},
+
+{"description":"<a a=Z>",
+"input":"<a a=Z>",
+"output":[["StartTag", "a", {"a":"Z"}]]},
+
+{"description":"<a a=`>",
+"input":"<a a=`>",
+"output":["ParseError", ["StartTag", "a", {"a":"`"}]]},
+
+{"description":"<a a=a>",
+"input":"<a a=a>",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a=a\\u0000>",
+"input":"<a a=a\u0000>",
+"output":["ParseError", ["StartTag", "a", {"a":"a\uFFFD"}]]},
+
+{"description":"<a a=a\\u0008>",
+"input":"<a a=a\u0008>",
+"output":["ParseError", ["StartTag", "a", {"a":"a\u0008"}]]},
+
+{"description":"<a a=a\\u0009>",
+"input":"<a a=a\u0009>",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a=a\\u000A>",
+"input":"<a a=a\u000A>",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a=a\\u000B>",
+"input":"<a a=a\u000B>",
+"output":["ParseError", ["StartTag", "a", {"a":"a\u000B"}]]},
+
+{"description":"<a a=a\\u000C>",
+"input":"<a a=a\u000C>",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a=a\\u000D>",
+"input":"<a a=a\u000D>",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a=a\\u001F>",
+"input":"<a a=a\u001F>",
+"output":["ParseError", ["StartTag", "a", {"a":"a\u001F"}]]},
+
+{"description":"<a a=a >",
+"input":"<a a=a >",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a=a!>",
+"input":"<a a=a!>",
+"output":[["StartTag", "a", {"a":"a!"}]]},
+
+{"description":"<a a=a\">",
+"input":"<a a=a\">",
+"output":["ParseError", ["StartTag", "a", {"a":"a\""}]]},
+
+{"description":"<a a=a#>",
+"input":"<a a=a#>",
+"output":[["StartTag", "a", {"a":"a#"}]]},
+
+{"description":"<a a=a%>",
+"input":"<a a=a%>",
+"output":[["StartTag", "a", {"a":"a%"}]]},
+
+{"description":"<a a=a&>",
+"input":"<a a=a&>",
+"output":[["StartTag", "a", {"a":"a&"}]]},
+
+{"description":"<a a=a'>",
+"input":"<a a=a'>",
+"output":["ParseError", ["StartTag", "a", {"a":"a'"}]]},
+
+{"description":"<a a=a(>",
+"input":"<a a=a(>",
+"output":[["StartTag", "a", {"a":"a("}]]},
+
+{"description":"<a a=a->",
+"input":"<a a=a->",
+"output":[["StartTag", "a", {"a":"a-"}]]},
+
+{"description":"<a a=a/>",
+"input":"<a a=a/>",
+"output":[["StartTag", "a", {"a":"a/"}]]},
+
+{"description":"<a a=a0>",
+"input":"<a a=a0>",
+"output":[["StartTag", "a", {"a":"a0"}]]},
+
+{"description":"<a a=a1>",
+"input":"<a a=a1>",
+"output":[["StartTag", "a", {"a":"a1"}]]},
+
+{"description":"<a a=a9>",
+"input":"<a a=a9>",
+"output":[["StartTag", "a", {"a":"a9"}]]},
+
+{"description":"<a a=a<>",
+"input":"<a a=a<>",
+"output":["ParseError", ["StartTag", "a", {"a":"a<"}]]},
+
+{"description":"<a a=a=>",
+"input":"<a a=a=>",
+"output":["ParseError", ["StartTag", "a", {"a":"a="}]]},
+
+{"description":"<a a=a>",
+"input":"<a a=a>",
+"output":[["StartTag", "a", {"a":"a"}]]},
+
+{"description":"<a a=a?>",
+"input":"<a a=a?>",
+"output":[["StartTag", "a", {"a":"a?"}]]},
+
+{"description":"<a a=a@>",
+"input":"<a a=a@>",
+"output":[["StartTag", "a", {"a":"a@"}]]},
+
+{"description":"<a a=aA>",
+"input":"<a a=aA>",
+"output":[["StartTag", "a", {"a":"aA"}]]},
+
+{"description":"<a a=aB>",
+"input":"<a a=aB>",
+"output":[["StartTag", "a", {"a":"aB"}]]},
+
+{"description":"<a a=aY>",
+"input":"<a a=aY>",
+"output":[["StartTag", "a", {"a":"aY"}]]},
+
+{"description":"<a a=aZ>",
+"input":"<a a=aZ>",
+"output":[["StartTag", "a", {"a":"aZ"}]]},
+
+{"description":"<a a=a`>",
+"input":"<a a=a`>",
+"output":["ParseError", ["StartTag", "a", {"a":"a`"}]]},
+
+{"description":"<a a=aa>",
+"input":"<a a=aa>",
+"output":[["StartTag", "a", {"a":"aa"}]]},
+
+{"description":"<a a=ab>",
+"input":"<a a=ab>",
+"output":[["StartTag", "a", {"a":"ab"}]]},
+
+{"description":"<a a=ay>",
+"input":"<a a=ay>",
+"output":[["StartTag", "a", {"a":"ay"}]]},
+
+{"description":"<a a=az>",
+"input":"<a a=az>",
+"output":[["StartTag", "a", {"a":"az"}]]},
+
+{"description":"<a a=a{>",
+"input":"<a a=a{>",
+"output":[["StartTag", "a", {"a":"a{"}]]},
+
+{"description":"<a a=a\\uDBC0\\uDC00>",
+"input":"<a a=a\uDBC0\uDC00>",
+"output":[["StartTag", "a", {"a":"a\uDBC0\uDC00"}]]},
+
+{"description":"<a a=b>",
+"input":"<a a=b>",
+"output":[["StartTag", "a", {"a":"b"}]]},
+
+{"description":"<a a=y>",
+"input":"<a a=y>",
+"output":[["StartTag", "a", {"a":"y"}]]},
+
+{"description":"<a a=z>",
+"input":"<a a=z>",
+"output":[["StartTag", "a", {"a":"z"}]]},
+
+{"description":"<a a={>",
+"input":"<a a={>",
+"output":[["StartTag", "a", {"a":"{"}]]},
+
+{"description":"<a a=\\uDBC0\\uDC00>",
+"input":"<a a=\uDBC0\uDC00>",
+"output":[["StartTag", "a", {"a":"\uDBC0\uDC00"}]]},
+
+{"description":"<a a>",
+"input":"<a a>",
+"output":[["StartTag", "a", {"a":""}]]},
+
+{"description":"<a a?>",
+"input":"<a a?>",
+"output":[["StartTag", "a", {"a?":""}]]},
+
+{"description":"<a a@>",
+"input":"<a a@>",
+"output":[["StartTag", "a", {"a@":""}]]},
+
+{"description":"<a aA>",
+"input":"<a aA>",
+"output":[["StartTag", "a", {"aa":""}]]},
+
+{"description":"<a aB>",
+"input":"<a aB>",
+"output":[["StartTag", "a", {"ab":""}]]},
+
+{"description":"<a aY>",
+"input":"<a aY>",
+"output":[["StartTag", "a", {"ay":""}]]},
+
+{"description":"<a aZ>",
+"input":"<a aZ>",
+"output":[["StartTag", "a", {"az":""}]]},
+
+{"description":"<a a[>",
+"input":"<a a[>",
+"output":[["StartTag", "a", {"a[":""}]]},
+
+{"description":"<a a`>",
+"input":"<a a`>",
+"output":[["StartTag", "a", {"a`":""}]]},
+
+{"description":"<a aa>",
+"input":"<a aa>",
+"output":[["StartTag", "a", {"aa":""}]]},
+
+{"description":"<a ab>",
+"input":"<a ab>",
+"output":[["StartTag", "a", {"ab":""}]]},
+
+{"description":"<a ay>",
+"input":"<a ay>",
+"output":[["StartTag", "a", {"ay":""}]]},
+
+{"description":"<a az>",
+"input":"<a az>",
+"output":[["StartTag", "a", {"az":""}]]},
+
+{"description":"<a a{>",
+"input":"<a a{>",
+"output":[["StartTag", "a", {"a{":""}]]},
+
+{"description":"<a a\\uDBC0\\uDC00>",
+"input":"<a a\uDBC0\uDC00>",
+"output":[["StartTag", "a", {"a\uDBC0\uDC00":""}]]},
+
+{"description":"<a b>",
+"input":"<a b>",
+"output":[["StartTag", "a", {"b":""}]]},
+
+{"description":"<a y>",
+"input":"<a y>",
+"output":[["StartTag", "a", {"y":""}]]},
+
+{"description":"<a z>",
+"input":"<a z>",
+"output":[["StartTag", "a", {"z":""}]]},
+
+{"description":"<a {>",
+"input":"<a {>",
+"output":[["StartTag", "a", {"{":""}]]},
+
+{"description":"<a \\uDBC0\\uDC00>",
+"input":"<a \uDBC0\uDC00>",
+"output":[["StartTag", "a", {"\uDBC0\uDC00":""}]]},
+
+{"description":"<a!>",
+"input":"<a!>",
+"output":[["StartTag", "a!", {}]]},
+
+{"description":"<a\">",
+"input":"<a\">",
+"output":[["StartTag", "a\"", {}]]},
+
+{"description":"<a&>",
+"input":"<a&>",
+"output":[["StartTag", "a&", {}]]},
+
+{"description":"<a'>",
+"input":"<a'>",
+"output":[["StartTag", "a'", {}]]},
+
+{"description":"<a->",
+"input":"<a->",
+"output":[["StartTag", "a-", {}]]},
+
+{"description":"<a.>",
+"input":"<a.>",
+"output":[["StartTag", "a.", {}]]},
+
+{"description":"<a/>",
+"input":"<a/>",
+"output":[["StartTag", "a", {}, true]]},
+
+{"description":"<a/\\u0000>",
+"input":"<a/\u0000>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"\uFFFD":""}]]},
+
+{"description":"<a/\\u0009>",
+"input":"<a/\u0009>",
+"output":["ParseError", ["StartTag", "a", {}]]},
+
+{"description":"<a/\\u000A>",
+"input":"<a/\u000A>",
+"output":["ParseError", ["StartTag", "a", {}]]},
+
+{"description":"<a/\\u000B>",
+"input":"<a/\u000B>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"\u000B":""}]]},
+
+{"description":"<a/\\u000C>",
+"input":"<a/\u000C>",
+"output":["ParseError", ["StartTag", "a", {}]]},
+
+{"description":"<a/ >",
+"input":"<a/ >",
+"output":["ParseError", ["StartTag", "a", {}]]},
+
+{"description":"<a/!>",
+"input":"<a/!>",
+"output":["ParseError", ["StartTag", "a", {"!":""}]]},
+
+{"description":"<a/\">",
+"input":"<a/\">",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"\"":""}]]},
+
+{"description":"<a/&>",
+"input":"<a/&>",
+"output":["ParseError", ["StartTag", "a", {"&":""}]]},
+
+{"description":"<a/'>",
+"input":"<a/'>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"'":""}]]},
+
+{"description":"<a/->",
+"input":"<a/->",
+"output":["ParseError", ["StartTag", "a", {"-":""}]]},
+
+{"description":"<a//>",
+"input":"<a//>",
+"output":["ParseError", ["StartTag", "a", {}, true]]},
+
+{"description":"<a/0>",
+"input":"<a/0>",
+"output":["ParseError", ["StartTag", "a", {"0":""}]]},
+
+{"description":"<a/1>",
+"input":"<a/1>",
+"output":["ParseError", ["StartTag", "a", {"1":""}]]},
+
+{"description":"<a/9>",
+"input":"<a/9>",
+"output":["ParseError", ["StartTag", "a", {"9":""}]]},
+
+{"description":"<a/<>",
+"input":"<a/<>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"<":""}]]},
+
+{"description":"<a/=>",
+"input":"<a/=>",
+"output":["ParseError", "ParseError", ["StartTag", "a", {"=":""}]]},
+
+{"description":"<a/>",
+"input":"<a/>",
+"output":[["StartTag", "a", {}, true]]},
+
+{"description":"<a/?>",
+"input":"<a/?>",
+"output":["ParseError", ["StartTag", "a", {"?":""}]]},
+
+{"description":"<a/@>",
+"input":"<a/@>",
+"output":["ParseError", ["StartTag", "a", {"@":""}]]},
+
+{"description":"<a/A>",
+"input":"<a/A>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a/B>",
+"input":"<a/B>",
+"output":["ParseError", ["StartTag", "a", {"b":""}]]},
+
+{"description":"<a/Y>",
+"input":"<a/Y>",
+"output":["ParseError", ["StartTag", "a", {"y":""}]]},
+
+{"description":"<a/Z>",
+"input":"<a/Z>",
+"output":["ParseError", ["StartTag", "a", {"z":""}]]},
+
+{"description":"<a/`>",
+"input":"<a/`>",
+"output":["ParseError", ["StartTag", "a", {"`":""}]]},
+
+{"description":"<a/a>",
+"input":"<a/a>",
+"output":["ParseError", ["StartTag", "a", {"a":""}]]},
+
+{"description":"<a/b>",
+"input":"<a/b>",
+"output":["ParseError", ["StartTag", "a", {"b":""}]]},
+
+{"description":"<a/y>",
+"input":"<a/y>",
+"output":["ParseError", ["StartTag", "a", {"y":""}]]},
+
+{"description":"<a/z>",
+"input":"<a/z>",
+"output":["ParseError", ["StartTag", "a", {"z":""}]]},
+
+{"description":"<a/{>",
+"input":"<a/{>",
+"output":["ParseError", ["StartTag", "a", {"{":""}]]},
+
+{"description":"<a/\\uDBC0\\uDC00>",
+"input":"<a/\uDBC0\uDC00>",
+"output":["ParseError", ["StartTag", "a", {"\uDBC0\uDC00":""}]]},
+
+{"description":"<a0>",
+"input":"<a0>",
+"output":[["StartTag", "a0", {}]]},
+
+{"description":"<a1>",
+"input":"<a1>",
+"output":[["StartTag", "a1", {}]]},
+
+{"description":"<a9>",
+"input":"<a9>",
+"output":[["StartTag", "a9", {}]]},
+
+{"description":"<a<>",
+"input":"<a<>",
+"output":[["StartTag", "a<", {}]]},
+
+{"description":"<a=>",
+"input":"<a=>",
+"output":[["StartTag", "a=", {}]]},
+
+{"description":"<a>",
+"input":"<a>",
+"output":[["StartTag", "a", {}]]},
+
+{"description":"<a?>",
+"input":"<a?>",
+"output":[["StartTag", "a?", {}]]},
+
+{"description":"<a@>",
+"input":"<a@>",
+"output":[["StartTag", "a@", {}]]},
+
+{"description":"<aA>",
+"input":"<aA>",
+"output":[["StartTag", "aa", {}]]},
+
+{"description":"<aB>",
+"input":"<aB>",
+"output":[["StartTag", "ab", {}]]},
+
+{"description":"<aY>",
+"input":"<aY>",
+"output":[["StartTag", "ay", {}]]},
+
+{"description":"<aZ>",
+"input":"<aZ>",
+"output":[["StartTag", "az", {}]]},
+
+{"description":"<a[>",
+"input":"<a[>",
+"output":[["StartTag", "a[", {}]]},
+
+{"description":"<a`>",
+"input":"<a`>",
+"output":[["StartTag", "a`", {}]]},
+
+{"description":"<aa>",
+"input":"<aa>",
+"output":[["StartTag", "aa", {}]]},
+
+{"description":"<ab>",
+"input":"<ab>",
+"output":[["StartTag", "ab", {}]]},
+
+{"description":"<ay>",
+"input":"<ay>",
+"output":[["StartTag", "ay", {}]]},
+
+{"description":"<az>",
+"input":"<az>",
+"output":[["StartTag", "az", {}]]},
+
+{"description":"<a{>",
+"input":"<a{>",
+"output":[["StartTag", "a{", {}]]},
+
+{"description":"<a\\uDBC0\\uDC00>",
+"input":"<a\uDBC0\uDC00>",
+"output":[["StartTag", "a\uDBC0\uDC00", {}]]},
+
+{"description":"<b>",
+"input":"<b>",
+"output":[["StartTag", "b", {}]]},
+
+{"description":"<y>",
+"input":"<y>",
+"output":[["StartTag", "y", {}]]},
+
+{"description":"<z>",
+"input":"<z>",
+"output":[["StartTag", "z", {}]]},
+
+{"description":"<{",
+"input":"<{",
+"output":["ParseError", ["Character", "<{"]]},
+
+{"description":"<\\uDBC0\\uDC00",
+"input":"<\uDBC0\uDC00",
+"output":["ParseError", ["Character", "<\uDBC0\uDC00"]]},
+
+{"description":"=",
+"input":"=",
+"output":[["Character", "="]]},
+
+{"description":">",
+"input":">",
+"output":[["Character", ">"]]},
+
+{"description":"?",
+"input":"?",
+"output":[["Character", "?"]]},
+
+{"description":"@",
+"input":"@",
+"output":[["Character", "@"]]},
+
+{"description":"A",
+"input":"A",
+"output":[["Character", "A"]]},
+
+{"description":"B",
+"input":"B",
+"output":[["Character", "B"]]},
+
+{"description":"Y",
+"input":"Y",
+"output":[["Character", "Y"]]},
+
+{"description":"Z",
+"input":"Z",
+"output":[["Character", "Z"]]},
+
+{"description":"`",
+"input":"`",
+"output":[["Character", "`"]]},
+
+{"description":"a",
+"input":"a",
+"output":[["Character", "a"]]},
+
+{"description":"b",
+"input":"b",
+"output":[["Character", "b"]]},
+
+{"description":"y",
+"input":"y",
+"output":[["Character", "y"]]},
+
+{"description":"z",
+"input":"z",
+"output":[["Character", "z"]]},
+
+{"description":"{",
+"input":"{",
+"output":[["Character", "{"]]},
+
+{"description":"\\uDBC0\\uDC00",
+"input":"\uDBC0\uDC00",
+"output":[["Character", "\uDBC0\uDC00"]]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test4.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test4.test
new file mode 100644
index 000000000..4be94b0c7
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/test4.test
@@ -0,0 +1,344 @@
+{"tests": [
+
+{"description":"< in attribute name",
+"input":"<z/0 <>",
+"output":["ParseError", "ParseError", ["StartTag", "z", {"0": "", "<": ""}]]},
+
+{"description":"< in attribute value",
+"input":"<z x=<>",
+"output":["ParseError", ["StartTag", "z", {"x": "<"}]]},
+
+{"description":"= in unquoted attribute value",
+"input":"<z z=z=z>",
+"output":["ParseError", ["StartTag", "z", {"z": "z=z"}]]},
+
+{"description":"= attribute",
+"input":"<z =>",
+"output":["ParseError", ["StartTag", "z", {"=": ""}]]},
+
+{"description":"== attribute",
+"input":"<z ==>",
+"output":["ParseError", "ParseError", ["StartTag", "z", {"=": ""}]]},
+
+{"description":"=== attribute",
+"input":"<z ===>",
+"output":["ParseError", "ParseError", ["StartTag", "z", {"=": "="}]]},
+
+{"description":"==== attribute",
+"input":"<z ====>",
+"output":["ParseError", "ParseError", "ParseError", ["StartTag", "z", {"=": "=="}]]},
+
+{"description":"\" after ampersand in double-quoted attribute value",
+"input":"<z z=\"&\">",
+"output":[["StartTag", "z", {"z": "&"}]]},
+
+{"description":"' after ampersand in double-quoted attribute value",
+"input":"<z z=\"&'\">",
+"output":[["StartTag", "z", {"z": "&'"}]]},
+
+{"description":"' after ampersand in single-quoted attribute value",
+"input":"<z z='&'>",
+"output":[["StartTag", "z", {"z": "&"}]]},
+
+{"description":"\" after ampersand in single-quoted attribute value",
+"input":"<z z='&\"'>",
+"output":[["StartTag", "z", {"z": "&\""}]]},
+
+{"description":"Text after bogus character reference",
+"input":"<z z='&xlink_xmlns;'>bar<z>",
+"output":[["StartTag","z",{"z":"&xlink_xmlns;"}],["Character","bar"],["StartTag","z",{}]]},
+
+{"description":"Text after hex character reference",
+"input":"<z z='&#x0020; foo'>bar<z>",
+"output":[["StartTag","z",{"z":" foo"}],["Character","bar"],["StartTag","z",{}]]},
+
+{"description":"Attribute name starting with \"",
+"input":"<foo \"='bar'>",
+"output":["ParseError", ["StartTag", "foo", {"\"": "bar"}]]},
+
+{"description":"Attribute name starting with '",
+"input":"<foo '='bar'>",
+"output":["ParseError", ["StartTag", "foo", {"'": "bar"}]]},
+
+{"description":"Attribute name containing \"",
+"input":"<foo a\"b='bar'>",
+"output":["ParseError", ["StartTag", "foo", {"a\"b": "bar"}]]},
+
+{"description":"Attribute name containing '",
+"input":"<foo a'b='bar'>",
+"output":["ParseError", ["StartTag", "foo", {"a'b": "bar"}]]},
+
+{"description":"Unquoted attribute value containing '",
+"input":"<foo a=b'c>",
+"output":["ParseError", ["StartTag", "foo", {"a": "b'c"}]]},
+
+{"description":"Unquoted attribute value containing \"",
+"input":"<foo a=b\"c>",
+"output":["ParseError", ["StartTag", "foo", {"a": "b\"c"}]]},
+
+{"description":"Double-quoted attribute value not followed by whitespace",
+"input":"<foo a=\"b\"c>",
+"output":["ParseError", ["StartTag", "foo", {"a": "b", "c": ""}]]},
+
+{"description":"Single-quoted attribute value not followed by whitespace",
+"input":"<foo a='b'c>",
+"output":["ParseError", ["StartTag", "foo", {"a": "b", "c": ""}]]},
+
+{"description":"Quoted attribute followed by permitted /",
+"input":"<br a='b'/>",
+"output":[["StartTag","br",{"a":"b"},true]]},
+
+{"description":"Quoted attribute followed by non-permitted /",
+"input":"<bar a='b'/>",
+"output":[["StartTag","bar",{"a":"b"},true]]},
+
+{"description":"CR EOF after doctype name",
+"input":"<!doctype html \r",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false]]},
+
+{"description":"CR EOF in tag name",
+"input":"<z\r",
+"output":["ParseError"]},
+
+{"description":"Slash EOF in tag name",
+"input":"<z/",
+"output":["ParseError"]},
+
+{"description":"Zero hex numeric entity",
+"input":"&#x0",
+"output":["ParseError", "ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"Zero decimal numeric entity",
+"input":"&#0",
+"output":["ParseError", "ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"Zero-prefixed hex numeric entity",
+"input":"&#x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041;",
+"output":[["Character", "A"]]},
+
+{"description":"Zero-prefixed decimal numeric entity",
+"input":"&#000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000065;",
+"output":[["Character", "A"]]},
+
+{"description":"Empty hex numeric entities",
+"input":"&#x &#X ",
+"output":["ParseError", ["Character", "&#x "], "ParseError", ["Character", "&#X "]]},
+
+{"description":"Empty decimal numeric entities",
+"input":"&# &#; ",
+"output":["ParseError", ["Character", "&# "], "ParseError", ["Character", "&#; "]]},
+
+{"description":"Non-BMP numeric entity",
+"input":"&#x10000;",
+"output":[["Character", "\uD800\uDC00"]]},
+
+{"description":"Maximum non-BMP numeric entity",
+"input":"&#X10FFFF;",
+"output":["ParseError", ["Character", "\uDBFF\uDFFF"]]},
+
+{"description":"Above maximum numeric entity",
+"input":"&#x110000;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"32-bit hex numeric entity",
+"input":"&#x80000041;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"33-bit hex numeric entity",
+"input":"&#x100000041;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"33-bit decimal numeric entity",
+"input":"&#4294967361;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"65-bit hex numeric entity",
+"input":"&#x10000000000000041;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"65-bit decimal numeric entity",
+"input":"&#18446744073709551681;",
+"output":["ParseError", ["Character", "\uFFFD"]]},
+
+{"description":"Surrogate code point edge cases",
+"input":"&#xD7FF;&#xD800;&#xD801;&#xDFFE;&#xDFFF;&#xE000;",
+"output":[["Character", "\uD7FF"], "ParseError", ["Character", "\uFFFD"], "ParseError", ["Character", "\uFFFD"], "ParseError", ["Character", "\uFFFD"], "ParseError", ["Character", "\uFFFD\uE000"]]},
+
+{"description":"Uppercase start tag name",
+"input":"<X>",
+"output":[["StartTag", "x", {}]]},
+
+{"description":"Uppercase end tag name",
+"input":"</X>",
+"output":[["EndTag", "x"]]},
+
+{"description":"Uppercase attribute name",
+"input":"<x X>",
+"output":[["StartTag", "x", { "x":"" }]]},
+
+{"description":"Tag/attribute name case edge values",
+"input":"<x@AZ[`az{ @AZ[`az{>",
+"output":[["StartTag", "x@az[`az{", { "@az[`az{":"" }]]},
+
+{"description":"Duplicate different-case attributes",
+"input":"<x x=1 x=2 X=3>",
+"output":["ParseError", "ParseError", ["StartTag", "x", { "x":"1" }]]},
+
+{"description":"Uppercase close tag attributes",
+"input":"</x X>",
+"output":["ParseError", ["EndTag", "x"]]},
+
+{"description":"Duplicate close tag attributes",
+"input":"</x x x>",
+"output":["ParseError", "ParseError", ["EndTag", "x"]]},
+
+{"description":"Permitted slash",
+"input":"<br/>",
+"output":[["StartTag","br",{},true]]},
+
+{"description":"Non-permitted slash",
+"input":"<xr/>",
+"output":[["StartTag","xr",{},true]]},
+
+{"description":"Permitted slash but in close tag",
+"input":"</br/>",
+"output":["ParseError", ["EndTag", "br"]]},
+
+{"description":"Doctype public case-sensitivity (1)",
+"input":"<!DoCtYpE HtMl PuBlIc \"AbC\" \"XyZ\">",
+"output":[["DOCTYPE", "html", "AbC", "XyZ", true]]},
+
+{"description":"Doctype public case-sensitivity (2)",
+"input":"<!dOcTyPe hTmL pUbLiC \"aBc\" \"xYz\">",
+"output":[["DOCTYPE", "html", "aBc", "xYz", true]]},
+
+{"description":"Doctype system case-sensitivity (1)",
+"input":"<!DoCtYpE HtMl SyStEm \"XyZ\">",
+"output":[["DOCTYPE", "html", null, "XyZ", true]]},
+
+{"description":"Doctype system case-sensitivity (2)",
+"input":"<!dOcTyPe hTmL sYsTeM \"xYz\">",
+"output":[["DOCTYPE", "html", null, "xYz", true]]},
+
+{"description":"U+0000 in lookahead region after non-matching character",
+"input":"<!doc>\u0000",
+"output":["ParseError", ["Comment", "doc"], "ParseError", ["Character", "\u0000"]],
+"ignoreErrorOrder":true},
+
+{"description":"U+0000 in lookahead region",
+"input":"<!doc\u0000",
+"output":["ParseError", ["Comment", "doc\uFFFD"]],
+"ignoreErrorOrder":true},
+
+{"description":"U+0080 in lookahead region",
+"input":"<!doc\u0080",
+"output":["ParseError", "ParseError", ["Comment", "doc\u0080"]],
+"ignoreErrorOrder":true},
+
+{"description":"U+FDD1 in lookahead region",
+"input":"<!doc\uFDD1",
+"output":["ParseError", "ParseError", ["Comment", "doc\uFDD1"]],
+"ignoreErrorOrder":true},
+
+{"description":"U+1FFFF in lookahead region",
+"input":"<!doc\uD83F\uDFFF",
+"output":["ParseError", "ParseError", ["Comment", "doc\uD83F\uDFFF"]],
+"ignoreErrorOrder":true},
+
+{"description":"CR followed by non-LF",
+"input":"\r?",
+"output":[["Character", "\n?"]]},
+
+{"description":"CR at EOF",
+"input":"\r",
+"output":[["Character", "\n"]]},
+
+{"description":"LF at EOF",
+"input":"\n",
+"output":[["Character", "\n"]]},
+
+{"description":"CR LF",
+"input":"\r\n",
+"output":[["Character", "\n"]]},
+
+{"description":"CR CR",
+"input":"\r\r",
+"output":[["Character", "\n\n"]]},
+
+{"description":"LF LF",
+"input":"\n\n",
+"output":[["Character", "\n\n"]]},
+
+{"description":"LF CR",
+"input":"\n\r",
+"output":[["Character", "\n\n"]]},
+
+{"description":"text CR CR CR text",
+"input":"text\r\r\rtext",
+"output":[["Character", "text\n\n\ntext"]]},
+
+{"description":"Doctype publik",
+"input":"<!DOCTYPE html PUBLIK \"AbC\" \"XyZ\">",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false]]},
+
+{"description":"Doctype publi",
+"input":"<!DOCTYPE html PUBLI",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false]]},
+
+{"description":"Doctype sistem",
+"input":"<!DOCTYPE html SISTEM \"AbC\">",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false]]},
+
+{"description":"Doctype sys",
+"input":"<!DOCTYPE html SYS",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false]]},
+
+{"description":"Doctype html x>text",
+"input":"<!DOCTYPE html x>text",
+"output":["ParseError", ["DOCTYPE", "html", null, null, false], ["Character", "text"]]},
+
+{"description":"Grave accent in unquoted attribute",
+"input":"<a a=aa`>",
+"output":["ParseError", ["StartTag", "a", {"a":"aa`"}]]},
+
+{"description":"EOF in tag name state ",
+"input":"<a",
+"output":["ParseError"]},
+
+{"description":"EOF in tag name state",
+"input":"<a",
+"output":["ParseError"]},
+
+{"description":"EOF in before attribute name state",
+"input":"<a ",
+"output":["ParseError"]},
+
+{"description":"EOF in attribute name state",
+"input":"<a a",
+"output":["ParseError"]},
+
+{"description":"EOF in after attribute name state",
+"input":"<a a ",
+"output":["ParseError"]},
+
+{"description":"EOF in before attribute value state",
+"input":"<a a =",
+"output":["ParseError"]},
+
+{"description":"EOF in attribute value (double quoted) state",
+"input":"<a a =\"a",
+"output":["ParseError"]},
+
+{"description":"EOF in attribute value (single quoted) state",
+"input":"<a a ='a",
+"output":["ParseError"]},
+
+{"description":"EOF in attribute value (unquoted) state",
+"input":"<a a =a",
+"output":["ParseError"]},
+
+{"description":"EOF in after attribute value state",
+"input":"<a a ='a'",
+"output":["ParseError"]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/unicodeChars.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/unicodeChars.test
new file mode 100644
index 000000000..c7786682c
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/unicodeChars.test
@@ -0,0 +1,1295 @@
+{"tests": [
+
+{"description": "Invalid Unicode character U+0001",
+"input": "\u0001",
+"output": ["ParseError", ["Character", "\u0001"]]},
+
+{"description": "Invalid Unicode character U+0002",
+"input": "\u0002",
+"output": ["ParseError", ["Character", "\u0002"]]},
+
+{"description": "Invalid Unicode character U+0003",
+"input": "\u0003",
+"output": ["ParseError", ["Character", "\u0003"]]},
+
+{"description": "Invalid Unicode character U+0004",
+"input": "\u0004",
+"output": ["ParseError", ["Character", "\u0004"]]},
+
+{"description": "Invalid Unicode character U+0005",
+"input": "\u0005",
+"output": ["ParseError", ["Character", "\u0005"]]},
+
+{"description": "Invalid Unicode character U+0006",
+"input": "\u0006",
+"output": ["ParseError", ["Character", "\u0006"]]},
+
+{"description": "Invalid Unicode character U+0007",
+"input": "\u0007",
+"output": ["ParseError", ["Character", "\u0007"]]},
+
+{"description": "Invalid Unicode character U+0008",
+"input": "\u0008",
+"output": ["ParseError", ["Character", "\u0008"]]},
+
+{"description": "Invalid Unicode character U+000B",
+"input": "\u000B",
+"output": ["ParseError", ["Character", "\u000B"]]},
+
+{"description": "Invalid Unicode character U+000E",
+"input": "\u000E",
+"output": ["ParseError", ["Character", "\u000E"]]},
+
+{"description": "Invalid Unicode character U+000F",
+"input": "\u000F",
+"output": ["ParseError", ["Character", "\u000F"]]},
+
+{"description": "Invalid Unicode character U+0010",
+"input": "\u0010",
+"output": ["ParseError", ["Character", "\u0010"]]},
+
+{"description": "Invalid Unicode character U+0011",
+"input": "\u0011",
+"output": ["ParseError", ["Character", "\u0011"]]},
+
+{"description": "Invalid Unicode character U+0012",
+"input": "\u0012",
+"output": ["ParseError", ["Character", "\u0012"]]},
+
+{"description": "Invalid Unicode character U+0013",
+"input": "\u0013",
+"output": ["ParseError", ["Character", "\u0013"]]},
+
+{"description": "Invalid Unicode character U+0014",
+"input": "\u0014",
+"output": ["ParseError", ["Character", "\u0014"]]},
+
+{"description": "Invalid Unicode character U+0015",
+"input": "\u0015",
+"output": ["ParseError", ["Character", "\u0015"]]},
+
+{"description": "Invalid Unicode character U+0016",
+"input": "\u0016",
+"output": ["ParseError", ["Character", "\u0016"]]},
+
+{"description": "Invalid Unicode character U+0017",
+"input": "\u0017",
+"output": ["ParseError", ["Character", "\u0017"]]},
+
+{"description": "Invalid Unicode character U+0018",
+"input": "\u0018",
+"output": ["ParseError", ["Character", "\u0018"]]},
+
+{"description": "Invalid Unicode character U+0019",
+"input": "\u0019",
+"output": ["ParseError", ["Character", "\u0019"]]},
+
+{"description": "Invalid Unicode character U+001A",
+"input": "\u001A",
+"output": ["ParseError", ["Character", "\u001A"]]},
+
+{"description": "Invalid Unicode character U+001B",
+"input": "\u001B",
+"output": ["ParseError", ["Character", "\u001B"]]},
+
+{"description": "Invalid Unicode character U+001C",
+"input": "\u001C",
+"output": ["ParseError", ["Character", "\u001C"]]},
+
+{"description": "Invalid Unicode character U+001D",
+"input": "\u001D",
+"output": ["ParseError", ["Character", "\u001D"]]},
+
+{"description": "Invalid Unicode character U+001E",
+"input": "\u001E",
+"output": ["ParseError", ["Character", "\u001E"]]},
+
+{"description": "Invalid Unicode character U+001F",
+"input": "\u001F",
+"output": ["ParseError", ["Character", "\u001F"]]},
+
+{"description": "Invalid Unicode character U+007F",
+"input": "\u007F",
+"output": ["ParseError", ["Character", "\u007F"]]},
+
+{"description": "Invalid Unicode character U+FDD0",
+"input": "\uFDD0",
+"output": ["ParseError", ["Character", "\uFDD0"]]},
+
+{"description": "Invalid Unicode character U+FDD1",
+"input": "\uFDD1",
+"output": ["ParseError", ["Character", "\uFDD1"]]},
+
+{"description": "Invalid Unicode character U+FDD2",
+"input": "\uFDD2",
+"output": ["ParseError", ["Character", "\uFDD2"]]},
+
+{"description": "Invalid Unicode character U+FDD3",
+"input": "\uFDD3",
+"output": ["ParseError", ["Character", "\uFDD3"]]},
+
+{"description": "Invalid Unicode character U+FDD4",
+"input": "\uFDD4",
+"output": ["ParseError", ["Character", "\uFDD4"]]},
+
+{"description": "Invalid Unicode character U+FDD5",
+"input": "\uFDD5",
+"output": ["ParseError", ["Character", "\uFDD5"]]},
+
+{"description": "Invalid Unicode character U+FDD6",
+"input": "\uFDD6",
+"output": ["ParseError", ["Character", "\uFDD6"]]},
+
+{"description": "Invalid Unicode character U+FDD7",
+"input": "\uFDD7",
+"output": ["ParseError", ["Character", "\uFDD7"]]},
+
+{"description": "Invalid Unicode character U+FDD8",
+"input": "\uFDD8",
+"output": ["ParseError", ["Character", "\uFDD8"]]},
+
+{"description": "Invalid Unicode character U+FDD9",
+"input": "\uFDD9",
+"output": ["ParseError", ["Character", "\uFDD9"]]},
+
+{"description": "Invalid Unicode character U+FDDA",
+"input": "\uFDDA",
+"output": ["ParseError", ["Character", "\uFDDA"]]},
+
+{"description": "Invalid Unicode character U+FDDB",
+"input": "\uFDDB",
+"output": ["ParseError", ["Character", "\uFDDB"]]},
+
+{"description": "Invalid Unicode character U+FDDC",
+"input": "\uFDDC",
+"output": ["ParseError", ["Character", "\uFDDC"]]},
+
+{"description": "Invalid Unicode character U+FDDD",
+"input": "\uFDDD",
+"output": ["ParseError", ["Character", "\uFDDD"]]},
+
+{"description": "Invalid Unicode character U+FDDE",
+"input": "\uFDDE",
+"output": ["ParseError", ["Character", "\uFDDE"]]},
+
+{"description": "Invalid Unicode character U+FDDF",
+"input": "\uFDDF",
+"output": ["ParseError", ["Character", "\uFDDF"]]},
+
+{"description": "Invalid Unicode character U+FDE0",
+"input": "\uFDE0",
+"output": ["ParseError", ["Character", "\uFDE0"]]},
+
+{"description": "Invalid Unicode character U+FDE1",
+"input": "\uFDE1",
+"output": ["ParseError", ["Character", "\uFDE1"]]},
+
+{"description": "Invalid Unicode character U+FDE2",
+"input": "\uFDE2",
+"output": ["ParseError", ["Character", "\uFDE2"]]},
+
+{"description": "Invalid Unicode character U+FDE3",
+"input": "\uFDE3",
+"output": ["ParseError", ["Character", "\uFDE3"]]},
+
+{"description": "Invalid Unicode character U+FDE4",
+"input": "\uFDE4",
+"output": ["ParseError", ["Character", "\uFDE4"]]},
+
+{"description": "Invalid Unicode character U+FDE5",
+"input": "\uFDE5",
+"output": ["ParseError", ["Character", "\uFDE5"]]},
+
+{"description": "Invalid Unicode character U+FDE6",
+"input": "\uFDE6",
+"output": ["ParseError", ["Character", "\uFDE6"]]},
+
+{"description": "Invalid Unicode character U+FDE7",
+"input": "\uFDE7",
+"output": ["ParseError", ["Character", "\uFDE7"]]},
+
+{"description": "Invalid Unicode character U+FDE8",
+"input": "\uFDE8",
+"output": ["ParseError", ["Character", "\uFDE8"]]},
+
+{"description": "Invalid Unicode character U+FDE9",
+"input": "\uFDE9",
+"output": ["ParseError", ["Character", "\uFDE9"]]},
+
+{"description": "Invalid Unicode character U+FDEA",
+"input": "\uFDEA",
+"output": ["ParseError", ["Character", "\uFDEA"]]},
+
+{"description": "Invalid Unicode character U+FDEB",
+"input": "\uFDEB",
+"output": ["ParseError", ["Character", "\uFDEB"]]},
+
+{"description": "Invalid Unicode character U+FDEC",
+"input": "\uFDEC",
+"output": ["ParseError", ["Character", "\uFDEC"]]},
+
+{"description": "Invalid Unicode character U+FDED",
+"input": "\uFDED",
+"output": ["ParseError", ["Character", "\uFDED"]]},
+
+{"description": "Invalid Unicode character U+FDEE",
+"input": "\uFDEE",
+"output": ["ParseError", ["Character", "\uFDEE"]]},
+
+{"description": "Invalid Unicode character U+FDEF",
+"input": "\uFDEF",
+"output": ["ParseError", ["Character", "\uFDEF"]]},
+
+{"description": "Invalid Unicode character U+FFFE",
+"input": "\uFFFE",
+"output": ["ParseError", ["Character", "\uFFFE"]]},
+
+{"description": "Invalid Unicode character U+FFFF",
+"input": "\uFFFF",
+"output": ["ParseError", ["Character", "\uFFFF"]]},
+
+{"description": "Invalid Unicode character U+1FFFE",
+"input": "\uD83F\uDFFE",
+"output": ["ParseError", ["Character", "\uD83F\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+1FFFF",
+"input": "\uD83F\uDFFF",
+"output": ["ParseError", ["Character", "\uD83F\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+2FFFE",
+"input": "\uD87F\uDFFE",
+"output": ["ParseError", ["Character", "\uD87F\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+2FFFF",
+"input": "\uD87F\uDFFF",
+"output": ["ParseError", ["Character", "\uD87F\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+3FFFE",
+"input": "\uD8BF\uDFFE",
+"output": ["ParseError", ["Character", "\uD8BF\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+3FFFF",
+"input": "\uD8BF\uDFFF",
+"output": ["ParseError", ["Character", "\uD8BF\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+4FFFE",
+"input": "\uD8FF\uDFFE",
+"output": ["ParseError", ["Character", "\uD8FF\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+4FFFF",
+"input": "\uD8FF\uDFFF",
+"output": ["ParseError", ["Character", "\uD8FF\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+5FFFE",
+"input": "\uD93F\uDFFE",
+"output": ["ParseError", ["Character", "\uD93F\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+5FFFF",
+"input": "\uD93F\uDFFF",
+"output": ["ParseError", ["Character", "\uD93F\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+6FFFE",
+"input": "\uD97F\uDFFE",
+"output": ["ParseError", ["Character", "\uD97F\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+6FFFF",
+"input": "\uD97F\uDFFF",
+"output": ["ParseError", ["Character", "\uD97F\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+7FFFE",
+"input": "\uD9BF\uDFFE",
+"output": ["ParseError", ["Character", "\uD9BF\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+7FFFF",
+"input": "\uD9BF\uDFFF",
+"output": ["ParseError", ["Character", "\uD9BF\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+8FFFE",
+"input": "\uD9FF\uDFFE",
+"output": ["ParseError", ["Character", "\uD9FF\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+8FFFF",
+"input": "\uD9FF\uDFFF",
+"output": ["ParseError", ["Character", "\uD9FF\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+9FFFE",
+"input": "\uDA3F\uDFFE",
+"output": ["ParseError", ["Character", "\uDA3F\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+9FFFF",
+"input": "\uDA3F\uDFFF",
+"output": ["ParseError", ["Character", "\uDA3F\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+AFFFE",
+"input": "\uDA7F\uDFFE",
+"output": ["ParseError", ["Character", "\uDA7F\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+AFFFF",
+"input": "\uDA7F\uDFFF",
+"output": ["ParseError", ["Character", "\uDA7F\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+BFFFE",
+"input": "\uDABF\uDFFE",
+"output": ["ParseError", ["Character", "\uDABF\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+BFFFF",
+"input": "\uDABF\uDFFF",
+"output": ["ParseError", ["Character", "\uDABF\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+CFFFE",
+"input": "\uDAFF\uDFFE",
+"output": ["ParseError", ["Character", "\uDAFF\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+CFFFF",
+"input": "\uDAFF\uDFFF",
+"output": ["ParseError", ["Character", "\uDAFF\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+DFFFE",
+"input": "\uDB3F\uDFFE",
+"output": ["ParseError", ["Character", "\uDB3F\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+DFFFF",
+"input": "\uDB3F\uDFFF",
+"output": ["ParseError", ["Character", "\uDB3F\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+EFFFE",
+"input": "\uDB7F\uDFFE",
+"output": ["ParseError", ["Character", "\uDB7F\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+EFFFF",
+"input": "\uDB7F\uDFFF",
+"output": ["ParseError", ["Character", "\uDB7F\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+FFFFE",
+"input": "\uDBBF\uDFFE",
+"output": ["ParseError", ["Character", "\uDBBF\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+FFFFF",
+"input": "\uDBBF\uDFFF",
+"output": ["ParseError", ["Character", "\uDBBF\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+10FFFE",
+"input": "\uDBFF\uDFFE",
+"output": ["ParseError", ["Character", "\uDBFF\uDFFE"]]},
+
+{"description": "Invalid Unicode character U+10FFFF",
+"input": "\uDBFF\uDFFF",
+"output": ["ParseError", ["Character", "\uDBFF\uDFFF"]]},
+
+{"description": "Valid Unicode character U+0009",
+"input": "\u0009",
+"output": [["Character", "\u0009"]]},
+
+{"description": "Valid Unicode character U+000A",
+"input": "\u000A",
+"output": [["Character", "\u000A"]]},
+
+{"description": "Valid Unicode character U+0020",
+"input": "\u0020",
+"output": [["Character", "\u0020"]]},
+
+{"description": "Valid Unicode character U+0021",
+"input": "\u0021",
+"output": [["Character", "\u0021"]]},
+
+{"description": "Valid Unicode character U+0022",
+"input": "\u0022",
+"output": [["Character", "\u0022"]]},
+
+{"description": "Valid Unicode character U+0023",
+"input": "\u0023",
+"output": [["Character", "\u0023"]]},
+
+{"description": "Valid Unicode character U+0024",
+"input": "\u0024",
+"output": [["Character", "\u0024"]]},
+
+{"description": "Valid Unicode character U+0025",
+"input": "\u0025",
+"output": [["Character", "\u0025"]]},
+
+{"description": "Valid Unicode character U+0026",
+"input": "\u0026",
+"output": [["Character", "\u0026"]]},
+
+{"description": "Valid Unicode character U+0027",
+"input": "\u0027",
+"output": [["Character", "\u0027"]]},
+
+{"description": "Valid Unicode character U+0028",
+"input": "\u0028",
+"output": [["Character", "\u0028"]]},
+
+{"description": "Valid Unicode character U+0029",
+"input": "\u0029",
+"output": [["Character", "\u0029"]]},
+
+{"description": "Valid Unicode character U+002A",
+"input": "\u002A",
+"output": [["Character", "\u002A"]]},
+
+{"description": "Valid Unicode character U+002B",
+"input": "\u002B",
+"output": [["Character", "\u002B"]]},
+
+{"description": "Valid Unicode character U+002C",
+"input": "\u002C",
+"output": [["Character", "\u002C"]]},
+
+{"description": "Valid Unicode character U+002D",
+"input": "\u002D",
+"output": [["Character", "\u002D"]]},
+
+{"description": "Valid Unicode character U+002E",
+"input": "\u002E",
+"output": [["Character", "\u002E"]]},
+
+{"description": "Valid Unicode character U+002F",
+"input": "\u002F",
+"output": [["Character", "\u002F"]]},
+
+{"description": "Valid Unicode character U+0030",
+"input": "\u0030",
+"output": [["Character", "\u0030"]]},
+
+{"description": "Valid Unicode character U+0031",
+"input": "\u0031",
+"output": [["Character", "\u0031"]]},
+
+{"description": "Valid Unicode character U+0032",
+"input": "\u0032",
+"output": [["Character", "\u0032"]]},
+
+{"description": "Valid Unicode character U+0033",
+"input": "\u0033",
+"output": [["Character", "\u0033"]]},
+
+{"description": "Valid Unicode character U+0034",
+"input": "\u0034",
+"output": [["Character", "\u0034"]]},
+
+{"description": "Valid Unicode character U+0035",
+"input": "\u0035",
+"output": [["Character", "\u0035"]]},
+
+{"description": "Valid Unicode character U+0036",
+"input": "\u0036",
+"output": [["Character", "\u0036"]]},
+
+{"description": "Valid Unicode character U+0037",
+"input": "\u0037",
+"output": [["Character", "\u0037"]]},
+
+{"description": "Valid Unicode character U+0038",
+"input": "\u0038",
+"output": [["Character", "\u0038"]]},
+
+{"description": "Valid Unicode character U+0039",
+"input": "\u0039",
+"output": [["Character", "\u0039"]]},
+
+{"description": "Valid Unicode character U+003A",
+"input": "\u003A",
+"output": [["Character", "\u003A"]]},
+
+{"description": "Valid Unicode character U+003B",
+"input": "\u003B",
+"output": [["Character", "\u003B"]]},
+
+{"description": "Valid Unicode character U+003D",
+"input": "\u003D",
+"output": [["Character", "\u003D"]]},
+
+{"description": "Valid Unicode character U+003E",
+"input": "\u003E",
+"output": [["Character", "\u003E"]]},
+
+{"description": "Valid Unicode character U+003F",
+"input": "\u003F",
+"output": [["Character", "\u003F"]]},
+
+{"description": "Valid Unicode character U+0040",
+"input": "\u0040",
+"output": [["Character", "\u0040"]]},
+
+{"description": "Valid Unicode character U+0041",
+"input": "\u0041",
+"output": [["Character", "\u0041"]]},
+
+{"description": "Valid Unicode character U+0042",
+"input": "\u0042",
+"output": [["Character", "\u0042"]]},
+
+{"description": "Valid Unicode character U+0043",
+"input": "\u0043",
+"output": [["Character", "\u0043"]]},
+
+{"description": "Valid Unicode character U+0044",
+"input": "\u0044",
+"output": [["Character", "\u0044"]]},
+
+{"description": "Valid Unicode character U+0045",
+"input": "\u0045",
+"output": [["Character", "\u0045"]]},
+
+{"description": "Valid Unicode character U+0046",
+"input": "\u0046",
+"output": [["Character", "\u0046"]]},
+
+{"description": "Valid Unicode character U+0047",
+"input": "\u0047",
+"output": [["Character", "\u0047"]]},
+
+{"description": "Valid Unicode character U+0048",
+"input": "\u0048",
+"output": [["Character", "\u0048"]]},
+
+{"description": "Valid Unicode character U+0049",
+"input": "\u0049",
+"output": [["Character", "\u0049"]]},
+
+{"description": "Valid Unicode character U+004A",
+"input": "\u004A",
+"output": [["Character", "\u004A"]]},
+
+{"description": "Valid Unicode character U+004B",
+"input": "\u004B",
+"output": [["Character", "\u004B"]]},
+
+{"description": "Valid Unicode character U+004C",
+"input": "\u004C",
+"output": [["Character", "\u004C"]]},
+
+{"description": "Valid Unicode character U+004D",
+"input": "\u004D",
+"output": [["Character", "\u004D"]]},
+
+{"description": "Valid Unicode character U+004E",
+"input": "\u004E",
+"output": [["Character", "\u004E"]]},
+
+{"description": "Valid Unicode character U+004F",
+"input": "\u004F",
+"output": [["Character", "\u004F"]]},
+
+{"description": "Valid Unicode character U+0050",
+"input": "\u0050",
+"output": [["Character", "\u0050"]]},
+
+{"description": "Valid Unicode character U+0051",
+"input": "\u0051",
+"output": [["Character", "\u0051"]]},
+
+{"description": "Valid Unicode character U+0052",
+"input": "\u0052",
+"output": [["Character", "\u0052"]]},
+
+{"description": "Valid Unicode character U+0053",
+"input": "\u0053",
+"output": [["Character", "\u0053"]]},
+
+{"description": "Valid Unicode character U+0054",
+"input": "\u0054",
+"output": [["Character", "\u0054"]]},
+
+{"description": "Valid Unicode character U+0055",
+"input": "\u0055",
+"output": [["Character", "\u0055"]]},
+
+{"description": "Valid Unicode character U+0056",
+"input": "\u0056",
+"output": [["Character", "\u0056"]]},
+
+{"description": "Valid Unicode character U+0057",
+"input": "\u0057",
+"output": [["Character", "\u0057"]]},
+
+{"description": "Valid Unicode character U+0058",
+"input": "\u0058",
+"output": [["Character", "\u0058"]]},
+
+{"description": "Valid Unicode character U+0059",
+"input": "\u0059",
+"output": [["Character", "\u0059"]]},
+
+{"description": "Valid Unicode character U+005A",
+"input": "\u005A",
+"output": [["Character", "\u005A"]]},
+
+{"description": "Valid Unicode character U+005B",
+"input": "\u005B",
+"output": [["Character", "\u005B"]]},
+
+{"description": "Valid Unicode character U+005C",
+"input": "\u005C",
+"output": [["Character", "\u005C"]]},
+
+{"description": "Valid Unicode character U+005D",
+"input": "\u005D",
+"output": [["Character", "\u005D"]]},
+
+{"description": "Valid Unicode character U+005E",
+"input": "\u005E",
+"output": [["Character", "\u005E"]]},
+
+{"description": "Valid Unicode character U+005F",
+"input": "\u005F",
+"output": [["Character", "\u005F"]]},
+
+{"description": "Valid Unicode character U+0060",
+"input": "\u0060",
+"output": [["Character", "\u0060"]]},
+
+{"description": "Valid Unicode character U+0061",
+"input": "\u0061",
+"output": [["Character", "\u0061"]]},
+
+{"description": "Valid Unicode character U+0062",
+"input": "\u0062",
+"output": [["Character", "\u0062"]]},
+
+{"description": "Valid Unicode character U+0063",
+"input": "\u0063",
+"output": [["Character", "\u0063"]]},
+
+{"description": "Valid Unicode character U+0064",
+"input": "\u0064",
+"output": [["Character", "\u0064"]]},
+
+{"description": "Valid Unicode character U+0065",
+"input": "\u0065",
+"output": [["Character", "\u0065"]]},
+
+{"description": "Valid Unicode character U+0066",
+"input": "\u0066",
+"output": [["Character", "\u0066"]]},
+
+{"description": "Valid Unicode character U+0067",
+"input": "\u0067",
+"output": [["Character", "\u0067"]]},
+
+{"description": "Valid Unicode character U+0068",
+"input": "\u0068",
+"output": [["Character", "\u0068"]]},
+
+{"description": "Valid Unicode character U+0069",
+"input": "\u0069",
+"output": [["Character", "\u0069"]]},
+
+{"description": "Valid Unicode character U+006A",
+"input": "\u006A",
+"output": [["Character", "\u006A"]]},
+
+{"description": "Valid Unicode character U+006B",
+"input": "\u006B",
+"output": [["Character", "\u006B"]]},
+
+{"description": "Valid Unicode character U+006C",
+"input": "\u006C",
+"output": [["Character", "\u006C"]]},
+
+{"description": "Valid Unicode character U+006D",
+"input": "\u006D",
+"output": [["Character", "\u006D"]]},
+
+{"description": "Valid Unicode character U+006E",
+"input": "\u006E",
+"output": [["Character", "\u006E"]]},
+
+{"description": "Valid Unicode character U+006F",
+"input": "\u006F",
+"output": [["Character", "\u006F"]]},
+
+{"description": "Valid Unicode character U+0070",
+"input": "\u0070",
+"output": [["Character", "\u0070"]]},
+
+{"description": "Valid Unicode character U+0071",
+"input": "\u0071",
+"output": [["Character", "\u0071"]]},
+
+{"description": "Valid Unicode character U+0072",
+"input": "\u0072",
+"output": [["Character", "\u0072"]]},
+
+{"description": "Valid Unicode character U+0073",
+"input": "\u0073",
+"output": [["Character", "\u0073"]]},
+
+{"description": "Valid Unicode character U+0074",
+"input": "\u0074",
+"output": [["Character", "\u0074"]]},
+
+{"description": "Valid Unicode character U+0075",
+"input": "\u0075",
+"output": [["Character", "\u0075"]]},
+
+{"description": "Valid Unicode character U+0076",
+"input": "\u0076",
+"output": [["Character", "\u0076"]]},
+
+{"description": "Valid Unicode character U+0077",
+"input": "\u0077",
+"output": [["Character", "\u0077"]]},
+
+{"description": "Valid Unicode character U+0078",
+"input": "\u0078",
+"output": [["Character", "\u0078"]]},
+
+{"description": "Valid Unicode character U+0079",
+"input": "\u0079",
+"output": [["Character", "\u0079"]]},
+
+{"description": "Valid Unicode character U+007A",
+"input": "\u007A",
+"output": [["Character", "\u007A"]]},
+
+{"description": "Valid Unicode character U+007B",
+"input": "\u007B",
+"output": [["Character", "\u007B"]]},
+
+{"description": "Valid Unicode character U+007C",
+"input": "\u007C",
+"output": [["Character", "\u007C"]]},
+
+{"description": "Valid Unicode character U+007D",
+"input": "\u007D",
+"output": [["Character", "\u007D"]]},
+
+{"description": "Valid Unicode character U+007E",
+"input": "\u007E",
+"output": [["Character", "\u007E"]]},
+
+{"description": "Valid Unicode character U+00A0",
+"input": "\u00A0",
+"output": [["Character", "\u00A0"]]},
+
+{"description": "Valid Unicode character U+00A1",
+"input": "\u00A1",
+"output": [["Character", "\u00A1"]]},
+
+{"description": "Valid Unicode character U+00A2",
+"input": "\u00A2",
+"output": [["Character", "\u00A2"]]},
+
+{"description": "Valid Unicode character U+00A3",
+"input": "\u00A3",
+"output": [["Character", "\u00A3"]]},
+
+{"description": "Valid Unicode character U+00A4",
+"input": "\u00A4",
+"output": [["Character", "\u00A4"]]},
+
+{"description": "Valid Unicode character U+00A5",
+"input": "\u00A5",
+"output": [["Character", "\u00A5"]]},
+
+{"description": "Valid Unicode character U+00A6",
+"input": "\u00A6",
+"output": [["Character", "\u00A6"]]},
+
+{"description": "Valid Unicode character U+00A7",
+"input": "\u00A7",
+"output": [["Character", "\u00A7"]]},
+
+{"description": "Valid Unicode character U+00A8",
+"input": "\u00A8",
+"output": [["Character", "\u00A8"]]},
+
+{"description": "Valid Unicode character U+00A9",
+"input": "\u00A9",
+"output": [["Character", "\u00A9"]]},
+
+{"description": "Valid Unicode character U+00AA",
+"input": "\u00AA",
+"output": [["Character", "\u00AA"]]},
+
+{"description": "Valid Unicode character U+00AB",
+"input": "\u00AB",
+"output": [["Character", "\u00AB"]]},
+
+{"description": "Valid Unicode character U+00AC",
+"input": "\u00AC",
+"output": [["Character", "\u00AC"]]},
+
+{"description": "Valid Unicode character U+00AD",
+"input": "\u00AD",
+"output": [["Character", "\u00AD"]]},
+
+{"description": "Valid Unicode character U+00AE",
+"input": "\u00AE",
+"output": [["Character", "\u00AE"]]},
+
+{"description": "Valid Unicode character U+00AF",
+"input": "\u00AF",
+"output": [["Character", "\u00AF"]]},
+
+{"description": "Valid Unicode character U+00B0",
+"input": "\u00B0",
+"output": [["Character", "\u00B0"]]},
+
+{"description": "Valid Unicode character U+00B1",
+"input": "\u00B1",
+"output": [["Character", "\u00B1"]]},
+
+{"description": "Valid Unicode character U+00B2",
+"input": "\u00B2",
+"output": [["Character", "\u00B2"]]},
+
+{"description": "Valid Unicode character U+00B3",
+"input": "\u00B3",
+"output": [["Character", "\u00B3"]]},
+
+{"description": "Valid Unicode character U+00B4",
+"input": "\u00B4",
+"output": [["Character", "\u00B4"]]},
+
+{"description": "Valid Unicode character U+00B5",
+"input": "\u00B5",
+"output": [["Character", "\u00B5"]]},
+
+{"description": "Valid Unicode character U+00B6",
+"input": "\u00B6",
+"output": [["Character", "\u00B6"]]},
+
+{"description": "Valid Unicode character U+00B7",
+"input": "\u00B7",
+"output": [["Character", "\u00B7"]]},
+
+{"description": "Valid Unicode character U+00B8",
+"input": "\u00B8",
+"output": [["Character", "\u00B8"]]},
+
+{"description": "Valid Unicode character U+00B9",
+"input": "\u00B9",
+"output": [["Character", "\u00B9"]]},
+
+{"description": "Valid Unicode character U+00BA",
+"input": "\u00BA",
+"output": [["Character", "\u00BA"]]},
+
+{"description": "Valid Unicode character U+00BB",
+"input": "\u00BB",
+"output": [["Character", "\u00BB"]]},
+
+{"description": "Valid Unicode character U+00BC",
+"input": "\u00BC",
+"output": [["Character", "\u00BC"]]},
+
+{"description": "Valid Unicode character U+00BD",
+"input": "\u00BD",
+"output": [["Character", "\u00BD"]]},
+
+{"description": "Valid Unicode character U+00BE",
+"input": "\u00BE",
+"output": [["Character", "\u00BE"]]},
+
+{"description": "Valid Unicode character U+00BF",
+"input": "\u00BF",
+"output": [["Character", "\u00BF"]]},
+
+{"description": "Valid Unicode character U+00C0",
+"input": "\u00C0",
+"output": [["Character", "\u00C0"]]},
+
+{"description": "Valid Unicode character U+00C1",
+"input": "\u00C1",
+"output": [["Character", "\u00C1"]]},
+
+{"description": "Valid Unicode character U+00C2",
+"input": "\u00C2",
+"output": [["Character", "\u00C2"]]},
+
+{"description": "Valid Unicode character U+00C3",
+"input": "\u00C3",
+"output": [["Character", "\u00C3"]]},
+
+{"description": "Valid Unicode character U+00C4",
+"input": "\u00C4",
+"output": [["Character", "\u00C4"]]},
+
+{"description": "Valid Unicode character U+00C5",
+"input": "\u00C5",
+"output": [["Character", "\u00C5"]]},
+
+{"description": "Valid Unicode character U+00C6",
+"input": "\u00C6",
+"output": [["Character", "\u00C6"]]},
+
+{"description": "Valid Unicode character U+00C7",
+"input": "\u00C7",
+"output": [["Character", "\u00C7"]]},
+
+{"description": "Valid Unicode character U+00C8",
+"input": "\u00C8",
+"output": [["Character", "\u00C8"]]},
+
+{"description": "Valid Unicode character U+00C9",
+"input": "\u00C9",
+"output": [["Character", "\u00C9"]]},
+
+{"description": "Valid Unicode character U+00CA",
+"input": "\u00CA",
+"output": [["Character", "\u00CA"]]},
+
+{"description": "Valid Unicode character U+00CB",
+"input": "\u00CB",
+"output": [["Character", "\u00CB"]]},
+
+{"description": "Valid Unicode character U+00CC",
+"input": "\u00CC",
+"output": [["Character", "\u00CC"]]},
+
+{"description": "Valid Unicode character U+00CD",
+"input": "\u00CD",
+"output": [["Character", "\u00CD"]]},
+
+{"description": "Valid Unicode character U+00CE",
+"input": "\u00CE",
+"output": [["Character", "\u00CE"]]},
+
+{"description": "Valid Unicode character U+00CF",
+"input": "\u00CF",
+"output": [["Character", "\u00CF"]]},
+
+{"description": "Valid Unicode character U+00D0",
+"input": "\u00D0",
+"output": [["Character", "\u00D0"]]},
+
+{"description": "Valid Unicode character U+00D1",
+"input": "\u00D1",
+"output": [["Character", "\u00D1"]]},
+
+{"description": "Valid Unicode character U+00D2",
+"input": "\u00D2",
+"output": [["Character", "\u00D2"]]},
+
+{"description": "Valid Unicode character U+00D3",
+"input": "\u00D3",
+"output": [["Character", "\u00D3"]]},
+
+{"description": "Valid Unicode character U+00D4",
+"input": "\u00D4",
+"output": [["Character", "\u00D4"]]},
+
+{"description": "Valid Unicode character U+00D5",
+"input": "\u00D5",
+"output": [["Character", "\u00D5"]]},
+
+{"description": "Valid Unicode character U+00D6",
+"input": "\u00D6",
+"output": [["Character", "\u00D6"]]},
+
+{"description": "Valid Unicode character U+00D7",
+"input": "\u00D7",
+"output": [["Character", "\u00D7"]]},
+
+{"description": "Valid Unicode character U+00D8",
+"input": "\u00D8",
+"output": [["Character", "\u00D8"]]},
+
+{"description": "Valid Unicode character U+00D9",
+"input": "\u00D9",
+"output": [["Character", "\u00D9"]]},
+
+{"description": "Valid Unicode character U+00DA",
+"input": "\u00DA",
+"output": [["Character", "\u00DA"]]},
+
+{"description": "Valid Unicode character U+00DB",
+"input": "\u00DB",
+"output": [["Character", "\u00DB"]]},
+
+{"description": "Valid Unicode character U+00DC",
+"input": "\u00DC",
+"output": [["Character", "\u00DC"]]},
+
+{"description": "Valid Unicode character U+00DD",
+"input": "\u00DD",
+"output": [["Character", "\u00DD"]]},
+
+{"description": "Valid Unicode character U+00DE",
+"input": "\u00DE",
+"output": [["Character", "\u00DE"]]},
+
+{"description": "Valid Unicode character U+00DF",
+"input": "\u00DF",
+"output": [["Character", "\u00DF"]]},
+
+{"description": "Valid Unicode character U+00E0",
+"input": "\u00E0",
+"output": [["Character", "\u00E0"]]},
+
+{"description": "Valid Unicode character U+00E1",
+"input": "\u00E1",
+"output": [["Character", "\u00E1"]]},
+
+{"description": "Valid Unicode character U+00E2",
+"input": "\u00E2",
+"output": [["Character", "\u00E2"]]},
+
+{"description": "Valid Unicode character U+00E3",
+"input": "\u00E3",
+"output": [["Character", "\u00E3"]]},
+
+{"description": "Valid Unicode character U+00E4",
+"input": "\u00E4",
+"output": [["Character", "\u00E4"]]},
+
+{"description": "Valid Unicode character U+00E5",
+"input": "\u00E5",
+"output": [["Character", "\u00E5"]]},
+
+{"description": "Valid Unicode character U+00E6",
+"input": "\u00E6",
+"output": [["Character", "\u00E6"]]},
+
+{"description": "Valid Unicode character U+00E7",
+"input": "\u00E7",
+"output": [["Character", "\u00E7"]]},
+
+{"description": "Valid Unicode character U+00E8",
+"input": "\u00E8",
+"output": [["Character", "\u00E8"]]},
+
+{"description": "Valid Unicode character U+00E9",
+"input": "\u00E9",
+"output": [["Character", "\u00E9"]]},
+
+{"description": "Valid Unicode character U+00EA",
+"input": "\u00EA",
+"output": [["Character", "\u00EA"]]},
+
+{"description": "Valid Unicode character U+00EB",
+"input": "\u00EB",
+"output": [["Character", "\u00EB"]]},
+
+{"description": "Valid Unicode character U+00EC",
+"input": "\u00EC",
+"output": [["Character", "\u00EC"]]},
+
+{"description": "Valid Unicode character U+00ED",
+"input": "\u00ED",
+"output": [["Character", "\u00ED"]]},
+
+{"description": "Valid Unicode character U+00EE",
+"input": "\u00EE",
+"output": [["Character", "\u00EE"]]},
+
+{"description": "Valid Unicode character U+00EF",
+"input": "\u00EF",
+"output": [["Character", "\u00EF"]]},
+
+{"description": "Valid Unicode character U+00F0",
+"input": "\u00F0",
+"output": [["Character", "\u00F0"]]},
+
+{"description": "Valid Unicode character U+00F1",
+"input": "\u00F1",
+"output": [["Character", "\u00F1"]]},
+
+{"description": "Valid Unicode character U+00F2",
+"input": "\u00F2",
+"output": [["Character", "\u00F2"]]},
+
+{"description": "Valid Unicode character U+00F3",
+"input": "\u00F3",
+"output": [["Character", "\u00F3"]]},
+
+{"description": "Valid Unicode character U+00F4",
+"input": "\u00F4",
+"output": [["Character", "\u00F4"]]},
+
+{"description": "Valid Unicode character U+00F5",
+"input": "\u00F5",
+"output": [["Character", "\u00F5"]]},
+
+{"description": "Valid Unicode character U+00F6",
+"input": "\u00F6",
+"output": [["Character", "\u00F6"]]},
+
+{"description": "Valid Unicode character U+00F7",
+"input": "\u00F7",
+"output": [["Character", "\u00F7"]]},
+
+{"description": "Valid Unicode character U+00F8",
+"input": "\u00F8",
+"output": [["Character", "\u00F8"]]},
+
+{"description": "Valid Unicode character U+00F9",
+"input": "\u00F9",
+"output": [["Character", "\u00F9"]]},
+
+{"description": "Valid Unicode character U+00FA",
+"input": "\u00FA",
+"output": [["Character", "\u00FA"]]},
+
+{"description": "Valid Unicode character U+00FB",
+"input": "\u00FB",
+"output": [["Character", "\u00FB"]]},
+
+{"description": "Valid Unicode character U+00FC",
+"input": "\u00FC",
+"output": [["Character", "\u00FC"]]},
+
+{"description": "Valid Unicode character U+00FD",
+"input": "\u00FD",
+"output": [["Character", "\u00FD"]]},
+
+{"description": "Valid Unicode character U+00FE",
+"input": "\u00FE",
+"output": [["Character", "\u00FE"]]},
+
+{"description": "Valid Unicode character U+00FF",
+"input": "\u00FF",
+"output": [["Character", "\u00FF"]]},
+
+{"description": "Valid Unicode character U+D7FF",
+"input": "\uD7FF",
+"output": [["Character", "\uD7FF"]]},
+
+{"description": "Valid Unicode character U+E000",
+"input": "\uE000",
+"output": [["Character", "\uE000"]]},
+
+{"description": "Valid Unicode character U+FDCF",
+"input": "\uFDCF",
+"output": [["Character", "\uFDCF"]]},
+
+{"description": "Valid Unicode character U+FDF0",
+"input": "\uFDF0",
+"output": [["Character", "\uFDF0"]]},
+
+{"description": "Valid Unicode character U+FFFD",
+"input": "\uFFFD",
+"output": [["Character", "\uFFFD"]]},
+
+{"description": "Valid Unicode character U+10000",
+"input": "\uD800\uDC00",
+"output": [["Character", "\uD800\uDC00"]]},
+
+{"description": "Valid Unicode character U+1FFFD",
+"input": "\uD83F\uDFFD",
+"output": [["Character", "\uD83F\uDFFD"]]},
+
+{"description": "Valid Unicode character U+20000",
+"input": "\uD840\uDC00",
+"output": [["Character", "\uD840\uDC00"]]},
+
+{"description": "Valid Unicode character U+2FFFD",
+"input": "\uD87F\uDFFD",
+"output": [["Character", "\uD87F\uDFFD"]]},
+
+{"description": "Valid Unicode character U+30000",
+"input": "\uD880\uDC00",
+"output": [["Character", "\uD880\uDC00"]]},
+
+{"description": "Valid Unicode character U+3FFFD",
+"input": "\uD8BF\uDFFD",
+"output": [["Character", "\uD8BF\uDFFD"]]},
+
+{"description": "Valid Unicode character U+40000",
+"input": "\uD8C0\uDC00",
+"output": [["Character", "\uD8C0\uDC00"]]},
+
+{"description": "Valid Unicode character U+4FFFD",
+"input": "\uD8FF\uDFFD",
+"output": [["Character", "\uD8FF\uDFFD"]]},
+
+{"description": "Valid Unicode character U+50000",
+"input": "\uD900\uDC00",
+"output": [["Character", "\uD900\uDC00"]]},
+
+{"description": "Valid Unicode character U+5FFFD",
+"input": "\uD93F\uDFFD",
+"output": [["Character", "\uD93F\uDFFD"]]},
+
+{"description": "Valid Unicode character U+60000",
+"input": "\uD940\uDC00",
+"output": [["Character", "\uD940\uDC00"]]},
+
+{"description": "Valid Unicode character U+6FFFD",
+"input": "\uD97F\uDFFD",
+"output": [["Character", "\uD97F\uDFFD"]]},
+
+{"description": "Valid Unicode character U+70000",
+"input": "\uD980\uDC00",
+"output": [["Character", "\uD980\uDC00"]]},
+
+{"description": "Valid Unicode character U+7FFFD",
+"input": "\uD9BF\uDFFD",
+"output": [["Character", "\uD9BF\uDFFD"]]},
+
+{"description": "Valid Unicode character U+80000",
+"input": "\uD9C0\uDC00",
+"output": [["Character", "\uD9C0\uDC00"]]},
+
+{"description": "Valid Unicode character U+8FFFD",
+"input": "\uD9FF\uDFFD",
+"output": [["Character", "\uD9FF\uDFFD"]]},
+
+{"description": "Valid Unicode character U+90000",
+"input": "\uDA00\uDC00",
+"output": [["Character", "\uDA00\uDC00"]]},
+
+{"description": "Valid Unicode character U+9FFFD",
+"input": "\uDA3F\uDFFD",
+"output": [["Character", "\uDA3F\uDFFD"]]},
+
+{"description": "Valid Unicode character U+A0000",
+"input": "\uDA40\uDC00",
+"output": [["Character", "\uDA40\uDC00"]]},
+
+{"description": "Valid Unicode character U+AFFFD",
+"input": "\uDA7F\uDFFD",
+"output": [["Character", "\uDA7F\uDFFD"]]},
+
+{"description": "Valid Unicode character U+B0000",
+"input": "\uDA80\uDC00",
+"output": [["Character", "\uDA80\uDC00"]]},
+
+{"description": "Valid Unicode character U+BFFFD",
+"input": "\uDABF\uDFFD",
+"output": [["Character", "\uDABF\uDFFD"]]},
+
+{"description": "Valid Unicode character U+C0000",
+"input": "\uDAC0\uDC00",
+"output": [["Character", "\uDAC0\uDC00"]]},
+
+{"description": "Valid Unicode character U+CFFFD",
+"input": "\uDAFF\uDFFD",
+"output": [["Character", "\uDAFF\uDFFD"]]},
+
+{"description": "Valid Unicode character U+D0000",
+"input": "\uDB00\uDC00",
+"output": [["Character", "\uDB00\uDC00"]]},
+
+{"description": "Valid Unicode character U+DFFFD",
+"input": "\uDB3F\uDFFD",
+"output": [["Character", "\uDB3F\uDFFD"]]},
+
+{"description": "Valid Unicode character U+E0000",
+"input": "\uDB40\uDC00",
+"output": [["Character", "\uDB40\uDC00"]]},
+
+{"description": "Valid Unicode character U+EFFFD",
+"input": "\uDB7F\uDFFD",
+"output": [["Character", "\uDB7F\uDFFD"]]},
+
+{"description": "Valid Unicode character U+F0000",
+"input": "\uDB80\uDC00",
+"output": [["Character", "\uDB80\uDC00"]]},
+
+{"description": "Valid Unicode character U+FFFFD",
+"input": "\uDBBF\uDFFD",
+"output": [["Character", "\uDBBF\uDFFD"]]},
+
+{"description": "Valid Unicode character U+100000",
+"input": "\uDBC0\uDC00",
+"output": [["Character", "\uDBC0\uDC00"]]},
+
+{"description": "Valid Unicode character U+10FFFD",
+"input": "\uDBFF\uDFFD",
+"output": [["Character", "\uDBFF\uDFFD"]]}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/unicodeCharsProblematic.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/unicodeCharsProblematic.test
new file mode 100644
index 000000000..8fb54c411
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/unicodeCharsProblematic.test
@@ -0,0 +1,27 @@
+{"tests" : [
+{"description": "Invalid Unicode character U+DFFF",
+"doubleEscaped":true,
+"input": "\\uDFFF",
+"output":["ParseError", ["Character", "\\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+D800",
+"doubleEscaped":true,
+"input": "\\uD800",
+"output":["ParseError", ["Character", "\\uD800"]]},
+
+{"description": "Invalid Unicode character U+DFFF with valid preceding character",
+"doubleEscaped":true,
+"input": "a\\uDFFF",
+"output":[["Character", "a"], "ParseError", ["Character", "\\uDFFF"]]},
+
+{"description": "Invalid Unicode character U+D800 with valid following character",
+"doubleEscaped":true,
+"input": "\\uD800a",
+"output":["ParseError", ["Character", "\\uD800a"]]},
+
+{"description":"CR followed by U+0000",
+"input":"\r\u0000",
+"output":[["Character", "\n"], "ParseError", ["Character", "\u0000"]],
+"ignoreErrorOrder":true}
+]
+} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/xmlViolation.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/xmlViolation.test
new file mode 100644
index 000000000..137d96429
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tokenizer/xmlViolation.test
@@ -0,0 +1,22 @@
+{"xmlViolationTests": [
+
+{"description":"Non-XML character",
+"input":"a\uFFFFb",
+"ignoreErrorOrder":true,
+"output":["ParseError",["Character","a\uFFFDb"]]},
+
+{"description":"Non-XML space",
+"input":"a\u000Cb",
+"ignoreErrorOrder":true,
+"output":[["Character","a b"]]},
+
+{"description":"Double hyphen in comment",
+"input":"<!-- foo -- bar -->",
+"output":["ParseError",["Comment"," foo - - bar "]]},
+
+{"description":"FF between attributes",
+"input":"<a b=''\u000Cc=''>",
+"output":[["StartTag","a",{"b":"","c":""}]]}
+]}
+
+
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/README.md b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/README.md
new file mode 100644
index 000000000..2aba4bf86
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/README.md
@@ -0,0 +1,98 @@
+Tree Construction Tests
+=======================
+
+Each file containing tree construction tests consists of any number of
+tests separated by two newlines (LF), with a single newline before the
+end of the file. For instance:
+
+ [TEST]LF
+ LF
+ [TEST]LF
+ LF
+ [TEST]LF
+
+Where each [TEST] has the following format:
+
+Each test must begin with a string "\#data" followed by a newline (LF).
+All subsequent lines until a line that says "\#errors" are the test data
+and must be passed to the system being tested unchanged, except with the
+final newline (on the last line) removed.
+
+Then there must be a line that says "\#errors". It must be followed by
+one line per parse error that a conformant checker would return. It
+doesn't matter what those lines are, although they can't be
+"\#document-fragment", "\#document", "\#script-off", "\#script-on", or
+empty; the only thing that matters is that there be the right number
+of parse errors.
+
+Then there \*may\* be a line that says "\#document-fragment", which must
+be followed by a newline (LF), followed by a string of characters that
+indicates the context element, followed by a newline (LF). If this line
+is present the "\#data" must be parsed using the HTML fragment parsing
+algorithm with the context element as context.
+
+Then there \*may\* be a line that says "\#script-off" or
+"\#script-in". If a line that says "\#script-off" is present, the
+parser must set the scripting flag to disabled. If a line that says
+"\#script-on" is present, it must set it to enabled. Otherwise, the
+test should be run in both modes.
+
+Then there must be a line that says "\#document", which must be followed
+by a dump of the tree of the parsed DOM. Each node must be represented
+by a single line. Each line must start with "| ", followed by two spaces
+per parent node that the node has before the root document node.
+
+- Element nodes must be represented by a "`<`" then the *tag name
+  string* then "`>`", and all the attributes must be given, sorted
+ lexicographically by UTF-16 code unit according to their *attribute
+ name string*, on subsequent lines, as if they were children of the
+ element node.
+- Attribute nodes must have the *attribute name string*, then an "="
+ sign, then the attribute value in double quotes (").
+- Text nodes must be the string, in double quotes. Newlines aren't
+ escaped.
+- Comments must be "`<`" then "`!-- `" then the data then "` -->`".
+- DOCTYPEs must be "`<!DOCTYPE `" then the name, then, if either the
+  public id or the system id is non-empty, a space, the public id in
+  double quotes, another space, and the system id in double quotes,
+  and then in any case "`>`".
+- Processing instructions must be "`<?`", then the target, then a
+ space, then the data and then "`>`". (The HTML parser cannot emit
+ processing instructions, but scripts can, and the WebVTT to DOM
+ rules can emit them.)
+- Template contents are represented by the string "content" with the
+ children below it.
+
+The *tag name string* is the local name prefixed by a namespace
+designator. For the HTML namespace, the namespace designator is the
+empty string, i.e. there's no prefix. For the SVG namespace, the
+namespace designator is "svg ". For the MathML namespace, the namespace
+designator is "math ".
+
+The *attribute name string* is the local name prefixed by a namespace
+designator. For no namespace, the namespace designator is the empty
+string, i.e. there's no prefix. For the XLink namespace, the namespace
+designator is "xlink ". For the XML namespace, the namespace designator
+is "xml ". For the XMLNS namespace, the namespace designator is "xmlns
+". Note the difference between "xlink:href" which is an attribute in no
+namespace with the local name "xlink:href" and "xlink href" which is an
+attribute in the xlink namespace with the local name "href".
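+
+A minimal Python sketch of how a harness might build these prefixed
+name strings (illustrative only; the mapping and helper below are
+assumptions about one possible implementation, not part of the format):
+
+    # Hypothetical helper: map namespace URIs to the designators above.
+    NAMESPACE_DESIGNATORS = {
+        "http://www.w3.org/1999/xhtml": "",            # HTML: no prefix
+        "http://www.w3.org/2000/svg": "svg ",
+        "http://www.w3.org/1998/Math/MathML": "math ",
+        "http://www.w3.org/1999/xlink": "xlink ",
+        "http://www.w3.org/XML/1998/namespace": "xml ",
+        "http://www.w3.org/2000/xmlns/": "xmlns ",
+        None: "",                                       # no namespace: no prefix
+    }
+
+    def name_string(namespace, local_name):
+        """Build the tag/attribute name string used in #document dumps."""
+        return NAMESPACE_DESIGNATORS[namespace] + local_name
+
+    name_string("http://www.w3.org/1999/xlink", "href")  # -> "xlink href"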
+
+If there is also a "\#document-fragment", the bit following "\#document"
+must be a representation of the HTML fragment serialization for the
+context element given by "\#document-fragment".
+
+For example:
+
+ #data
+ <p>One<p>Two
+ #errors
+ 3: Missing document type declaration
+ #document
+    | <html>
+    |   <head>
+    |   <body>
+    |     <p>
+    |       "One"
+    |     <p>
+    |       "Two"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/adoption01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/adoption01.dat
new file mode 100644
index 000000000..2e1127e51
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/adoption01.dat
@@ -0,0 +1,337 @@
+#data
+<a><p></a></p>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,10): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <p>
+| <a>
+
+#data
+<a>1<p>2</a>3</p>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,12): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <p>
+| <a>
+| "2"
+| "3"
+
+#data
+<a>1<button>2</a>3</button>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,17): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <button>
+| <a>
+| "2"
+| "3"
+
+#data
+<a>1<b>2</a>3</b>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,12): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <b>
+| "2"
+| <b>
+| "3"
+
+#data
+<a>1<div>2<div>3</a>4</div>5</div>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,20): adoption-agency-1.3
+(1,20): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <div>
+| <a>
+| "2"
+| <div>
+| <a>
+| "3"
+| "4"
+| "5"
+
+#data
+<table><a>1<p>2</a>3</p>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,10): unexpected-start-tag-implies-table-voodoo
+(1,11): unexpected-character-implies-table-voodoo
+(1,14): unexpected-start-tag-implies-table-voodoo
+(1,15): unexpected-character-implies-table-voodoo
+(1,19): unexpected-end-tag-implies-table-voodoo
+(1,19): adoption-agency-1.3
+(1,20): unexpected-character-implies-table-voodoo
+(1,24): unexpected-end-tag-implies-table-voodoo
+(1,24): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <p>
+| <a>
+| "2"
+| "3"
+| <table>
+
+#data
+<b><b><a><p></a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,16): adoption-agency-1.3
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <b>
+| <a>
+| <p>
+| <a>
+
+#data
+<b><a><b><p></a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,16): adoption-agency-1.3
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <a>
+| <b>
+| <b>
+| <p>
+| <a>
+
+#data
+<a><b><b><p></a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,16): adoption-agency-1.3
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <b>
+| <b>
+| <b>
+| <p>
+| <a>
+
+#data
+<p>1<s id="A">2<b id="B">3</p>4</s>5</b>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,30): unexpected-end-tag
+(1,35): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| "1"
+| <s>
+| id="A"
+| "2"
+| <b>
+| id="B"
+| "3"
+| <s>
+| id="A"
+| <b>
+| id="B"
+| "4"
+| <b>
+| id="B"
+| "5"
+
+#data
+<table><a>1<td>2</td>3</table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,10): unexpected-start-tag-implies-table-voodoo
+(1,11): unexpected-character-implies-table-voodoo
+(1,15): unexpected-cell-in-table-body
+(1,30): unexpected-implied-end-tag-in-table-view
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "1"
+| <a>
+| "3"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "2"
+
+#data
+<table>A<td>B</td>C</table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,8): unexpected-character-implies-table-voodoo
+(1,12): unexpected-cell-in-table-body
+(1,22): unexpected-character-implies-table-voodoo
+#document
+| <html>
+| <head>
+| <body>
+| "AC"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "B"
+
+#data
+<a><svg><tr><input></a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,23): unexpected-end-tag
+(1,23): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <svg svg>
+| <svg tr>
+| <svg input>
+
+#data
+<div><a><b><div><div><div><div><div><div><div><div><div><div></a>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,65): adoption-agency-1.3
+(1,65): adoption-agency-1.3
+(1,65): adoption-agency-1.3
+(1,65): adoption-agency-1.3
+(1,65): adoption-agency-1.3
+(1,65): adoption-agency-1.3
+(1,65): adoption-agency-1.3
+(1,65): adoption-agency-1.3
+(1,65): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <a>
+| <b>
+| <b>
+| <div>
+| <a>
+| <div>
+| <a>
+| <div>
+| <a>
+| <div>
+| <a>
+| <div>
+| <a>
+| <div>
+| <a>
+| <div>
+| <a>
+| <div>
+| <a>
+| <div>
+| <div>
+
+#data
+<div><a><b><u><i><code><div></a>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,32): adoption-agency-1.3
+(1,32): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <a>
+| <b>
+| <u>
+| <i>
+| <code>
+| <u>
+| <i>
+| <code>
+| <div>
+| <a>
+
+#data
+<b><b><b><b>x</b></b></b></b>y
+#errors
+(1,3): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <b>
+| <b>
+| <b>
+| "x"
+| "y"
+
+#data
+<p><b><b><b><b><p>x
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,18): unexpected-end-tag
+(1,19): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <b>
+| <b>
+| <b>
+| <p>
+| <b>
+| <b>
+| <b>
+| "x"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/adoption02.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/adoption02.dat
new file mode 100644
index 000000000..e54d8033b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/adoption02.dat
@@ -0,0 +1,39 @@
+#data
+<b>1<i>2<p>3</b>4
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,16): adoption-agency-1.3
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "1"
+| <i>
+| "2"
+| <i>
+| <p>
+| <b>
+| "3"
+| "4"
+
+#data
+<a><div><style></style><address><a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,35): unexpected-start-tag-implies-end-tag
+(1,35): adoption-agency-1.3
+(1,35): adoption-agency-1.3
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <div>
+| <a>
+| <style>
+| <address>
+| <a>
+| <a>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/comments01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/comments01.dat
new file mode 100644
index 000000000..35ec6cced
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/comments01.dat
@@ -0,0 +1,178 @@
+#data
+FOO<!-- BAR -->BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -->
+| "BAZ"
+
+#data
+FOO<!-- BAR --!>BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,15): unexpected-bang-after-double-dash-in-comment
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -->
+| "BAZ"
+
+#data
+FOO<!-- BAR -- >BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,15): unexpected-char-in-comment
+(1,21): eof-in-comment
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -- >BAZ -->
+
+#data
+FOO<!-- BAR -- <QUX> -- MUX -->BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,15): unexpected-char-in-comment
+(1,24): unexpected-char-in-comment
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -- <QUX> -- MUX -->
+| "BAZ"
+
+#data
+FOO<!-- BAR -- <QUX> -- MUX --!>BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,15): unexpected-char-in-comment
+(1,24): unexpected-char-in-comment
+(1,31): unexpected-bang-after-double-dash-in-comment
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -- <QUX> -- MUX -->
+| "BAZ"
+
+#data
+FOO<!-- BAR -- <QUX> -- MUX -- >BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,15): unexpected-char-in-comment
+(1,24): unexpected-char-in-comment
+(1,31): unexpected-char-in-comment
+(1,35): eof-in-comment
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- BAR -- <QUX> -- MUX -- >BAZ -->
+
+#data
+FOO<!---->BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- -->
+| "BAZ"
+
+#data
+FOO<!--->BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,9): incorrect-comment
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- -->
+| "BAZ"
+
+#data
+FOO<!-->BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,8): incorrect-comment
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- -->
+| "BAZ"
+
+#data
+<?xml version="1.0">Hi
+#errors
+(1,1): expected-tag-name-but-got-question-mark
+(1,22): expected-doctype-but-got-chars
+#document
+| <!-- ?xml version="1.0" -->
+| <html>
+| <head>
+| <body>
+| "Hi"
+
+#data
+<?xml version="1.0">
+#errors
+(1,1): expected-tag-name-but-got-question-mark
+(1,20): expected-doctype-but-got-eof
+#document
+| <!-- ?xml version="1.0" -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?xml version
+#errors
+(1,1): expected-tag-name-but-got-question-mark
+(1,13): expected-doctype-but-got-eof
+#document
+| <!-- ?xml version -->
+| <html>
+| <head>
+| <body>
+
+#data
+FOO<!----->BAZ
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,10): unexpected-dash-after-double-dash-in-comment
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <!-- - -->
+| "BAZ"
+
+#data
+<html><!-- comment --><title>Comment before head</title>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <!-- comment -->
+| <head>
+| <title>
+| "Comment before head"
+| <body>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/doctype01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/doctype01.dat
new file mode 100644
index 000000000..cec663897
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/doctype01.dat
@@ -0,0 +1,424 @@
+#data
+<!DOCTYPE html>Hello
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!dOctYpE HtMl>Hello
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPEhtml>Hello
+#errors
+(1,9): need-space-after-doctype
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE>Hello
+#errors
+(1,9): need-space-after-doctype
+(1,10): expected-doctype-name-but-got-right-bracket
+(1,10): unknown-doctype
+#document
+| <!DOCTYPE >
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE >Hello
+#errors
+(1,11): expected-doctype-name-but-got-right-bracket
+(1,11): unknown-doctype
+#document
+| <!DOCTYPE >
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato>Hello
+#errors
+(1,17): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato >Hello
+#errors
+(1,18): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato taco>Hello
+#errors
+(1,17): expected-space-or-right-bracket-in-doctype
+(1,22): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato taco "ddd>Hello
+#errors
+(1,17): expected-space-or-right-bracket-in-doctype
+(1,27): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato sYstEM>Hello
+#errors
+(1,24): unexpected-char-in-doctype
+(1,24): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato sYstEM >Hello
+#errors
+(1,28): unexpected-char-in-doctype
+(1,28): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato sYstEM ggg>Hello
+#errors
+(1,34): unexpected-char-in-doctype
+(1,37): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEM taco >Hello
+#errors
+(1,25): unexpected-char-in-doctype
+(1,31): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEM 'taco"'>Hello
+#errors
+(1,32): unknown-doctype
+#document
+| <!DOCTYPE potato "" "taco"">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEM "taco">Hello
+#errors
+(1,31): unknown-doctype
+#document
+| <!DOCTYPE potato "" "taco">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEM "tai'co">Hello
+#errors
+(1,33): unknown-doctype
+#document
+| <!DOCTYPE potato "" "tai'co">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato SYSTEMtaco "ddd">Hello
+#errors
+(1,24): unexpected-char-in-doctype
+(1,34): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato grass SYSTEM taco>Hello
+#errors
+(1,17): expected-space-or-right-bracket-in-doctype
+(1,35): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato pUbLIc>Hello
+#errors
+(1,24): unexpected-end-of-doctype
+(1,24): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato pUbLIc >Hello
+#errors
+(1,25): unexpected-end-of-doctype
+(1,25): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato pUbLIcgoof>Hello
+#errors
+(1,24): unexpected-char-in-doctype
+(1,28): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC goof>Hello
+#errors
+(1,25): unexpected-char-in-doctype
+(1,29): unknown-doctype
+#document
+| <!DOCTYPE potato>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC "go'of">Hello
+#errors
+(1,32): unknown-doctype
+#document
+| <!DOCTYPE potato "go'of" "">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC 'go'of'>Hello
+#errors
+(1,29): unexpected-char-in-doctype
+(1,32): unknown-doctype
+#document
+| <!DOCTYPE potato "go" "">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC 'go:hh of' >Hello
+#errors
+(1,38): unknown-doctype
+#document
+| <!DOCTYPE potato "go:hh of" "">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE potato PUBLIC "W3C-//dfdf" SYSTEM ggg>Hello
+#errors
+(1,38): unexpected-char-in-doctype
+(1,48): unknown-doctype
+#document
+| <!DOCTYPE potato "W3C-//dfdf" "">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">Hello
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE ...>Hello
+#errors
+(1,14): unknown-doctype
+#document
+| <!DOCTYPE ...>
+| <html>
+| <head>
+| <body>
+| "Hello"
+
+#data
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+#errors
+(2,58): unknown-doctype
+#document
+| <!DOCTYPE html "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
+#errors
+(2,54): unknown-doctype
+#document
+| <!DOCTYPE html "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE root-element [SYSTEM OR PUBLIC FPI] "uri" [
+<!-- internal declarations -->
+]>
+#errors
+(1,23): expected-space-or-right-bracket-in-doctype
+(2,30): unknown-doctype
+#document
+| <!DOCTYPE root-element>
+| <html>
+| <head>
+| <body>
+| "]>"
+
+#data
+<!DOCTYPE html PUBLIC
+ "-//WAPFORUM//DTD XHTML Mobile 1.0//EN"
+ "http://www.wapforum.org/DTD/xhtml-mobile10.dtd">
+#errors
+(3,53): unknown-doctype
+#document
+| <!DOCTYPE html "-//WAPFORUM//DTD XHTML Mobile 1.0//EN" "http://www.wapforum.org/DTD/xhtml-mobile10.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML SYSTEM "http://www.w3.org/DTD/HTML4-strict.dtd"><body><b>Mine!</b></body>
+#errors
+(1,63): unknown-doctype
+#document
+| <!DOCTYPE html "" "http://www.w3.org/DTD/HTML4-strict.dtd">
+| <html>
+| <head>
+| <body>
+| <b>
+| "Mine!"
+
+#data
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN""http://www.w3.org/TR/html4/strict.dtd">
+#errors
+(1,50): unexpected-char-in-doctype
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'http://www.w3.org/TR/html4/strict.dtd'>
+#errors
+(1,50): unexpected-char-in-doctype
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML PUBLIC"-//W3C//DTD HTML 4.01//EN"'http://www.w3.org/TR/html4/strict.dtd'>
+#errors
+(1,21): unexpected-char-in-doctype
+(1,49): unexpected-char-in-doctype
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML PUBLIC'-//W3C//DTD HTML 4.01//EN''http://www.w3.org/TR/html4/strict.dtd'>
+#errors
+(1,21): unexpected-char-in-doctype
+(1,49): unexpected-char-in-doctype
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+| <html>
+| <head>
+| <body>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/domjs-unsafe.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/domjs-unsafe.dat
new file mode 100644
index 000000000..34b4e6271
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/domjs-unsafe.dat
Binary files differ
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/entities01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/entities01.dat
new file mode 100644
index 000000000..20d53a0fd
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/entities01.dat
@@ -0,0 +1,723 @@
+#data
+FOO&gt;BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO>BAR"
+
+#data
+FOO&gtBAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,6): named-entity-without-semicolon
+#document
+| <html>
+| <head>
+| <body>
+| "FOO>BAR"
+
+#data
+FOO&gt BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,6): named-entity-without-semicolon
+#document
+| <html>
+| <head>
+| <body>
+| "FOO> BAR"
+
+#data
+FOO&gt;;;BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO>;;BAR"
+
+#data
+I'm &notit; I tell you
+#errors
+(1,4): expected-doctype-but-got-chars
+(1,9): named-entity-without-semicolon
+#document
+| <html>
+| <head>
+| <body>
+| "I'm ¬it; I tell you"
+
+#data
+I'm &notin; I tell you
+#errors
+(1,4): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "I'm ∉ I tell you"
+
+#data
+FOO& BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO& BAR"
+
+#data
+FOO&<BAR>
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,9): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&"
+| <bar>
+
+#data
+FOO&&&&gt;BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&&&>BAR"
+
+#data
+FOO&#41;BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO)BAR"
+
+#data
+FOO&#x41;BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOOABAR"
+
+#data
+FOO&#X41;BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOOABAR"
+
+#data
+FOO&#BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,5): expected-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&#BAR"
+
+#data
+FOO&#ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,5): expected-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&#ZOO"
+
+#data
+FOO&#xBAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,7): expected-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOºR"
+
+#data
+FOO&#xZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,6): expected-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&#xZOO"
+
+#data
+FOO&#XZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,6): expected-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO&#XZOO"
+
+#data
+FOO&#41BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,7): numeric-entity-without-semicolon
+#document
+| <html>
+| <head>
+| <body>
+| "FOO)BAR"
+
+#data
+FOO&#x41BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,10): numeric-entity-without-semicolon
+#document
+| <html>
+| <head>
+| <body>
+| "FOO䆺R"
+
+#data
+FOO&#x41ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,8): numeric-entity-without-semicolon
+#document
+| <html>
+| <head>
+| <body>
+| "FOOAZOO"
+
+#data
+FOO&#x0000;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#x0078;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOOxZOO"
+
+#data
+FOO&#x0079;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOOyZOO"
+
+#data
+FOO&#x0080;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO€ZOO"
+
+#data
+FOO&#x0081;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x0082;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‚ZOO"
+
+#data
+FOO&#x0083;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÆ’ZOO"
+
+#data
+FOO&#x0084;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO„ZOO"
+
+#data
+FOO&#x0085;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO…ZOO"
+
+#data
+FOO&#x0086;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO†ZOO"
+
+#data
+FOO&#x0087;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‡ZOO"
+
+#data
+FOO&#x0088;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOˆZOO"
+
+#data
+FOO&#x0089;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‰ZOO"
+
+#data
+FOO&#x008A;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÅ ZOO"
+
+#data
+FOO&#x008B;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‹ZOO"
+
+#data
+FOO&#x008C;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÅ’ZOO"
+
+#data
+FOO&#x008D;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x008E;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOŽZOO"
+
+#data
+FOO&#x008F;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x0090;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x0091;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO‘ZOO"
+
+#data
+FOO&#x0092;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO’ZOO"
+
+#data
+FOO&#x0093;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO“ZOO"
+
+#data
+FOO&#x0094;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOâ€ZOO"
+
+#data
+FOO&#x0095;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO•ZOO"
+
+#data
+FOO&#x0096;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO–ZOO"
+
+#data
+FOO&#x0097;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO—ZOO"
+
+#data
+FOO&#x0098;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOËœZOO"
+
+#data
+FOO&#x0099;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOâ„¢ZOO"
+
+#data
+FOO&#x009A;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÅ¡ZOO"
+
+#data
+FOO&#x009B;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO›ZOO"
+
+#data
+FOO&#x009C;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÅ“ZOO"
+
+#data
+FOO&#x009D;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOÂZOO"
+
+#data
+FOO&#x009E;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOžZOO"
+
+#data
+FOO&#x009F;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOŸZOO"
+
+#data
+FOO&#x00A0;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO ZOO"
+
+#data
+FOO&#xD7FF;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO퟿ZOO"
+
+#data
+FOO&#xD800;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xD801;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xDFFE;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xDFFF;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,11): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xE000;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOOZOO"
+
+#data
+FOO&#x10FFFE;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,13): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOô¿¾ZOO"
+
+#data
+FOO&#x1087D4;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO􈟔ZOO"
+
+#data
+FOO&#x10FFFF;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,13): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOOô¿¿ZOO"
+
+#data
+FOO&#x110000;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,13): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
+
+#data
+FOO&#xFFFFFF;ZOO
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,13): illegal-codepoint-for-numeric-entity
+#document
+| <html>
+| <head>
+| <body>
+| "FOO�ZOO"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/entities02.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/entities02.dat
new file mode 100644
index 000000000..f117f068a
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/entities02.dat
@@ -0,0 +1,283 @@
+#data
+<div bar="ZZ&gt;YY"></div>
+#errors
+(1,20): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ>YY"
+
+#data
+<div bar="ZZ&"></div>
+#errors
+(1,15): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&"
+
+#data
+<div bar='ZZ&'></div>
+#errors
+(1,15): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&"
+
+#data
+<div bar=ZZ&></div>
+#errors
+(1,13): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&"
+
+#data
+<div bar="ZZ&gt=YY"></div>
+#errors
+(1,15): named-entity-without-semicolon
+(1,20): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gt=YY"
+
+#data
+<div bar="ZZ&gt0YY"></div>
+#errors
+(1,20): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gt0YY"
+
+#data
+<div bar="ZZ&gt9YY"></div>
+#errors
+(1,20): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gt9YY"
+
+#data
+<div bar="ZZ&gtaYY"></div>
+#errors
+(1,20): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gtaYY"
+
+#data
+<div bar="ZZ&gtZYY"></div>
+#errors
+(1,20): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&gtZYY"
+
+#data
+<div bar="ZZ&gt YY"></div>
+#errors
+(1,15): named-entity-without-semicolon
+(1,20): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ> YY"
+
+#data
+<div bar="ZZ&gt"></div>
+#errors
+(1,15): named-entity-without-semicolon
+(1,17): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ>"
+
+#data
+<div bar='ZZ&gt'></div>
+#errors
+(1,15): named-entity-without-semicolon
+(1,17): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ>"
+
+#data
+<div bar=ZZ&gt></div>
+#errors
+(1,14): named-entity-without-semicolon
+(1,15): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ>"
+
+#data
+<div bar="ZZ&pound_id=23"></div>
+#errors
+(1,18): named-entity-without-semicolon
+(1,26): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ£_id=23"
+
+#data
+<div bar="ZZ&prod_id=23"></div>
+#errors
+(1,25): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&prod_id=23"
+
+#data
+<div bar="ZZ&pound;_id=23"></div>
+#errors
+(1,27): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ£_id=23"
+
+#data
+<div bar="ZZ&prod;_id=23"></div>
+#errors
+(1,26): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZâˆ_id=23"
+
+#data
+<div bar="ZZ&pound=23"></div>
+#errors
+(1,18): named-entity-without-semicolon
+(1,23): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&pound=23"
+
+#data
+<div bar="ZZ&prod=23"></div>
+#errors
+(1,22): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| bar="ZZ&prod=23"
+
+#data
+<div>ZZ&pound_id=23</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,13): named-entity-without-semicolon
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ£_id=23"
+
+#data
+<div>ZZ&prod_id=23</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ&prod_id=23"
+
+#data
+<div>ZZ&pound;_id=23</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ£_id=23"
+
+#data
+<div>ZZ&prod;_id=23</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZâˆ_id=23"
+
+#data
+<div>ZZ&pound=23</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,13): named-entity-without-semicolon
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ£=23"
+
+#data
+<div>ZZ&prod=23</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "ZZ&prod=23"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/html5test-com.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/html5test-com.dat
new file mode 100644
index 000000000..8c6ec40cd
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/html5test-com.dat
@@ -0,0 +1,291 @@
+#data
+<div<div>
+#errors
+(1,9): expected-doctype-but-got-start-tag
+(1,9): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div<div>
+
+#data
+<div foo<bar=''>
+#errors
+(1,9): invalid-character-in-attribute-name
+(1,16): expected-doctype-but-got-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| foo<bar=""
+
+#data
+<div foo=`bar`>
+#errors
+(1,10): equals-in-unquoted-attribute-value
+(1,14): unexpected-character-in-unquoted-attribute-value
+(1,15): expected-doctype-but-got-start-tag
+(1,15): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| foo="`bar`"
+
+#data
+<div \"foo=''>
+#errors
+(1,7): invalid-character-in-attribute-name
+(1,14): expected-doctype-but-got-start-tag
+(1,14): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| \"foo=""
+
+#data
+<a href='\nbar'></a>
+#errors
+(1,16): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="\nbar"
+
+#data
+<!DOCTYPE html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+&lang;&rang;
+#errors
+(1,6): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "⟨⟩"
+
+#data
+&apos;
+#errors
+(1,6): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "'"
+
+#data
+&ImaginaryI;
+#errors
+(1,12): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "â…ˆ"
+
+#data
+&Kopf;
+#errors
+(1,6): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "ð•‚"
+
+#data
+&notinva;
+#errors
+(1,9): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "∉"
+
+#data
+<?import namespace="foo" implementation="#bar">
+#errors
+(1,1): expected-tag-name-but-got-question-mark
+(1,47): expected-doctype-but-got-eof
+#document
+| <!-- ?import namespace="foo" implementation="#bar" -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!--foo--bar-->
+#errors
+(1,10): unexpected-char-in-comment
+(1,15): expected-doctype-but-got-eof
+#document
+| <!-- foo--bar -->
+| <html>
+| <head>
+| <body>
+
+#data
+<![CDATA[x]]>
+#errors
+(1,2): expected-dashes-or-doctype
+(1,13): expected-doctype-but-got-eof
+#document
+| <!-- [CDATA[x]] -->
+| <html>
+| <head>
+| <body>
+
+#data
+<textarea><!--</textarea>--></textarea>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,39): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--"
+| "-->"
+
+#data
+<textarea><!--</textarea>-->
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--"
+| "-->"
+
+#data
+<style><!--</style>--></style>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,30): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "-->"
+
+#data
+<style><!--</style>-->
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "-->"
+
+#data
+<ul><li>A </li> <li>B</li></ul>
+#errors
+(1,4): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| "A "
+| " "
+| <li>
+| "B"
+
+#data
+<table><form><input type=hidden><input></form><div></div></table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,13): unexpected-form-in-table
+(1,32): unexpected-hidden-input-in-table
+(1,39): unexpected-start-tag-implies-table-voodoo
+(1,46): unexpected-end-tag-implies-table-voodoo
+(1,46): unexpected-end-tag
+(1,51): unexpected-start-tag-implies-table-voodoo
+(1,57): unexpected-end-tag-implies-table-voodoo
+#document
+| <html>
+| <head>
+| <body>
+| <input>
+| <div>
+| <table>
+| <form>
+| <input>
+| type="hidden"
+
+#data
+<i>A<b>B<p></i>C</b>D
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,15): adoption-agency-1.3
+(1,20): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <i>
+| "A"
+| <b>
+| "B"
+| <b>
+| <p>
+| <b>
+| <i>
+| "C"
+| "D"
+
+#data
+<div></div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<svg></svg>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<math></math>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/inbody01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/inbody01.dat
new file mode 100644
index 000000000..10f6520f6
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/inbody01.dat
@@ -0,0 +1,54 @@
+#data
+<button>1</foo>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,15): unexpected-end-tag
+(1,15): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <button>
+| "1"
+
+#data
+<foo>1<p>2</foo>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,16): unexpected-end-tag
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| "1"
+| <p>
+| "2"
+
+#data
+<dd>1</foo>
+#errors
+(1,4): expected-doctype-but-got-start-tag
+(1,11): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <dd>
+| "1"
+
+#data
+<foo>1<dd>2</foo>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,17): unexpected-end-tag
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| "1"
+| <dd>
+| "2"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/isindex.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/isindex.dat
new file mode 100644
index 000000000..42ef997f7
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/isindex.dat
@@ -0,0 +1,47 @@
+#data
+<isindex>
+#errors
+(1,9): expected-doctype-but-got-start-tag
+(1,9): deprecated-tag
+#document
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<isindex name="A" action="B" prompt="C" foo="D">
+#errors
+(1,48): expected-doctype-but-got-start-tag
+(1,48): deprecated-tag
+#document
+| <html>
+| <head>
+| <body>
+| <form>
+| action="B"
+| <hr>
+| <label>
+| "C"
+| <input>
+| foo="D"
+| name="isindex"
+| <hr>
+
+#data
+<form><isindex>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,15): deprecated-tag
+(1,15): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <form>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/main-element.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/main-element.dat
new file mode 100644
index 000000000..4b103bb0f
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/main-element.dat
@@ -0,0 +1,46 @@
+#data
+<!doctype html><p>foo<main>bar<p>baz
+#errors
+(1,36): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "foo"
+| <main>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!doctype html><main><p>foo</main>bar
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <main>
+| <p>
+| "foo"
+| "bar"
+
+#data
+<!DOCTYPE html>xxx<svg><x><g><a><main><b>
+#errors
+ * (1,42) unexpected HTML-like start tag token in foreign content
+ * (1,42) unexpected end of file
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "xxx"
+| <svg svg>
+| <svg x>
+| <svg g>
+| <svg a>
+| <svg main>
+| <b>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/pending-spec-changes-plain-text-unsafe.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/pending-spec-changes-plain-text-unsafe.dat
new file mode 100644
index 000000000..3ee8cec90
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/pending-spec-changes-plain-text-unsafe.dat
Binary files differ
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/pending-spec-changes.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/pending-spec-changes.dat
new file mode 100644
index 000000000..1647d7f23
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/pending-spec-changes.dat
@@ -0,0 +1,46 @@
+#data
+<input type="hidden"><frameset>
+#errors
+(1,21): expected-doctype-but-got-start-tag
+(1,31): unexpected-start-tag
+(1,31): eof-in-frameset
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><table><caption><svg>foo</table>bar
+#errors
+(1,47): unexpected-end-tag
+(1,47): end-table-tag-in-caption
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <svg svg>
+| "foo"
+| "bar"
+
+#data
+<table><tr><td><svg><desc><td></desc><circle>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,30): unexpected-cell-end-tag
+(1,37): unexpected-end-tag
+(1,45): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg desc>
+| <td>
+| <circle>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/plain-text-unsafe.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/plain-text-unsafe.dat
new file mode 100644
index 000000000..f40dd5760
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/plain-text-unsafe.dat
Binary files differ
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/ruby.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/ruby.dat
new file mode 100644
index 000000000..80d0c5331
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/ruby.dat
@@ -0,0 +1,298 @@
+#data
+<html><ruby>a<rb>b<rb></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rb>
+| "b"
+| <rb>
+
+#data
+<html><ruby>a<rb>b<rt></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rb>
+| "b"
+| <rt>
+
+#data
+<html><ruby>a<rb>b<rtc></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rb>
+| "b"
+| <rtc>
+
+#data
+<html><ruby>a<rb>b<rp></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rb>
+| "b"
+| <rp>
+
+#data
+<html><ruby>a<rb>b<span></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rb>
+| "b"
+| <span>
+
+#data
+<html><ruby>a<rt>b<rb></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rt>
+| "b"
+| <rb>
+
+#data
+<html><ruby>a<rt>b<rt></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rt>
+| "b"
+| <rt>
+
+#data
+<html><ruby>a<rt>b<rtc></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rt>
+| "b"
+| <rtc>
+
+#data
+<html><ruby>a<rt>b<rp></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rt>
+| "b"
+| <rp>
+
+#data
+<html><ruby>a<rt>b<span></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rt>
+| "b"
+| <span>
+
+#data
+<html><ruby>a<rtc>b<rb></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rtc>
+| "b"
+| <rb>
+
+#data
+<html><ruby>a<rtc>b<rt>c<rt>d</ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rtc>
+| "b"
+| <rt>
+| "c"
+| <rt>
+| "d"
+
+#data
+<html><ruby>a<rtc>b<rtc></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rtc>
+| "b"
+| <rtc>
+
+#data
+<html><ruby>a<rtc>b<rp></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rtc>
+| "b"
+| <rp>
+
+#data
+<html><ruby>a<rtc>b<span></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rtc>
+| "b"
+| <span>
+
+#data
+<html><ruby>a<rp>b<rb></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rp>
+| "b"
+| <rb>
+
+#data
+<html><ruby>a<rp>b<rt></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rp>
+| "b"
+| <rt>
+
+#data
+<html><ruby>a<rp>b<rtc></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rp>
+| "b"
+| <rtc>
+
+#data
+<html><ruby>a<rp>b<rp></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rp>
+| "b"
+| <rp>
+
+#data
+<html><ruby>a<rp>b<span></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rp>
+| "b"
+| <span>
+
+#data
+<html><ruby><rtc><ruby>a<rb>b<rt></ruby></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <rtc>
+| <ruby>
+| "a"
+| <rb>
+| "b"
+| <rt>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scriptdata01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scriptdata01.dat
new file mode 100644
index 000000000..ac698d282
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scriptdata01.dat
@@ -0,0 +1,365 @@
+#data
+FOO<script>'Hello'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'Hello'"
+| "BAR"
+
+#data
+FOO<script></script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script></script >BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script></script/>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,21): self-closing-flag-on-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script></script/ >BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,20): unexpected-character-after-solidus-in-tag
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script type="text/plain"></scriptx>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,42): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "</scriptx>BAR"
+
+#data
+FOO<script></script foo=">" dd>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,31): attributes-in-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "BAR"
+
+#data
+FOO<script>'<'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<'"
+| "BAR"
+
+#data
+FOO<script>'<!'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!'"
+| "BAR"
+
+#data
+FOO<script>'<!-'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-'"
+| "BAR"
+
+#data
+FOO<script>'<!--'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!--'"
+| "BAR"
+
+#data
+FOO<script>'<!---'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!---'"
+| "BAR"
+
+#data
+FOO<script>'<!-->'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-->'"
+| "BAR"
+
+#data
+FOO<script>'<!-->'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-->'"
+| "BAR"
+
+#data
+FOO<script>'<!-- potato'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-- potato'"
+| "BAR"
+
+#data
+FOO<script>'<!-- <sCrIpt'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-- <sCrIpt'"
+| "BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt>'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,56): expected-script-data-but-got-eof
+(1,56): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt>'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt> -'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,58): expected-script-data-but-got-eof
+(1,58): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt> -'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt> --'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,59): expected-script-data-but-got-eof
+(1,59): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt> --'</script>BAR"
+
+#data
+FOO<script>'<!-- <sCrIpt> -->'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "'<!-- <sCrIpt> -->'"
+| "BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt> --!>'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,61): expected-script-data-but-got-eof
+(1,61): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt> --!>'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt> -- >'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,61): expected-script-data-but-got-eof
+(1,61): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt> -- >'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt '</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,56): expected-script-data-but-got-eof
+(1,56): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt '</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt/'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+(1,56): expected-script-data-but-got-eof
+(1,56): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt/'</script>BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt\'</script>BAR
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt\'"
+| "BAR"
+
+#data
+FOO<script type="text/plain">'<!-- <sCrIpt/'</script>BAR</script>QUX
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| type="text/plain"
+| "'<!-- <sCrIpt/'</script>BAR"
+| "QUX"
+
+#data
+FOO<script><!--<script>-></script>--></script>QUX
+#errors
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "FOO"
+| <script>
+| "<!--<script>-></script>-->"
+| "QUX"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/adoption01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/adoption01.dat
new file mode 100644
index 000000000..4e08d0e84
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/adoption01.dat
@@ -0,0 +1,15 @@
+#data
+<p><b id="A"><script>document.getElementById("A").id = "B"</script></p>TEXT</b>
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| id="B"
+| <script>
+| "document.getElementById("A").id = "B""
+| <b>
+| id="A"
+| "TEXT"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/ark.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/ark.dat
new file mode 100644
index 000000000..acbac41df
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/ark.dat
@@ -0,0 +1,26 @@
+#data
+<p><font size=4><font size=4><font size=4><script>document.getElementsByTagName("font")[2].setAttribute("size", "5");</script><font size=4><p>X
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="5"
+| <script>
+| "document.getElementsByTagName("font")[2].setAttribute("size", "5");"
+| <font>
+| size="4"
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| "X"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/webkit01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/webkit01.dat
new file mode 100644
index 000000000..ef4a41ca0
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/scripted/webkit01.dat
@@ -0,0 +1,28 @@
+#data
+1<script>document.write("2")</script>3
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "1"
+| <script>
+| "document.write("2")"
+| "23"
+
+#data
+1<script>document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")</script>4
+#errors
+#document
+| <html>
+| <head>
+| <body>
+| "1"
+| <script>
+| "document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")"
+| <script>
+| "document.write('2')"
+| "2"
+| <script>
+| "document.write('3')"
+| "34"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tables01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tables01.dat
new file mode 100644
index 000000000..f0caaa3c5
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tables01.dat
@@ -0,0 +1,286 @@
+#data
+<table><th>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,11): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <th>
+
+#data
+<table><td>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,11): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><col foo='bar'>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,22): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <col>
+| foo="bar"
+
+#data
+<table><colgroup></html>foo
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,24): unexpected-end-tag
+(1,27): foster-parenting-character-in-table
+(1,27): foster-parenting-character-in-table
+(1,27): foster-parenting-character-in-table
+(1,27): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| "foo"
+| <table>
+| <colgroup>
+
+#data
+<table></table><p>foo
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <p>
+| "foo"
+
+#data
+<table></body></caption></col></colgroup></html></tbody></td></tfoot></th></thead></tr><td>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,14): unexpected-end-tag
+(1,24): unexpected-end-tag
+(1,30): unexpected-end-tag
+(1,41): unexpected-end-tag
+(1,48): unexpected-end-tag
+(1,56): unexpected-end-tag
+(1,61): unexpected-end-tag
+(1,69): unexpected-end-tag
+(1,74): unexpected-end-tag
+(1,82): unexpected-end-tag
+(1,87): unexpected-end-tag
+(1,91): unexpected-cell-in-table-body
+(1,91): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><select><option>3</select></table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,15): unexpected-start-tag-implies-table-voodoo
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| "3"
+| <table>
+
+#data
+<table><select><table></table></select></table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,15): unexpected-start-tag-implies-table-voodoo
+(1,22): unexpected-table-element-start-tag-in-select-in-table
+(1,22): unexpected-start-tag-implies-end-tag
+(1,39): unexpected-end-tag
+(1,47): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <table>
+| <table>
+
+#data
+<table><select></table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,15): unexpected-start-tag-implies-table-voodoo
+(1,23): unexpected-table-element-end-tag-in-select-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <table>
+
+#data
+<table><select><option>A<tr><td>B</td></tr></table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,15): unexpected-start-tag-implies-table-voodoo
+(1,28): unexpected-table-element-start-tag-in-select-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| "A"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "B"
+
+#data
+<table><td></body></caption></col></colgroup></html>foo
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,18): unexpected-end-tag
+(1,28): unexpected-end-tag
+(1,34): unexpected-end-tag
+(1,45): unexpected-end-tag
+(1,52): unexpected-end-tag
+(1,55): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "foo"
+
+#data
+<table><td>A</table>B
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "A"
+| "B"
+
+#data
+<table><tr><caption>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <caption>
+
+#data
+<table><tr></body></caption></col></colgroup></html></td></th><td>foo
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,18): unexpected-end-tag-in-table-row
+(1,28): unexpected-end-tag-in-table-row
+(1,34): unexpected-end-tag-in-table-row
+(1,45): unexpected-end-tag-in-table-row
+(1,52): unexpected-end-tag-in-table-row
+(1,57): unexpected-end-tag-in-table-row
+(1,62): unexpected-end-tag-in-table-row
+(1,69): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "foo"
+
+#data
+<table><td><tr>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,15): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <tr>
+
+#data
+<table><td><button><td>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,23): unexpected-cell-end-tag
+(1,23): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <button>
+| <td>
+
+#data
+<table><tr><td><svg><desc><td>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,30): unexpected-cell-end-tag
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg desc>
+| <td>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/template.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/template.dat
new file mode 100644
index 000000000..f370cc186
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/template.dat
@@ -0,0 +1,1591 @@
+#data
+<body><template>Hello</template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| "Hello"
+
+#data
+<template>Hello</template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <template>
+| content
+| "Hello"
+| <body>
+
+#data
+<template></template><div></div>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <body>
+| <div>
+
+#data
+<html><template>Hello</template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <template>
+| content
+| "Hello"
+| <body>
+
+#data
+<head><template><div></div></template></head>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <div>
+| <body>
+
+#data
+<div><template><div><span></template><b>
+#errors
+ * (1,6) missing DOCTYPE
+ * (1,38) mismatched template end tag
+ * (1,41) unexpected end of file
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <template>
+| content
+| <div>
+| <span>
+| <b>
+
+#data
+<div><template></div>Hello
+#errors
+ * (1,6) missing DOCTYPE
+ * (1,22) unexpected token in template
+ * (1,27) unexpected end of file in template
+ * (1,27) unexpected end of file
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <template>
+| content
+| "Hello"
+
+#data
+<div></template></div>
+#errors
+ * (1,6) missing DOCTYPE
+ * (1,17) unexpected template end tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<table><template></template></table>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+
+#data
+<table><template></template></div>
+#errors
+ * (1,8) missing DOCTYPE
+ * (1,35) unexpected token in table - foster parenting
+ * (1,35) unexpected end tag
+ * (1,35) unexpected end of file
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+
+#data
+<table><div><template></template></div>
+#errors
+ * (1,8) missing DOCTYPE
+ * (1,13) unexpected token in table - foster parenting
+ * (1,40) unexpected token in table - foster parenting
+ * (1,40) unexpected end of file
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <template>
+| content
+| <table>
+
+#data
+<table><template></template><div></div>
+#errors
+no doctype
+bad div in table
+bad /div in table
+eof in table
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <table>
+| <template>
+| content
+
+#data
+<table> <template></template></table>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| " "
+| <template>
+| content
+
+#data
+<table><tbody><template></template></tbody>
+#errors
+no doctype
+eof in table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <template>
+| content
+
+#data
+<table><tbody><template></tbody></template>
+#errors
+no doctype
+bad /tbody
+eof in table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <template>
+| content
+
+#data
+<table><tbody><template></template></tbody></table>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <template>
+| content
+
+#data
+<table><thead><template></template></thead>
+#errors
+no doctype
+eof in table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <thead>
+| <template>
+| content
+
+#data
+<table><tfoot><template></template></tfoot>
+#errors
+no doctype
+eof in table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tfoot>
+| <template>
+| content
+
+#data
+<select><template></template></select>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <template>
+| content
+
+#data
+<select><template><option></option></template></select>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <template>
+| content
+| <option>
+
+#data
+<template><option></option></select><option></option></template>
+#errors
+no doctype
+bad /select
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <option>
+| <option>
+| <body>
+
+#data
+<select><template></template><option></select>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <template>
+| content
+| <option>
+
+#data
+<select><option><template></template></select>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <template>
+| content
+
+#data
+<select><template>
+#errors
+no doctype
+eof in template
+eof in select
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <template>
+| content
+
+#data
+<select><option></option><template>
+#errors
+no doctype
+eof in template
+eof in select
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <template>
+| content
+
+#data
+<select><option></option><template><option>
+#errors
+no doctype
+eof in template
+eof in select
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <template>
+| content
+| <option>
+
+#data
+<table><thead><template><td></template></table>
+#errors
+ * (1,8) missing DOCTYPE
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <thead>
+| <template>
+| content
+| <td>
+
+#data
+<table><template><thead></template></table>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+| <thead>
+
+#data
+<body><table><template><td></tr><div></template></table>
+#errors
+no doctype
+bad </tr>
+missing </div>
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+| <td>
+| <div>
+
+#data
+<table><template><thead></template></thead></table>
+#errors
+no doctype
+bad /thead after /template
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+| <thead>
+
+#data
+<table><thead><template><tr></template></table>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <thead>
+| <template>
+| content
+| <tr>
+
+#data
+<table><template><tr></template></table>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+| <tr>
+
+#data
+<table><tr><template><td>
+#errors
+no doctype
+eof in template
+eof in table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <template>
+| content
+| <td>
+
+#data
+<table><template><tr><template><td></template></tr></template></table>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+| <tr>
+| <template>
+| content
+| <td>
+
+#data
+<table><template><tr><template><td></td></template></tr></template></table>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+| <tr>
+| <template>
+| content
+| <td>
+
+#data
+<table><template><td></template>
+#errors
+no doctype
+eof in table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <template>
+| content
+| <td>
+
+#data
+<body><template><td></td></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <td>
+
+#data
+<body><template><template><tr></tr></template><td></td></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <template>
+| content
+| <tr>
+| <td>
+
+#data
+<table><colgroup><template><col>
+#errors
+no doctype
+eof in template
+eof in table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <template>
+| content
+| <col>
+
+#data
+<frameset><template><frame></frame></template></frameset>
+#errors
+ * (1,11) missing DOCTYPE
+ * (1,21) unexpected start tag token
+ * (1,36) unexpected end tag token
+ * (1,47) unexpected end tag token
+#document
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<template><frame></frame></frameset><frame></frame></template>
+#errors
+ * (1,11) missing DOCTYPE
+ * (1,18) unexpected start tag
+ * (1,26) unexpected end tag
+ * (1,37) unexpected end tag
+ * (1,44) unexpected start tag
+ * (1,52) unexpected end tag
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <body>
+
+#data
+<template><div><frameset><span></span></div><span></span></template>
+#errors
+no doctype
+bad frameset
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <div>
+| <span>
+| <span>
+| <body>
+
+#data
+<body><template><div><frameset><span></span></div><span></span></template></body>
+#errors
+no doctype
+bad frameset
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <div>
+| <span>
+| <span>
+
+#data
+<body><template><script>var i = 1;</script><td></td></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <script>
+| "var i = 1;"
+| <td>
+
+#data
+<body><template><tr><div></div></tr></template>
+#errors
+no doctype
+foster-parented div
+foster-parented /div
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <tr>
+| <div>
+
+#data
+<body><template><tr></tr><td></td></template>
+#errors
+no doctype
+unexpected <td>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <tr>
+| <tr>
+| <td>
+
+#data
+<body><template><td></td></tr><td></td></template>
+#errors
+no doctype
+bad </tr>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <td>
+| <td>
+
+#data
+<body><template><td></td><tbody><td></td></template>
+#errors
+no doctype
+bad <tbody>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <td>
+| <td>
+
+#data
+<body><template><td></td><caption></caption><td></td></template>
+#errors
+ * (1,7) missing DOCTYPE
+ * (1,35) unexpected start tag in table row
+ * (1,45) unexpected end tag in table row
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <td>
+| <td>
+
+#data
+<body><template><td></td><colgroup></caption><td></td></template>
+#errors
+ * (1,7) missing DOCTYPE
+ * (1,36) unexpected start tag in table row
+ * (1,46) unexpected end tag in table row
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <td>
+| <td>
+
+#data
+<body><template><td></td></table><td></td></template>
+#errors
+no doctype
+bad </table>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <td>
+| <td>
+
+#data
+<body><template><tr></tr><tbody><tr></tr></template>
+#errors
+no doctype
+bad <tbody>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <tr>
+| <tr>
+
+#data
+<body><template><tr></tr><caption><tr></tr></template>
+#errors
+no doctype
+bad <caption>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <tr>
+| <tr>
+
+#data
+<body><template><tr></tr></table><tr></tr></template>
+#errors
+no doctype
+bad </table>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <tr>
+| <tr>
+
+#data
+<body><template><thead></thead><caption></caption><tbody></tbody></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <thead>
+| <caption>
+| <tbody>
+
+#data
+<body><template><thead></thead></table><tbody></tbody></template></body>
+#errors
+no doctype
+bad </table>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <thead>
+| <tbody>
+
+#data
+<body><template><div><tr></tr></div></template>
+#errors
+no doctype
+bad tr
+bad /tr
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <div>
+
+#data
+<body><template><em>Hello</em></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <em>
+| "Hello"
+
+#data
+<body><template><!--comment--></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <!-- comment -->
+
+#data
+<body><template><style></style><td></td></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <style>
+| <td>
+
+#data
+<body><template><meta><td></td></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <meta>
+| <td>
+
+#data
+<body><template><link><td></td></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <link>
+| <td>
+
+#data
+<body><template><template><tr></tr></template><td></td></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <template>
+| content
+| <tr>
+| <td>
+
+#data
+<body><table><colgroup><template><col></col></template></colgroup></table></body>
+#errors
+no doctype
+bad /col
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <template>
+| content
+| <col>
+
+#data
+<body a=b><template><div></div><body c=d><div></div></body></template></body>
+#errors
+no doctype
+bad <body>
+bad </body>
+#document
+| <html>
+| <head>
+| <body>
+| a="b"
+| <template>
+| content
+| <div>
+| <div>
+
+#data
+<html a=b><template><div><html b=c><span></template>
+#errors
+no doctype
+bad <html>
+missing end tags in template
+#document
+| <html>
+| a="b"
+| <head>
+| <template>
+| content
+| <div>
+| <span>
+| <body>
+
+#data
+<html a=b><template><col></col><html b=c><col></col></template>
+#errors
+no doctype
+bad /col
+bad html
+bad /col
+#document
+| <html>
+| a="b"
+| <head>
+| <template>
+| content
+| <col>
+| <col>
+| <body>
+
+#data
+<html a=b><template><frame></frame><html b=c><frame></frame></template>
+#errors
+no doctype
+bad frame
+bad /frame
+bad html
+bad frame
+bad /frame
+#document
+| <html>
+| a="b"
+| <head>
+| <template>
+| content
+| <body>
+
+#data
+<body><template><tr></tr><template></template><td></td></template>
+#errors
+no doctype
+unexpected <td>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <tr>
+| <template>
+| content
+| <tr>
+| <td>
+
+#data
+<body><template><thead></thead><template><tr></tr></template><tr></tr><tfoot></tfoot></template>
+#errors
+no doctype
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <thead>
+| <template>
+| content
+| <tr>
+| <tbody>
+| <tr>
+| <tfoot>
+
+#data
+<body><template><template><b><template></template></template>text</template>
+#errors
+no doctype
+missing </b>
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <template>
+| content
+| <b>
+| <template>
+| content
+| "text"
+
+#data
+<body><template><col><colgroup>
+#errors
+no doctype
+bad colgroup
+eof in template
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <col>
+
+#data
+<body><template><col></colgroup>
+#errors
+no doctype
+bogus /colgroup
+eof in template
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <col>
+
+#data
+<body><template><col><colgroup></template></body>
+#errors
+no doctype
+bad colgroup
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <col>
+
+#data
+<body><template><col><div>
+#errors
+ * (1,7) missing DOCTYPE
+ * (1,27) unexpected token
+ * (1,27) unexpected end of file in template
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <col>
+
+#data
+<body><template><col></div>
+#errors
+no doctype
+bad /div
+eof in template
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <col>
+
+#data
+<body><template><col>Hello
+#errors
+no doctype
+unexpected text
+eof in template
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <col>
+
+#data
+<body><template><i><menu>Foo</i>
+#errors
+no doctype
+missing /menu
+eof in template
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <i>
+| <menu>
+| <i>
+| "Foo"
+
+#data
+<body><template></div><div>Foo</div><template></template><tr></tr>
+#errors
+no doctype
+bogus /div
+bogus tr
+bogus /tr
+eof in template
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+| <div>
+| "Foo"
+| <template>
+| content
+
+#data
+<body><div><template></div><tr><td>Foo</td></tr></template>
+#errors
+ * (1,7) missing DOCTYPE
+ * (1,28) unexpected token in template
+ * (1,60) unexpected end of file
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <template>
+| content
+| <tr>
+| <td>
+| "Foo"
+
+#data
+<template></figcaption><sub><table></table>
+#errors
+no doctype
+bad /figcaption
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <sub>
+| <table>
+| <body>
+
+#data
+<template><template>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <body>
+
+#data
+<template><div>
+#errors
+no doctype
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <div>
+| <body>
+
+#data
+<template><template><div>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <div>
+| <body>
+
+#data
+<template><template><table>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <table>
+| <body>
+
+#data
+<template><template><tbody>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <tbody>
+| <body>
+
+#data
+<template><template><tr>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <tr>
+| <body>
+
+#data
+<template><template><td>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <td>
+| <body>
+
+#data
+<template><template><caption>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <caption>
+| <body>
+
+#data
+<template><template><colgroup>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <colgroup>
+| <body>
+
+#data
+<template><template><col>
+#errors
+no doctype
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <col>
+| <body>
+
+#data
+<template><template><tbody><select>
+#errors
+ * (1,11) missing DOCTYPE
+ * (1,36) unexpected token in table - foster parenting
+ * (1,36) unexpected end of file in template
+ * (1,36) unexpected end of file in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <tbody>
+| <select>
+| <body>
+
+#data
+<template><template><table>Foo
+#errors
+no doctype
+foster-parenting text F
+foster-parenting text o
+foster-parenting text o
+eof
+eof
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| "Foo"
+| <table>
+| <body>
+
+#data
+<template><template><frame>
+#errors
+no doctype
+bad tag
+eof
+eof
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <body>
+
+#data
+<template><template><script>var i
+#errors
+no doctype
+eof in script
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <script>
+| "var i"
+| <body>
+
+#data
+<template><template><style>var i
+#errors
+no doctype
+eof in style
+eof in template
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <template>
+| content
+| <style>
+| "var i"
+| <body>
+
+#data
+<template><table></template><body><span>Foo
+#errors
+no doctype
+missing /table
+bad eof
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <table>
+| <body>
+| <span>
+| "Foo"
+
+#data
+<template><td></template><body><span>Foo
+#errors
+no doctype
+bad eof
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <td>
+| <body>
+| <span>
+| "Foo"
+
+#data
+<template><object></template><body><span>Foo
+#errors
+no doctype
+missing /object
+bad eof
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <object>
+| <body>
+| <span>
+| "Foo"
+
+#data
+<template><svg><template>
+#errors
+no doctype
+eof in template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <svg svg>
+| <svg template>
+| <body>
+
+#data
+<template><svg><foo><template><foreignObject><div></template><div>
+#errors
+no doctype
+ugly template closure
+bad eof
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <svg svg>
+| <svg foo>
+| <svg template>
+| <svg foreignObject>
+| <div>
+| <body>
+| <div>
+
+#data
+<dummy><template><span></dummy>
+#errors
+no doctype
+bad end tag </dummy>
+eof in template
+eof in dummy
+#document
+| <html>
+| <head>
+| <body>
+| <dummy>
+| <template>
+| content
+| <span>
+
+#data
+<body><table><tr><td><select><template>Foo</template><caption>A</table>
+#errors
+no doctype
+(1,62): unexpected-caption-in-select-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <select>
+| <template>
+| content
+| "Foo"
+| <caption>
+| "A"
+
+#data
+<body></body><template>
+#errors
+no doctype
+(1,23): template-after-body
+(1,24): eof-in-template
+#document
+| <html>
+| <head>
+| <body>
+| <template>
+| content
+
+#data
+<head></head><template>
+#errors
+no doctype
+(1,23): template-after-head
+(1,24): eof-in-template
+#document
+| <html>
+| <head>
+| <template>
+| content
+| <body>
+
+#data
+<head></head><template>Foo</template>
+#errors
+no doctype
+(1,23): template-after-head
+#document
+| <html>
+| <head>
+| <template>
+| content
+| "Foo"
+| <body>
+
+#data
+<!DOCTYPE HTML><dummy><table><template><table><template><table><script>
+#errors
+eof script
+eof template
+eof template
+eof table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <dummy>
+| <table>
+| <template>
+| content
+| <table>
+| <template>
+| content
+| <table>
+| <script>
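The .dat files added by this commit all follow the html5lib tree-construction test format visible above: each case is a "#data" block holding the parser input, an "#errors" block with one expected parse error per line, and a "#document" block giving the expected tree, serialized with a "| " prefix, two spaces of indentation per nesting level, attributes and template "content" markers one level below their element, and foreign elements written as "svg svg", "svg desc", and so on. As a rough sketch only (not part of this changeset and not the html5lib test harness's own loader; parse_dat and tree_depth are illustrative names), a minimal reader for one of these files, taken as a plain .dat file rather than this diff view, could look like:

import codecs


def parse_dat(path):
    """Split a tree-construction .dat file into test cases.

    Returns a list of dicts mapping section name ('data', 'errors',
    'document', ...) to that section's lines. Assumes test input lines
    never begin with '#', which holds for the files in this commit.
    """
    cases, current, key = [], None, None
    with codecs.open(path, encoding="utf-8") as handle:
        for line in handle.read().split("\n"):
            if line.startswith("#"):
                if line == "#data":
                    # "#data" opens a new case; flush the previous one.
                    if current is not None:
                        cases.append(current)
                    current = {}
                key = line[1:]
                if current is not None:
                    current[key] = []
            elif current is not None and key is not None:
                current[key].append(line)
    if current is not None:
        cases.append(current)
    for case in cases:
        for lines in case.values():
            # Drop the blank separator line trailing each section.
            while lines and not lines[-1]:
                lines.pop()
    return cases


def tree_depth(line):
    """Nesting depth of an expected-tree line: '| <html>' is 0,
    '|   <head>' is 1, and an attribute or 'content' line sits one
    level below the element it belongs to."""
    body = line[2:]  # strip the '| ' prefix
    return (len(body) - len(body.lstrip(" "))) // 2

For example, parse_dat("tables01.dat")[0]["data"] would be ["<table><th>"], the matching "errors" entry would hold the three error lines expected for that input, and tree_depth('|     <table>') is 2, matching the expected tree shown for that case.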
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests1.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests1.dat
new file mode 100644
index 000000000..d6726e305
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests1.dat
@@ -0,0 +1,1959 @@
+#data
+Test
+#errors
+(1,0): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "Test"
+
+#data
+<p>One<p>Two
+#errors
+(1,3): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| "One"
+| <p>
+| "Two"
+
+#data
+Line1<br>Line2<br>Line3<br>Line4
+#errors
+(1,0): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "Line1"
+| <br>
+| "Line2"
+| <br>
+| "Line3"
+| <br>
+| "Line4"
+
+#data
+<html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<head>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<body>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head></head>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head></head><body>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head></head><body></body>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head><body></body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head></body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><head><body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<html><body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<head></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+</head>
+#errors
+(1,7): expected-doctype-but-got-end-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+</body>
+#errors
+(1,7): expected-doctype-but-got-end-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+</html>
+#errors
+(1,7): expected-doctype-but-got-end-tag
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<b><table><td><i></table>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,14): unexpected-cell-in-table-body
+(1,25): unexpected-cell-end-tag
+(1,25): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+
+#data
+<b><table><td></b><i></table>X
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,14): unexpected-cell-in-table-body
+(1,18): unexpected-end-tag
+(1,29): unexpected-cell-end-tag
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+| "X"
+
+#data
+<h1>Hello<h2>World
+#errors
+(1,4): expected-doctype-but-got-start-tag
+(1,13): unexpected-start-tag
+(1,18): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <h1>
+| "Hello"
+| <h2>
+| "World"
+
+#data
+<a><p>X<a>Y</a>Z</p></a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,10): unexpected-start-tag-implies-end-tag
+(1,10): adoption-agency-1.3
+(1,24): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <p>
+| <a>
+| "X"
+| <a>
+| "Y"
+| "Z"
+
+#data
+<b><button>foo</b>bar
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,18): adoption-agency-1.3
+(1,21): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <button>
+| <b>
+| "foo"
+| "bar"
+
+#data
+<!DOCTYPE html><span><button>foo</span>bar
+#errors
+(1,39): unexpected-end-tag
+(1,42): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <span>
+| <button>
+| "foobar"
+
+#data
+<p><b><div><marquee></p></b></div>X
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,11): unexpected-end-tag
+(1,24): unexpected-end-tag
+(1,28): unexpected-end-tag
+(1,34): end-tag-too-early
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <div>
+| <b>
+| <marquee>
+| <p>
+| "X"
+
+#data
+<script><div></script></div><title><p></title><p><p>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,28): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<div>"
+| <title>
+| "<p>"
+| <body>
+| <p>
+| <p>
+
+#data
+<!--><div>--<!-->
+#errors
+(1,5): incorrect-comment
+(1,10): expected-doctype-but-got-start-tag
+(1,17): incorrect-comment
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <!-- -->
+| <html>
+| <head>
+| <body>
+| <div>
+| "--"
+| <!-- -->
+
+#data
+<p><hr></p>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,11): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <hr>
+| <p>
+
+#data
+<select><b><option><select><option></b></select>X
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,11): unexpected-start-tag-in-select
+(1,27): unexpected-select-in-select
+(1,39): unexpected-end-tag
+(1,48): unexpected-end-tag
+(1,49): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <option>
+| "X"
+
+#data
+<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,14): unexpected-cell-in-table-body
+(1,35): unexpected-start-tag-implies-end-tag
+(1,40): unexpected-cell-end-tag
+(1,43): unexpected-start-tag-implies-table-voodoo
+(1,43): unexpected-start-tag-implies-end-tag
+(1,43): unexpected-end-tag
+(1,63): unexpected-start-tag-implies-end-tag
+(1,64): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <a>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| <table>
+| <a>
+| <a>
+| <b>
+| "X"
+| "C"
+| <a>
+| "Y"
+
+#data
+<a X>0<b>1<a Y>2
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,15): unexpected-start-tag-implies-end-tag
+(1,15): adoption-agency-1.3
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| x=""
+| "0"
+| <b>
+| "1"
+| <b>
+| <a>
+| y=""
+| "2"
+
+#data
+<!-----><font><div>hello<table>excite!<b>me!<th><i>please!</tr><!--X-->
+#errors
+(1,7): unexpected-dash-after-double-dash-in-comment
+(1,14): expected-doctype-but-got-start-tag
+(1,41): unexpected-start-tag-implies-table-voodoo
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): foster-parenting-character-in-table
+(1,48): unexpected-cell-in-table-body
+(1,63): unexpected-cell-end-tag
+(1,71): eof-in-table
+#document
+| <!-- - -->
+| <html>
+| <head>
+| <body>
+| <font>
+| <div>
+| "helloexcite!"
+| <b>
+| "me!"
+| <table>
+| <tbody>
+| <tr>
+| <th>
+| <i>
+| "please!"
+| <!-- X -->
+
+#data
+<!DOCTYPE html><li>hello<li>world<ul>how<li>do</ul>you</body><!--do-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <li>
+| "hello"
+| <li>
+| "world"
+| <ul>
+| "how"
+| <li>
+| "do"
+| "you"
+| <!-- do -->
+
+#data
+<!DOCTYPE html>A<option>B<optgroup>C<select>D</option>E
+#errors
+(1,54): unexpected-end-tag-in-select
+(1,55): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "A"
+| <option>
+| "B"
+| <optgroup>
+| "C"
+| <select>
+| "DE"
+
+#data
+<
+#errors
+(1,1): expected-tag-name
+(1,1): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "<"
+
+#data
+<#
+#errors
+(1,1): expected-tag-name
+(1,1): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "<#"
+
+#data
+</
+#errors
+(1,2): expected-closing-tag-but-got-eof
+(1,2): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "</"
+
+#data
+</#
+#errors
+(1,2): expected-closing-tag-but-got-char
+(1,3): expected-doctype-but-got-eof
+#document
+| <!-- # -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?
+#errors
+(1,1): expected-tag-name-but-got-question-mark
+(1,2): expected-doctype-but-got-eof
+#document
+| <!-- ? -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?#
+#errors
+(1,1): expected-tag-name-but-got-question-mark
+(1,3): expected-doctype-but-got-eof
+#document
+| <!-- ?# -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!
+#errors
+(1,2): expected-dashes-or-doctype
+(1,2): expected-doctype-but-got-eof
+#document
+| <!-- -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!#
+#errors
+(1,2): expected-dashes-or-doctype
+(1,3): expected-doctype-but-got-eof
+#document
+| <!-- # -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?COMMENT?>
+#errors
+(1,1): expected-tag-name-but-got-question-mark
+(1,11): expected-doctype-but-got-eof
+#document
+| <!-- ?COMMENT? -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!COMMENT>
+#errors
+(1,2): expected-dashes-or-doctype
+(1,10): expected-doctype-but-got-eof
+#document
+| <!-- COMMENT -->
+| <html>
+| <head>
+| <body>
+
+#data
+</ COMMENT >
+#errors
+(1,2): expected-closing-tag-but-got-char
+(1,12): expected-doctype-but-got-eof
+#document
+| <!-- COMMENT -->
+| <html>
+| <head>
+| <body>
+
+#data
+<?COM--MENT?>
+#errors
+(1,1): expected-tag-name-but-got-question-mark
+(1,13): expected-doctype-but-got-eof
+#document
+| <!-- ?COM--MENT? -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!COM--MENT>
+#errors
+(1,2): expected-dashes-or-doctype
+(1,12): expected-doctype-but-got-eof
+#document
+| <!-- COM--MENT -->
+| <html>
+| <head>
+| <body>
+
+#data
+</ COM--MENT >
+#errors
+(1,2): expected-closing-tag-but-got-char
+(1,14): expected-doctype-but-got-eof
+#document
+| <!-- COM--MENT -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><style> EOF
+#errors
+(1,26): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| " EOF"
+| <body>
+
+#data
+<!DOCTYPE html><script> <!-- </script> --> </script> EOF
+#errors
+(1,52): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| " <!-- "
+| " "
+| <body>
+| "--> EOF"
+
+#data
+<b><p></b>TEST
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,10): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <p>
+| <b>
+| "TEST"
+
+#data
+<p id=a><b><p id=b></b>TEST
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,19): unexpected-end-tag
+(1,23): adoption-agency-1.2
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| id="a"
+| <b>
+| <p>
+| id="b"
+| "TEST"
+
+#data
+<b id=a><p><b id=b></p></b>TEST
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,23): unexpected-end-tag
+(1,27): adoption-agency-1.2
+(1,31): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| id="a"
+| <p>
+| <b>
+| id="b"
+| "TEST"
+
+#data
+<!DOCTYPE html><title>U-test</title><body><div><p>Test<u></p></div></body>
+#errors
+(1,61): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "U-test"
+| <body>
+| <div>
+| <p>
+| "Test"
+| <u>
+
+#data
+<!DOCTYPE html><font><table></font></table></font>
+#errors
+(1,35): unexpected-end-tag-implies-table-voodoo
+(1,35): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <font>
+| <table>
+
+#data
+<font><p>hello<b>cruel</font>world
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,29): adoption-agency-1.3
+(1,29): adoption-agency-1.3
+(1,34): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <font>
+| <p>
+| <font>
+| "hello"
+| <b>
+| "cruel"
+| <b>
+| "world"
+
+#data
+<b>Test</i>Test
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,11): unexpected-end-tag
+(1,15): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "TestTest"
+
+#data
+<b>A<cite>B<div>C
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "A"
+| <cite>
+| "B"
+| <div>
+| "C"
+
+#data
+<b>A<cite>B<div>C</cite>D
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,24): unexpected-end-tag
+(1,25): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "A"
+| <cite>
+| "B"
+| <div>
+| "CD"
+
+#data
+<b>A<cite>B<div>C</b>D
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,21): adoption-agency-1.3
+(1,22): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "A"
+| <cite>
+| "B"
+| <div>
+| <b>
+| "C"
+| "D"
+
+#data
+
+#errors
+(1,0): expected-doctype-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<DIV>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,5): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<DIV> abc
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,9): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc"
+
+#data
+<DIV> abc <B>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,13): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+
+#data
+<DIV> abc <B> def
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def"
+
+#data
+<DIV> abc <B> def <I>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,21): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+
+#data
+<DIV> abc <B> def <I> ghi
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,25): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi"
+
+#data
+<DIV> abc <B> def <I> ghi <P>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,29): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <p>
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <p>
+| " jkl"
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,38): adoption-agency-1.3
+(1,38): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <b>
+| " jkl "
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,38): adoption-agency-1.3
+(1,42): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <b>
+| " jkl "
+| " mno"
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,38): adoption-agency-1.3
+(1,47): adoption-agency-1.3
+(1,47): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <i>
+| <b>
+| " jkl "
+| " mno "
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,38): adoption-agency-1.3
+(1,47): adoption-agency-1.3
+(1,51): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <i>
+| <b>
+| " jkl "
+| " mno "
+| " pqr"
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr </P>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,38): adoption-agency-1.3
+(1,47): adoption-agency-1.3
+(1,56): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <i>
+| <b>
+| " jkl "
+| " mno "
+| " pqr "
+
+#data
+<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr </P> stu
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,38): adoption-agency-1.3
+(1,47): adoption-agency-1.3
+(1,60): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| " abc "
+| <b>
+| " def "
+| <i>
+| " ghi "
+| <i>
+| <p>
+| <i>
+| <b>
+| " jkl "
+| " mno "
+| " pqr "
+| " stu"
+
+#data
+<test attribute---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------->
+#errors
+(1,1040): expected-doctype-but-got-start-tag
+(1,1040): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <test>
+| attribute----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------=""
+
+#data
+<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe
+#errors
+(1,15): expected-doctype-but-got-start-tag
+(1,39): unexpected-start-tag-implies-table-voodoo
+(1,39): unexpected-start-tag-implies-end-tag
+(1,39): unexpected-end-tag
+(1,45): foster-parenting-character-in-table
+(1,45): foster-parenting-character-in-table
+(1,68): foster-parenting-character-in-table
+(1,71): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="blah"
+| "aba"
+| <a>
+| href="foo"
+| "br"
+| <a>
+| href="foo"
+| "x"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| href="foo"
+| "aoe"
+
+#data
+<a href="blah">aba<table><tr><td><a href="foo">br</td></tr>x</table>aoe
+#errors
+(1,15): expected-doctype-but-got-start-tag
+(1,54): unexpected-cell-end-tag
+(1,68): unexpected text in table
+(1,71): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="blah"
+| "abax"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| href="foo"
+| "br"
+| "aoe"
+
+#data
+<table><a href="blah">aba<tr><td><a href="foo">br</td></tr>x</table>aoe
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,22): unexpected-start-tag-implies-table-voodoo
+(1,29): foster-parenting-character-in-table
+(1,29): foster-parenting-character-in-table
+(1,29): foster-parenting-character-in-table
+(1,54): unexpected-cell-end-tag
+(1,68): foster-parenting-character-in-table
+(1,71): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="blah"
+| "aba"
+| <a>
+| href="blah"
+| "x"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| href="foo"
+| "br"
+| <a>
+| href="blah"
+| "aoe"
+
+#data
+<a href=a>aa<marquee>aa<a href=b>bb</marquee>aa
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,45): end-tag-too-early
+(1,47): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| href="a"
+| "aa"
+| <marquee>
+| "aa"
+| <a>
+| href="b"
+| "bb"
+| "aa"
+
+#data
+<wbr><strike><code></strike><code><strike></code>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,28): adoption-agency-1.3
+(1,49): adoption-agency-1.3
+(1,49): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <wbr>
+| <strike>
+| <code>
+| <code>
+| <code>
+| <strike>
+
+#data
+<!DOCTYPE html><spacer>foo
+#errors
+(1,26): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <spacer>
+| "foo"
+
+#data
+<title><meta></title><link><title><meta></title>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <title>
+| "<meta>"
+| <link>
+| <title>
+| "<meta>"
+| <body>
+
+#data
+<style><!--</style><meta><script>--><link></script>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <meta>
+| <script>
+| "--><link>"
+| <body>
+
+#data
+<head><meta></head><link>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,25): unexpected-start-tag-out-of-my-head
+#document
+| <html>
+| <head>
+| <meta>
+| <link>
+| <body>
+
+#data
+<table><tr><tr><td><td><span><th><span>X</table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,33): unexpected-cell-end-tag
+(1,48): unexpected-cell-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <tr>
+| <td>
+| <td>
+| <span>
+| <th>
+| <span>
+| "X"
+
+#data
+<body><body><base><link><meta><title><p></title><body><p></body>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,12): unexpected-start-tag
+(1,54): unexpected-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <base>
+| <link>
+| <meta>
+| <title>
+| "<p>"
+| <p>
+
+#data
+<textarea><p></textarea>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<p>"
+
+#data
+<p><image></p>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,10): unexpected-start-tag-treated-as
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <img>
+
+#data
+<a><table><a></table><p><a><div><a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,13): unexpected-start-tag-implies-table-voodoo
+(1,13): unexpected-start-tag-implies-end-tag
+(1,13): adoption-agency-1.3
+(1,27): unexpected-start-tag-implies-end-tag
+(1,27): adoption-agency-1.2
+(1,32): unexpected-end-tag
+(1,35): unexpected-start-tag-implies-end-tag
+(1,35): adoption-agency-1.2
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <a>
+| <table>
+| <p>
+| <a>
+| <div>
+| <a>
+
+#data
+<head></p><meta><p>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,10): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <meta>
+| <body>
+| <p>
+
+#data
+<head></html><meta><p>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,19): expected-eof-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <meta>
+| <p>
+
+#data
+<b><table><td><i></table>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,14): unexpected-cell-in-table-body
+(1,25): unexpected-cell-end-tag
+(1,25): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+
+#data
+<b><table><td></b><i></table>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,14): unexpected-cell-in-table-body
+(1,18): unexpected-end-tag
+(1,29): unexpected-cell-end-tag
+(1,29): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+
+#data
+<h1><h2>
+#errors
+(1,4): expected-doctype-but-got-start-tag
+(1,8): unexpected-start-tag
+(1,8): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <h1>
+| <h2>
+
+#data
+<a><p><a></a></p></a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,9): unexpected-start-tag-implies-end-tag
+(1,9): adoption-agency-1.3
+(1,21): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <p>
+| <a>
+| <a>
+
+#data
+<b><button></b></button></b>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,15): adoption-agency-1.3
+(1,28): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <button>
+| <b>
+
+#data
+<p><b><div><marquee></p></b></div>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,11): unexpected-end-tag
+(1,24): unexpected-end-tag
+(1,28): unexpected-end-tag
+(1,34): end-tag-too-early
+(1,34): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <div>
+| <b>
+| <marquee>
+| <p>
+
+#data
+<script></script></div><title></title><p><p>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,23): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <script>
+| <title>
+| <body>
+| <p>
+| <p>
+
+#data
+<p><hr></p>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,11): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <hr>
+| <p>
+
+#data
+<select><b><option><select><option></b></select>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,11): unexpected-start-tag-in-select
+(1,27): unexpected-select-in-select
+(1,39): unexpected-end-tag
+(1,48): unexpected-end-tag
+(1,48): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <option>
+
+#data
+<html><head><title></title><body></body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <title>
+| <body>
+
+#data
+<a><table><td><a><table></table><a></tr><a></table><a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,14): unexpected-cell-in-table-body
+(1,35): unexpected-start-tag-implies-end-tag
+(1,40): unexpected-cell-end-tag
+(1,43): unexpected-start-tag-implies-table-voodoo
+(1,43): unexpected-start-tag-implies-end-tag
+(1,43): unexpected-end-tag
+(1,54): unexpected-start-tag-implies-end-tag
+(1,54): adoption-agency-1.2
+(1,54): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <a>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <a>
+| <table>
+| <a>
+| <a>
+
+#data
+<ul><li></li><div><li></div><li><li><div><li><address><li><b><em></b><li></ul>
+#errors
+(1,4): expected-doctype-but-got-start-tag
+(1,45): end-tag-too-early
+(1,58): end-tag-too-early
+(1,69): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| <div>
+| <li>
+| <li>
+| <li>
+| <div>
+| <li>
+| <address>
+| <li>
+| <b>
+| <em>
+| <li>
+
+#data
+<ul><li><ul></li><li>a</li></ul></li></ul>
+#errors
+(1,4): expected-doctype-but-got-start-tag
+(1,17): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| <ul>
+| <li>
+| "a"
+
+#data
+<frameset><frame><frameset><frame></frameset><noframes></noframes></frameset>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
+| <frame>
+| <frameset>
+| <frame>
+| <noframes>
+
+#data
+<h1><table><td><h3></table><h3></h1>
+#errors
+(1,4): expected-doctype-but-got-start-tag
+(1,15): unexpected-cell-in-table-body
+(1,27): unexpected-cell-end-tag
+(1,31): unexpected-start-tag
+(1,36): end-tag-too-early
+#document
+| <html>
+| <head>
+| <body>
+| <h1>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <h3>
+| <h3>
+
+#data
+<table><colgroup><col><colgroup><col><col><col><colgroup><col><col><thead><tr><td></table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <col>
+| <colgroup>
+| <col>
+| <col>
+| <col>
+| <colgroup>
+| <col>
+| <col>
+| <thead>
+| <tr>
+| <td>
+
+#data
+<table><col><tbody><col><tr><col><td><col></table><col>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,37): unexpected-cell-in-table-body
+(1,55): unexpected-start-tag-ignored
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <col>
+| <tbody>
+| <colgroup>
+| <col>
+| <tbody>
+| <tr>
+| <colgroup>
+| <col>
+| <tbody>
+| <tr>
+| <td>
+| <colgroup>
+| <col>
+
+#data
+<table><colgroup><tbody><colgroup><tr><colgroup><td><colgroup></table><colgroup>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,52): unexpected-cell-in-table-body
+(1,80): unexpected-start-tag-ignored
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+| <tbody>
+| <colgroup>
+| <tbody>
+| <tr>
+| <colgroup>
+| <tbody>
+| <tr>
+| <td>
+| <colgroup>
+
+#data
+</strong></b></em></i></u></strike></s></blink></tt></pre></big></small></font></select></h1></h2></h3></h4></h5></h6></body></br></a></img></title></span></style></script></table></th></td></tr></frame></area></link></param></hr></input></col></base></meta></basefont></bgsound></embed></spacer></p></dd></dt></caption></colgroup></tbody></tfoot></thead></address></blockquote></center></dir></div></dl></fieldset></listing></menu></ol></ul></li></nobr></wbr></form></button></marquee></object></html></frameset></head></iframe></image></isindex></noembed></noframes></noscript></optgroup></option></plaintext></textarea>
+#errors
+(1,9): expected-doctype-but-got-end-tag
+(1,9): unexpected-end-tag-before-html
+(1,13): unexpected-end-tag-before-html
+(1,18): unexpected-end-tag-before-html
+(1,22): unexpected-end-tag-before-html
+(1,26): unexpected-end-tag-before-html
+(1,35): unexpected-end-tag-before-html
+(1,39): unexpected-end-tag-before-html
+(1,47): unexpected-end-tag-before-html
+(1,52): unexpected-end-tag-before-html
+(1,58): unexpected-end-tag-before-html
+(1,64): unexpected-end-tag-before-html
+(1,72): unexpected-end-tag-before-html
+(1,79): unexpected-end-tag-before-html
+(1,88): unexpected-end-tag-before-html
+(1,93): unexpected-end-tag-before-html
+(1,98): unexpected-end-tag-before-html
+(1,103): unexpected-end-tag-before-html
+(1,108): unexpected-end-tag-before-html
+(1,113): unexpected-end-tag-before-html
+(1,118): unexpected-end-tag-before-html
+(1,130): unexpected-end-tag-after-body
+(1,130): unexpected-end-tag-treated-as
+(1,134): unexpected-end-tag
+(1,140): unexpected-end-tag
+(1,148): unexpected-end-tag
+(1,155): unexpected-end-tag
+(1,163): unexpected-end-tag
+(1,172): unexpected-end-tag
+(1,180): unexpected-end-tag
+(1,185): unexpected-end-tag
+(1,190): unexpected-end-tag
+(1,195): unexpected-end-tag
+(1,203): unexpected-end-tag
+(1,210): unexpected-end-tag
+(1,217): unexpected-end-tag
+(1,225): unexpected-end-tag
+(1,230): unexpected-end-tag
+(1,238): unexpected-end-tag
+(1,244): unexpected-end-tag
+(1,251): unexpected-end-tag
+(1,258): unexpected-end-tag
+(1,269): unexpected-end-tag
+(1,279): unexpected-end-tag
+(1,287): unexpected-end-tag
+(1,296): unexpected-end-tag
+(1,300): unexpected-end-tag
+(1,305): unexpected-end-tag
+(1,310): unexpected-end-tag
+(1,320): unexpected-end-tag
+(1,331): unexpected-end-tag
+(1,339): unexpected-end-tag
+(1,347): unexpected-end-tag
+(1,355): unexpected-end-tag
+(1,365): end-tag-too-early
+(1,378): end-tag-too-early
+(1,387): end-tag-too-early
+(1,393): end-tag-too-early
+(1,399): end-tag-too-early
+(1,404): end-tag-too-early
+(1,415): end-tag-too-early
+(1,425): end-tag-too-early
+(1,432): end-tag-too-early
+(1,437): end-tag-too-early
+(1,442): end-tag-too-early
+(1,447): unexpected-end-tag
+(1,454): unexpected-end-tag
+(1,460): unexpected-end-tag
+(1,467): unexpected-end-tag
+(1,476): end-tag-too-early
+(1,486): end-tag-too-early
+(1,495): end-tag-too-early
+(1,513): expected-eof-but-got-end-tag
+(1,513): unexpected-end-tag
+(1,520): unexpected-end-tag
+(1,529): unexpected-end-tag
+(1,537): unexpected-end-tag
+(1,547): unexpected-end-tag
+(1,557): unexpected-end-tag
+(1,568): unexpected-end-tag
+(1,579): unexpected-end-tag
+(1,590): unexpected-end-tag
+(1,599): unexpected-end-tag
+(1,611): unexpected-end-tag
+(1,622): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <br>
+| <p>
+
+#data
+<table><tr></strong></b></em></i></u></strike></s></blink></tt></pre></big></small></font></select></h1></h2></h3></h4></h5></h6></body></br></a></img></title></span></style></script></table></th></td></tr></frame></area></link></param></hr></input></col></base></meta></basefont></bgsound></embed></spacer></p></dd></dt></caption></colgroup></tbody></tfoot></thead></address></blockquote></center></dir></div></dl></fieldset></listing></menu></ol></ul></li></nobr></wbr></form></button></marquee></object></html></frameset></head></iframe></image></isindex></noembed></noframes></noscript></optgroup></option></plaintext></textarea>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,20): unexpected-end-tag-implies-table-voodoo
+(1,20): unexpected-end-tag
+(1,24): unexpected-end-tag-implies-table-voodoo
+(1,24): unexpected-end-tag
+(1,29): unexpected-end-tag-implies-table-voodoo
+(1,29): unexpected-end-tag
+(1,33): unexpected-end-tag-implies-table-voodoo
+(1,33): unexpected-end-tag
+(1,37): unexpected-end-tag-implies-table-voodoo
+(1,37): unexpected-end-tag
+(1,46): unexpected-end-tag-implies-table-voodoo
+(1,46): unexpected-end-tag
+(1,50): unexpected-end-tag-implies-table-voodoo
+(1,50): unexpected-end-tag
+(1,58): unexpected-end-tag-implies-table-voodoo
+(1,58): unexpected-end-tag
+(1,63): unexpected-end-tag-implies-table-voodoo
+(1,63): unexpected-end-tag
+(1,69): unexpected-end-tag-implies-table-voodoo
+(1,69): end-tag-too-early
+(1,75): unexpected-end-tag-implies-table-voodoo
+(1,75): unexpected-end-tag
+(1,83): unexpected-end-tag-implies-table-voodoo
+(1,83): unexpected-end-tag
+(1,90): unexpected-end-tag-implies-table-voodoo
+(1,90): unexpected-end-tag
+(1,99): unexpected-end-tag-implies-table-voodoo
+(1,99): unexpected-end-tag
+(1,104): unexpected-end-tag-implies-table-voodoo
+(1,104): end-tag-too-early
+(1,109): unexpected-end-tag-implies-table-voodoo
+(1,109): end-tag-too-early
+(1,114): unexpected-end-tag-implies-table-voodoo
+(1,114): end-tag-too-early
+(1,119): unexpected-end-tag-implies-table-voodoo
+(1,119): end-tag-too-early
+(1,124): unexpected-end-tag-implies-table-voodoo
+(1,124): end-tag-too-early
+(1,129): unexpected-end-tag-implies-table-voodoo
+(1,129): end-tag-too-early
+(1,136): unexpected-end-tag-in-table-row
+(1,141): unexpected-end-tag-implies-table-voodoo
+(1,141): unexpected-end-tag-treated-as
+(1,145): unexpected-end-tag-implies-table-voodoo
+(1,145): unexpected-end-tag
+(1,151): unexpected-end-tag-implies-table-voodoo
+(1,151): unexpected-end-tag
+(1,159): unexpected-end-tag-implies-table-voodoo
+(1,159): unexpected-end-tag
+(1,166): unexpected-end-tag-implies-table-voodoo
+(1,166): unexpected-end-tag
+(1,174): unexpected-end-tag-implies-table-voodoo
+(1,174): unexpected-end-tag
+(1,183): unexpected-end-tag-implies-table-voodoo
+(1,183): unexpected-end-tag
+(1,196): unexpected-end-tag
+(1,201): unexpected-end-tag
+(1,206): unexpected-end-tag
+(1,214): unexpected-end-tag
+(1,221): unexpected-end-tag
+(1,228): unexpected-end-tag
+(1,236): unexpected-end-tag
+(1,241): unexpected-end-tag
+(1,249): unexpected-end-tag
+(1,255): unexpected-end-tag
+(1,262): unexpected-end-tag
+(1,269): unexpected-end-tag
+(1,280): unexpected-end-tag
+(1,290): unexpected-end-tag
+(1,298): unexpected-end-tag
+(1,307): unexpected-end-tag
+(1,311): unexpected-end-tag
+(1,316): unexpected-end-tag
+(1,321): unexpected-end-tag
+(1,331): unexpected-end-tag
+(1,342): unexpected-end-tag
+(1,350): unexpected-end-tag
+(1,358): unexpected-end-tag
+(1,366): unexpected-end-tag
+(1,376): end-tag-too-early
+(1,389): end-tag-too-early
+(1,398): end-tag-too-early
+(1,404): end-tag-too-early
+(1,410): end-tag-too-early
+(1,415): end-tag-too-early
+(1,426): end-tag-too-early
+(1,436): end-tag-too-early
+(1,443): end-tag-too-early
+(1,448): end-tag-too-early
+(1,453): end-tag-too-early
+(1,458): unexpected-end-tag
+(1,465): unexpected-end-tag
+(1,471): unexpected-end-tag
+(1,478): unexpected-end-tag
+(1,487): end-tag-too-early
+(1,497): end-tag-too-early
+(1,506): end-tag-too-early
+(1,524): expected-eof-but-got-end-tag
+(1,524): unexpected-end-tag
+(1,531): unexpected-end-tag
+(1,540): unexpected-end-tag
+(1,548): unexpected-end-tag
+(1,558): unexpected-end-tag
+(1,568): unexpected-end-tag
+(1,579): unexpected-end-tag
+(1,590): unexpected-end-tag
+(1,601): unexpected-end-tag
+(1,610): unexpected-end-tag
+(1,622): unexpected-end-tag
+(1,633): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <br>
+| <table>
+| <tbody>
+| <tr>
+| <p>
+
+#data
+<frameset>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,10): eof-in-frameset
+#document
+| <html>
+| <head>
+| <frameset>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests10.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests10.dat
new file mode 100644
index 000000000..3e9a9f19b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests10.dat
@@ -0,0 +1,847 @@
+#data
+<!DOCTYPE html><svg></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<!DOCTYPE html><svg></svg><![CDATA[a]]>
+#errors
+(1,28) expected-dashes-or-doctype
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <!-- [CDATA[a]] -->
+
+#data
+<!DOCTYPE html><body><svg></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<!DOCTYPE html><body><select><svg></svg></select>
+#errors
+(1,34) unexpected-start-tag-in-select
+(1,40) unexpected-end-tag-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!DOCTYPE html><body><select><option><svg></svg></option></select>
+#errors
+(1,42) unexpected-start-tag-in-select
+(1,48) unexpected-end-tag-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+
+#data
+<!DOCTYPE html><body><table><svg></svg></table>
+#errors
+(1,33) foster-parenting-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <table>
+
+#data
+<!DOCTYPE html><body><table><svg><g>foo</g></svg></table>
+#errors
+(1,33) foster-parenting-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <table>
+
+#data
+<!DOCTYPE html><body><table><svg><g>foo</g><g>bar</g></svg></table>
+#errors
+(1,33) foster-parenting-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <table>
+
+#data
+<!DOCTYPE html><body><table><tbody><svg><g>foo</g><g>bar</g></svg></tbody></table>
+#errors
+(1,40) foster-parenting-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <table>
+| <tbody>
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><svg><g>foo</g><g>bar</g></svg></tr></tbody></table>
+#errors
+(1,44) foster-parenting-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><td><svg><g>foo</g><g>bar</g></svg></td></tr></tbody></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><td><svg><g>foo</g><g>bar</g></svg><p>baz</td></tr></tbody></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g></svg><p>baz</caption></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
+#errors
+(1,65) unexpected-html-element-in-foreign-content
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g>baz</table><p>quux
+#errors
+(1,73) unexpected-end-tag
+(1,73) expected-one-end-tag-but-got-another
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| "baz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><colgroup><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
+#errors
+(1,43) foster-parenting-start-tag svg
+(1,66) unexpected HTML-like start tag token in foreign content
+(1,66) foster-parenting-start-tag
+(1,67) foster-parenting-character
+(1,68) foster-parenting-character
+(1,69) foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+| <table>
+| <colgroup>
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><tr><td><select><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
+#errors
+(1,49) unexpected-start-tag-in-select
+(1,52) unexpected-start-tag-in-select
+(1,59) unexpected-end-tag-in-select
+(1,62) unexpected-start-tag-in-select
+(1,69) unexpected-end-tag-in-select
+(1,72) unexpected-start-tag-in-select
+(1,83) unexpected-table-element-end-tag-in-select-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <select>
+| "foobarbaz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><select><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
+#errors
+(1,36) unexpected-start-tag-implies-table-voodoo
+(1,41) unexpected-start-tag-in-select
+(1,44) unexpected-start-tag-in-select
+(1,51) unexpected-end-tag-in-select
+(1,54) unexpected-start-tag-in-select
+(1,61) unexpected-end-tag-in-select
+(1,64) unexpected-start-tag-in-select
+(1,75) unexpected-table-element-end-tag-in-select-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "foobarbaz"
+| <table>
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body></body></html><svg><g>foo</g><g>bar</g><p>baz
+#errors
+(1,40) expected-eof-but-got-start-tag
+(1,63) unexpected-html-element-in-foreign-content
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body></body><svg><g>foo</g><g>bar</g><p>baz
+#errors
+(1,33) unexpected-start-tag-after-body
+(1,56) unexpected-html-element-in-foreign-content
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg g>
+| "foo"
+| <svg g>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><frameset><svg><g></g><g></g><p><span>
+#errors
+(1,30) unexpected-start-tag-in-frameset
+(1,33) unexpected-start-tag-in-frameset
+(1,37) unexpected-end-tag-in-frameset
+(1,40) unexpected-start-tag-in-frameset
+(1,44) unexpected-end-tag-in-frameset
+(1,47) unexpected-start-tag-in-frameset
+(1,53) unexpected-start-tag-in-frameset
+(1,53) eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><frameset></frameset><svg><g></g><g></g><p><span>
+#errors
+(1,41) unexpected-start-tag-after-frameset
+(1,44) unexpected-start-tag-after-frameset
+(1,48) unexpected-end-tag-after-frameset
+(1,51) unexpected-start-tag-after-frameset
+(1,55) unexpected-end-tag-after-frameset
+(1,58) unexpected-start-tag-after-frameset
+(1,64) unexpected-start-tag-after-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><body xlink:href=foo><svg xlink:href=foo></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| <svg svg>
+| xlink href="foo"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo></g></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <svg svg>
+| <svg g>
+| xlink href="foo"
+| xml lang="en"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo /></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <svg svg>
+| <svg g>
+| xlink href="foo"
+| xml lang="en"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo />bar</svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <svg svg>
+| <svg g>
+| xlink href="foo"
+| xml lang="en"
+| "bar"
+
+#data
+<svg></path>
+#errors
+(1,5) expected-doctype-but-got-start-tag
+(1,12) unexpected-end-tag
+(1,12) unexpected-end-tag
+(1,12) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<div><svg></div>a
+#errors
+(1,5) expected-doctype-but-got-start-tag
+(1,16) unexpected-end-tag
+(1,16) end-tag-too-early
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| "a"
+
+#data
+<div><svg><path></div>a
+#errors
+(1,5) expected-doctype-but-got-start-tag
+(1,22) unexpected-end-tag
+(1,22) end-tag-too-early
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| "a"
+
+#data
+<div><svg><path></svg><path>
+#errors
+(1,5) expected-doctype-but-got-start-tag
+(1,22) unexpected-end-tag
+(1,28) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| <path>
+
+#data
+<div><svg><path><foreignObject><math></div>a
+#errors
+(1,5) expected-doctype-but-got-start-tag
+(1,43) unexpected-end-tag
+(1,43) end-tag-too-early
+(1,44) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| <svg foreignObject>
+| <math math>
+| "a"
+
+#data
+<div><svg><path><foreignObject><p></div>a
+#errors
+(1,5) expected-doctype-but-got-start-tag
+(1,40) end-tag-too-early
+(1,41) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| <svg foreignObject>
+| <p>
+| "a"
+
+#data
+<!DOCTYPE html><svg><desc><div><svg><ul>a
+#errors
+(1,40) unexpected-html-element-in-foreign-content
+(1,41) expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg desc>
+| <div>
+| <svg svg>
+| <ul>
+| "a"
+
+#data
+<!DOCTYPE html><svg><desc><svg><ul>a
+#errors
+(1,35) unexpected-html-element-in-foreign-content
+(1,36) expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg desc>
+| <svg svg>
+| <ul>
+| "a"
+
+#data
+<!DOCTYPE html><p><svg><desc><p>
+#errors
+(1,32) expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <svg svg>
+| <svg desc>
+| <p>
+
+#data
+<!DOCTYPE html><p><svg><title><p>
+#errors
+(1,33) expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <svg svg>
+| <svg title>
+| <p>
+
+#data
+<div><svg><path><foreignObject><p></foreignObject><p>
+#errors
+(1,5) expected-doctype-but-got-start-tag
+(1,50) unexpected-end-tag
+(1,53) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <svg svg>
+| <svg path>
+| <svg foreignObject>
+| <p>
+| <p>
+
+#data
+<math><mi><div><object><div><span></span></div></object></div></mi><mi>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,71) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| <div>
+| <object>
+| <div>
+| <span>
+| <math mi>
+
+#data
+<math><mi><svg><foreignObject><div><div></div></div></foreignObject></svg></mi><mi>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,83) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| <svg svg>
+| <svg foreignObject>
+| <div>
+| <div>
+| <math mi>
+
+#data
+<svg><script></script><path>
+#errors
+(1,5) expected-doctype-but-got-start-tag
+(1,28) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg script>
+| <svg path>
+
+#data
+<table><svg></svg><tr>
+#errors
+(1,7) expected-doctype-but-got-start-tag
+(1,12) unexpected-start-tag-implies-table-voodoo
+(1,22) eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<math><mi><mglyph>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,18) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| <math mglyph>
+
+#data
+<math><mi><malignmark>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,22) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| <math malignmark>
+
+#data
+<math><mo><mglyph>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,18) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mo>
+| <math mglyph>
+
+#data
+<math><mo><malignmark>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,22) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mo>
+| <math malignmark>
+
+#data
+<math><mn><mglyph>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,18) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mn>
+| <math mglyph>
+
+#data
+<math><mn><malignmark>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,22) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mn>
+| <math malignmark>
+
+#data
+<math><ms><mglyph>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,18) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math ms>
+| <math mglyph>
+
+#data
+<math><ms><malignmark>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,22) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math ms>
+| <math malignmark>
+
+#data
+<math><mtext><mglyph>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,21) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mtext>
+| <math mglyph>
+
+#data
+<math><mtext><malignmark>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,25) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mtext>
+| <math malignmark>
+
+#data
+<math><annotation-xml><svg></svg></annotation-xml><mi>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,54) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <svg svg>
+| <math mi>
+
+#data
+<math><annotation-xml><svg><foreignObject><div><math><mi></mi></math><span></span></div></foreignObject><path></path></svg></annotation-xml><mi>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,144) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <svg svg>
+| <svg foreignObject>
+| <div>
+| <math math>
+| <math mi>
+| <span>
+| <svg path>
+| <math mi>
+
+#data
+<math><annotation-xml><svg><foreignObject><math><mi><svg></svg></mi><mo></mo></math><span></span></foreignObject><path></path></svg></annotation-xml><mi>
+#errors
+(1,6) expected-doctype-but-got-start-tag
+(1,153) expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <svg svg>
+| <svg foreignObject>
+| <math math>
+| <math mi>
+| <svg svg>
+| <math mo>
+| <span>
+| <svg path>
+| <math mi>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests11.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests11.dat
new file mode 100644
index 000000000..ad62cdf65
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests11.dat
@@ -0,0 +1,482 @@
+#data
+<!DOCTYPE html><body><svg attributeName='' attributeType='' baseFrequency='' baseProfile='' calcMode='' clipPathUnits='' contentScriptType='' contentStyleType='' diffuseConstant='' edgeMode='' externalResourcesRequired='' filterRes='' filterUnits='' glyphRef='' gradientTransform='' gradientUnits='' kernelMatrix='' kernelUnitLength='' keyPoints='' keySplines='' keyTimes='' lengthAdjust='' limitingConeAngle='' markerHeight='' markerUnits='' markerWidth='' maskContentUnits='' maskUnits='' numOctaves='' pathLength='' patternContentUnits='' patternTransform='' patternUnits='' pointsAtX='' pointsAtY='' pointsAtZ='' preserveAlpha='' preserveAspectRatio='' primitiveUnits='' refX='' refY='' repeatCount='' repeatDur='' requiredExtensions='' requiredFeatures='' specularConstant='' specularExponent='' spreadMethod='' startOffset='' stdDeviation='' stitchTiles='' surfaceScale='' systemLanguage='' tableValues='' targetX='' targetY='' textLength='' viewBox='' viewTarget='' xChannelSelector='' yChannelSelector='' zoomAndPan=''></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| attributeName=""
+| attributeType=""
+| baseFrequency=""
+| baseProfile=""
+| calcMode=""
+| clipPathUnits=""
+| contentscripttype=""
+| contentstyletype=""
+| diffuseConstant=""
+| edgeMode=""
+| externalresourcesrequired=""
+| filterUnits=""
+| filterres=""
+| glyphRef=""
+| gradientTransform=""
+| gradientUnits=""
+| kernelMatrix=""
+| kernelUnitLength=""
+| keyPoints=""
+| keySplines=""
+| keyTimes=""
+| lengthAdjust=""
+| limitingConeAngle=""
+| markerHeight=""
+| markerUnits=""
+| markerWidth=""
+| maskContentUnits=""
+| maskUnits=""
+| numOctaves=""
+| pathLength=""
+| patternContentUnits=""
+| patternTransform=""
+| patternUnits=""
+| pointsAtX=""
+| pointsAtY=""
+| pointsAtZ=""
+| preserveAlpha=""
+| preserveAspectRatio=""
+| primitiveUnits=""
+| refX=""
+| refY=""
+| repeatCount=""
+| repeatDur=""
+| requiredExtensions=""
+| requiredFeatures=""
+| specularConstant=""
+| specularExponent=""
+| spreadMethod=""
+| startOffset=""
+| stdDeviation=""
+| stitchTiles=""
+| surfaceScale=""
+| systemLanguage=""
+| tableValues=""
+| targetX=""
+| targetY=""
+| textLength=""
+| viewBox=""
+| viewTarget=""
+| xChannelSelector=""
+| yChannelSelector=""
+| zoomAndPan=""
+
+#data
+<!DOCTYPE html><BODY><SVG ATTRIBUTENAME='' ATTRIBUTETYPE='' BASEFREQUENCY='' BASEPROFILE='' CALCMODE='' CLIPPATHUNITS='' CONTENTSCRIPTTYPE='' CONTENTSTYLETYPE='' DIFFUSECONSTANT='' EDGEMODE='' EXTERNALRESOURCESREQUIRED='' FILTERRES='' FILTERUNITS='' GLYPHREF='' GRADIENTTRANSFORM='' GRADIENTUNITS='' KERNELMATRIX='' KERNELUNITLENGTH='' KEYPOINTS='' KEYSPLINES='' KEYTIMES='' LENGTHADJUST='' LIMITINGCONEANGLE='' MARKERHEIGHT='' MARKERUNITS='' MARKERWIDTH='' MASKCONTENTUNITS='' MASKUNITS='' NUMOCTAVES='' PATHLENGTH='' PATTERNCONTENTUNITS='' PATTERNTRANSFORM='' PATTERNUNITS='' POINTSATX='' POINTSATY='' POINTSATZ='' PRESERVEALPHA='' PRESERVEASPECTRATIO='' PRIMITIVEUNITS='' REFX='' REFY='' REPEATCOUNT='' REPEATDUR='' REQUIREDEXTENSIONS='' REQUIREDFEATURES='' SPECULARCONSTANT='' SPECULAREXPONENT='' SPREADMETHOD='' STARTOFFSET='' STDDEVIATION='' STITCHTILES='' SURFACESCALE='' SYSTEMLANGUAGE='' TABLEVALUES='' TARGETX='' TARGETY='' TEXTLENGTH='' VIEWBOX='' VIEWTARGET='' XCHANNELSELECTOR='' YCHANNELSELECTOR='' ZOOMANDPAN=''></SVG>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| attributeName=""
+| attributeType=""
+| baseFrequency=""
+| baseProfile=""
+| calcMode=""
+| clipPathUnits=""
+| contentscripttype=""
+| contentstyletype=""
+| diffuseConstant=""
+| edgeMode=""
+| externalresourcesrequired=""
+| filterUnits=""
+| filterres=""
+| glyphRef=""
+| gradientTransform=""
+| gradientUnits=""
+| kernelMatrix=""
+| kernelUnitLength=""
+| keyPoints=""
+| keySplines=""
+| keyTimes=""
+| lengthAdjust=""
+| limitingConeAngle=""
+| markerHeight=""
+| markerUnits=""
+| markerWidth=""
+| maskContentUnits=""
+| maskUnits=""
+| numOctaves=""
+| pathLength=""
+| patternContentUnits=""
+| patternTransform=""
+| patternUnits=""
+| pointsAtX=""
+| pointsAtY=""
+| pointsAtZ=""
+| preserveAlpha=""
+| preserveAspectRatio=""
+| primitiveUnits=""
+| refX=""
+| refY=""
+| repeatCount=""
+| repeatDur=""
+| requiredExtensions=""
+| requiredFeatures=""
+| specularConstant=""
+| specularExponent=""
+| spreadMethod=""
+| startOffset=""
+| stdDeviation=""
+| stitchTiles=""
+| surfaceScale=""
+| systemLanguage=""
+| tableValues=""
+| targetX=""
+| targetY=""
+| textLength=""
+| viewBox=""
+| viewTarget=""
+| xChannelSelector=""
+| yChannelSelector=""
+| zoomAndPan=""
+
+#data
+<!DOCTYPE html><body><svg attributename='' attributetype='' basefrequency='' baseprofile='' calcmode='' clippathunits='' contentscripttype='' contentstyletype='' diffuseconstant='' edgemode='' externalresourcesrequired='' filterres='' filterunits='' glyphref='' gradienttransform='' gradientunits='' kernelmatrix='' kernelunitlength='' keypoints='' keysplines='' keytimes='' lengthadjust='' limitingconeangle='' markerheight='' markerunits='' markerwidth='' maskcontentunits='' maskunits='' numoctaves='' pathlength='' patterncontentunits='' patterntransform='' patternunits='' pointsatx='' pointsaty='' pointsatz='' preservealpha='' preserveaspectratio='' primitiveunits='' refx='' refy='' repeatcount='' repeatdur='' requiredextensions='' requiredfeatures='' specularconstant='' specularexponent='' spreadmethod='' startoffset='' stddeviation='' stitchtiles='' surfacescale='' systemlanguage='' tablevalues='' targetx='' targety='' textlength='' viewbox='' viewtarget='' xchannelselector='' ychannelselector='' zoomandpan=''></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| attributeName=""
+| attributeType=""
+| baseFrequency=""
+| baseProfile=""
+| calcMode=""
+| clipPathUnits=""
+| contentscripttype=""
+| contentstyletype=""
+| diffuseConstant=""
+| edgeMode=""
+| externalresourcesrequired=""
+| filterUnits=""
+| filterres=""
+| glyphRef=""
+| gradientTransform=""
+| gradientUnits=""
+| kernelMatrix=""
+| kernelUnitLength=""
+| keyPoints=""
+| keySplines=""
+| keyTimes=""
+| lengthAdjust=""
+| limitingConeAngle=""
+| markerHeight=""
+| markerUnits=""
+| markerWidth=""
+| maskContentUnits=""
+| maskUnits=""
+| numOctaves=""
+| pathLength=""
+| patternContentUnits=""
+| patternTransform=""
+| patternUnits=""
+| pointsAtX=""
+| pointsAtY=""
+| pointsAtZ=""
+| preserveAlpha=""
+| preserveAspectRatio=""
+| primitiveUnits=""
+| refX=""
+| refY=""
+| repeatCount=""
+| repeatDur=""
+| requiredExtensions=""
+| requiredFeatures=""
+| specularConstant=""
+| specularExponent=""
+| spreadMethod=""
+| startOffset=""
+| stdDeviation=""
+| stitchTiles=""
+| surfaceScale=""
+| systemLanguage=""
+| tableValues=""
+| targetX=""
+| targetY=""
+| textLength=""
+| viewBox=""
+| viewTarget=""
+| xChannelSelector=""
+| yChannelSelector=""
+| zoomAndPan=""
+
+#data
+<!DOCTYPE html><body><math attributeName='' attributeType='' baseFrequency='' baseProfile='' calcMode='' clipPathUnits='' contentScriptType='' contentStyleType='' diffuseConstant='' edgeMode='' externalResourcesRequired='' filterRes='' filterUnits='' glyphRef='' gradientTransform='' gradientUnits='' kernelMatrix='' kernelUnitLength='' keyPoints='' keySplines='' keyTimes='' lengthAdjust='' limitingConeAngle='' markerHeight='' markerUnits='' markerWidth='' maskContentUnits='' maskUnits='' numOctaves='' pathLength='' patternContentUnits='' patternTransform='' patternUnits='' pointsAtX='' pointsAtY='' pointsAtZ='' preserveAlpha='' preserveAspectRatio='' primitiveUnits='' refX='' refY='' repeatCount='' repeatDur='' requiredExtensions='' requiredFeatures='' specularConstant='' specularExponent='' spreadMethod='' startOffset='' stdDeviation='' stitchTiles='' surfaceScale='' systemLanguage='' tableValues='' targetX='' targetY='' textLength='' viewBox='' viewTarget='' xChannelSelector='' yChannelSelector='' zoomAndPan=''></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| attributename=""
+| attributetype=""
+| basefrequency=""
+| baseprofile=""
+| calcmode=""
+| clippathunits=""
+| contentscripttype=""
+| contentstyletype=""
+| diffuseconstant=""
+| edgemode=""
+| externalresourcesrequired=""
+| filterres=""
+| filterunits=""
+| glyphref=""
+| gradienttransform=""
+| gradientunits=""
+| kernelmatrix=""
+| kernelunitlength=""
+| keypoints=""
+| keysplines=""
+| keytimes=""
+| lengthadjust=""
+| limitingconeangle=""
+| markerheight=""
+| markerunits=""
+| markerwidth=""
+| maskcontentunits=""
+| maskunits=""
+| numoctaves=""
+| pathlength=""
+| patterncontentunits=""
+| patterntransform=""
+| patternunits=""
+| pointsatx=""
+| pointsaty=""
+| pointsatz=""
+| preservealpha=""
+| preserveaspectratio=""
+| primitiveunits=""
+| refx=""
+| refy=""
+| repeatcount=""
+| repeatdur=""
+| requiredextensions=""
+| requiredfeatures=""
+| specularconstant=""
+| specularexponent=""
+| spreadmethod=""
+| startoffset=""
+| stddeviation=""
+| stitchtiles=""
+| surfacescale=""
+| systemlanguage=""
+| tablevalues=""
+| targetx=""
+| targety=""
+| textlength=""
+| viewbox=""
+| viewtarget=""
+| xchannelselector=""
+| ychannelselector=""
+| zoomandpan=""
+
+#data
+<!DOCTYPE html><body><svg><altGlyph /><altGlyphDef /><altGlyphItem /><animateColor /><animateMotion /><animateTransform /><clipPath /><feBlend /><feColorMatrix /><feComponentTransfer /><feComposite /><feConvolveMatrix /><feDiffuseLighting /><feDisplacementMap /><feDistantLight /><feFlood /><feFuncA /><feFuncB /><feFuncG /><feFuncR /><feGaussianBlur /><feImage /><feMerge /><feMergeNode /><feMorphology /><feOffset /><fePointLight /><feSpecularLighting /><feSpotLight /><feTile /><feTurbulence /><foreignObject /><glyphRef /><linearGradient /><radialGradient /><textPath /></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg altGlyph>
+| <svg altGlyphDef>
+| <svg altGlyphItem>
+| <svg animateColor>
+| <svg animateMotion>
+| <svg animateTransform>
+| <svg clipPath>
+| <svg feBlend>
+| <svg feColorMatrix>
+| <svg feComponentTransfer>
+| <svg feComposite>
+| <svg feConvolveMatrix>
+| <svg feDiffuseLighting>
+| <svg feDisplacementMap>
+| <svg feDistantLight>
+| <svg feFlood>
+| <svg feFuncA>
+| <svg feFuncB>
+| <svg feFuncG>
+| <svg feFuncR>
+| <svg feGaussianBlur>
+| <svg feImage>
+| <svg feMerge>
+| <svg feMergeNode>
+| <svg feMorphology>
+| <svg feOffset>
+| <svg fePointLight>
+| <svg feSpecularLighting>
+| <svg feSpotLight>
+| <svg feTile>
+| <svg feTurbulence>
+| <svg foreignObject>
+| <svg glyphRef>
+| <svg linearGradient>
+| <svg radialGradient>
+| <svg textPath>
+
+#data
+<!DOCTYPE html><body><svg><altglyph /><altglyphdef /><altglyphitem /><animatecolor /><animatemotion /><animatetransform /><clippath /><feblend /><fecolormatrix /><fecomponenttransfer /><fecomposite /><feconvolvematrix /><fediffuselighting /><fedisplacementmap /><fedistantlight /><feflood /><fefunca /><fefuncb /><fefuncg /><fefuncr /><fegaussianblur /><feimage /><femerge /><femergenode /><femorphology /><feoffset /><fepointlight /><fespecularlighting /><fespotlight /><fetile /><feturbulence /><foreignobject /><glyphref /><lineargradient /><radialgradient /><textpath /></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg altGlyph>
+| <svg altGlyphDef>
+| <svg altGlyphItem>
+| <svg animateColor>
+| <svg animateMotion>
+| <svg animateTransform>
+| <svg clipPath>
+| <svg feBlend>
+| <svg feColorMatrix>
+| <svg feComponentTransfer>
+| <svg feComposite>
+| <svg feConvolveMatrix>
+| <svg feDiffuseLighting>
+| <svg feDisplacementMap>
+| <svg feDistantLight>
+| <svg feFlood>
+| <svg feFuncA>
+| <svg feFuncB>
+| <svg feFuncG>
+| <svg feFuncR>
+| <svg feGaussianBlur>
+| <svg feImage>
+| <svg feMerge>
+| <svg feMergeNode>
+| <svg feMorphology>
+| <svg feOffset>
+| <svg fePointLight>
+| <svg feSpecularLighting>
+| <svg feSpotLight>
+| <svg feTile>
+| <svg feTurbulence>
+| <svg foreignObject>
+| <svg glyphRef>
+| <svg linearGradient>
+| <svg radialGradient>
+| <svg textPath>
+
+#data
+<!DOCTYPE html><BODY><SVG><ALTGLYPH /><ALTGLYPHDEF /><ALTGLYPHITEM /><ANIMATECOLOR /><ANIMATEMOTION /><ANIMATETRANSFORM /><CLIPPATH /><FEBLEND /><FECOLORMATRIX /><FECOMPONENTTRANSFER /><FECOMPOSITE /><FECONVOLVEMATRIX /><FEDIFFUSELIGHTING /><FEDISPLACEMENTMAP /><FEDISTANTLIGHT /><FEFLOOD /><FEFUNCA /><FEFUNCB /><FEFUNCG /><FEFUNCR /><FEGAUSSIANBLUR /><FEIMAGE /><FEMERGE /><FEMERGENODE /><FEMORPHOLOGY /><FEOFFSET /><FEPOINTLIGHT /><FESPECULARLIGHTING /><FESPOTLIGHT /><FETILE /><FETURBULENCE /><FOREIGNOBJECT /><GLYPHREF /><LINEARGRADIENT /><RADIALGRADIENT /><TEXTPATH /></SVG>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg altGlyph>
+| <svg altGlyphDef>
+| <svg altGlyphItem>
+| <svg animateColor>
+| <svg animateMotion>
+| <svg animateTransform>
+| <svg clipPath>
+| <svg feBlend>
+| <svg feColorMatrix>
+| <svg feComponentTransfer>
+| <svg feComposite>
+| <svg feConvolveMatrix>
+| <svg feDiffuseLighting>
+| <svg feDisplacementMap>
+| <svg feDistantLight>
+| <svg feFlood>
+| <svg feFuncA>
+| <svg feFuncB>
+| <svg feFuncG>
+| <svg feFuncR>
+| <svg feGaussianBlur>
+| <svg feImage>
+| <svg feMerge>
+| <svg feMergeNode>
+| <svg feMorphology>
+| <svg feOffset>
+| <svg fePointLight>
+| <svg feSpecularLighting>
+| <svg feSpotLight>
+| <svg feTile>
+| <svg feTurbulence>
+| <svg foreignObject>
+| <svg glyphRef>
+| <svg linearGradient>
+| <svg radialGradient>
+| <svg textPath>
+
+#data
+<!DOCTYPE html><body><math><altGlyph /><altGlyphDef /><altGlyphItem /><animateColor /><animateMotion /><animateTransform /><clipPath /><feBlend /><feColorMatrix /><feComponentTransfer /><feComposite /><feConvolveMatrix /><feDiffuseLighting /><feDisplacementMap /><feDistantLight /><feFlood /><feFuncA /><feFuncB /><feFuncG /><feFuncR /><feGaussianBlur /><feImage /><feMerge /><feMergeNode /><feMorphology /><feOffset /><fePointLight /><feSpecularLighting /><feSpotLight /><feTile /><feTurbulence /><foreignObject /><glyphRef /><linearGradient /><radialGradient /><textPath /></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math altglyph>
+| <math altglyphdef>
+| <math altglyphitem>
+| <math animatecolor>
+| <math animatemotion>
+| <math animatetransform>
+| <math clippath>
+| <math feblend>
+| <math fecolormatrix>
+| <math fecomponenttransfer>
+| <math fecomposite>
+| <math feconvolvematrix>
+| <math fediffuselighting>
+| <math fedisplacementmap>
+| <math fedistantlight>
+| <math feflood>
+| <math fefunca>
+| <math fefuncb>
+| <math fefuncg>
+| <math fefuncr>
+| <math fegaussianblur>
+| <math feimage>
+| <math femerge>
+| <math femergenode>
+| <math femorphology>
+| <math feoffset>
+| <math fepointlight>
+| <math fespecularlighting>
+| <math fespotlight>
+| <math fetile>
+| <math feturbulence>
+| <math foreignobject>
+| <math glyphref>
+| <math lineargradient>
+| <math radialgradient>
+| <math textpath>
+
+#data
+<!DOCTYPE html><body><svg><solidColor /></svg>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg solidcolor>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests12.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests12.dat
new file mode 100644
index 000000000..63107d277
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests12.dat
@@ -0,0 +1,62 @@
+#data
+<!DOCTYPE html><body><p>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "foo"
+| <math math>
+| <math mtext>
+| <i>
+| "baz"
+| <math annotation-xml>
+| <svg svg>
+| <svg desc>
+| <b>
+| "eggs"
+| <svg g>
+| <svg foreignObject>
+| <p>
+| "spam"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <img>
+| <svg g>
+| "quux"
+| "bar"
+
+#data
+<!DOCTYPE html><body>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "foo"
+| <math math>
+| <math mtext>
+| <i>
+| "baz"
+| <math annotation-xml>
+| <svg svg>
+| <svg desc>
+| <b>
+| "eggs"
+| <svg g>
+| <svg foreignObject>
+| <p>
+| "spam"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <img>
+| <svg g>
+| "quux"
+| "bar"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests14.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests14.dat
new file mode 100644
index 000000000..a08b7649e
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests14.dat
@@ -0,0 +1,75 @@
+#data
+<!DOCTYPE html><html><body><xyz:abc></xyz:abc>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <xyz:abc>
+
+#data
+<!DOCTYPE html><html><body><xyz:abc></xyz:abc><span></span>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <xyz:abc>
+| <span>
+
+#data
+<!DOCTYPE html><html><html abc:def=gh><xyz:abc></xyz:abc>
+#errors
+(1,38): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| abc:def="gh"
+| <head>
+| <body>
+| <xyz:abc>
+
+#data
+<!DOCTYPE html><html xml:lang=bar><html xml:lang=foo>
+#errors
+(1,53): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| xml:lang="bar"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><html 123=456>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| 123="456"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><html 123=456><html 789=012>
+#errors
+(1,43): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| 123="456"
+| 789="012"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><html><body 789=012>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| 789="012"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests15.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests15.dat
new file mode 100644
index 000000000..93d06a871
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests15.dat
@@ -0,0 +1,216 @@
+#data
+<!DOCTYPE html><p><b><i><u></p> <p>X
+#errors
+(1,31): unexpected-end-tag
+(1,36): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <i>
+| <u>
+| <b>
+| <i>
+| <u>
+| " "
+| <p>
+| "X"
+
+#data
+<p><b><i><u></p>
+<p>X
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,16): unexpected-end-tag
+(2,4): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| <i>
+| <u>
+| <b>
+| <i>
+| <u>
+| "
+"
+| <p>
+| "X"
+
+#data
+<!doctype html></html> <head>
+#errors
+(1,29): expected-eof-but-got-start-tag
+(1,29): unexpected-start-tag-ignored
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " "
+
+#data
+<!doctype html></body><meta>
+#errors
+(1,28): unexpected-start-tag-after-body
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <meta>
+
+#data
+<html></html><!-- foo -->
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <!-- foo -->
+
+#data
+<!doctype html></body><title>X</title>
+#errors
+(1,29): unexpected-start-tag-after-body
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "X"
+
+#data
+<!doctype html><table> X<meta></table>
+#errors
+(1,23): foster-parenting-character
+(1,24): foster-parenting-character
+(1,30): foster-parenting-start-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " X"
+| <meta>
+| <table>
+
+#data
+<!doctype html><table> x</table>
+#errors
+(1,23): foster-parenting-character
+(1,24): foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " x"
+| <table>
+
+#data
+<!doctype html><table> x </table>
+#errors
+(1,23): foster-parenting-character
+(1,24): foster-parenting-character
+(1,25): foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " x "
+| <table>
+
+#data
+<!doctype html><table><tr> x</table>
+#errors
+(1,27): foster-parenting-character
+(1,28): foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " x"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table>X<style> <tr>x </style> </table>
+#errors
+(1,23): foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <table>
+| <style>
+| " <tr>x "
+| " "
+
+#data
+<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div>
+#errors
+(1,30): foster-parenting-start-tag
+(1,31): foster-parenting-character
+(1,32): foster-parenting-character
+(1,33): foster-parenting-character
+(1,37): foster-parenting-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| <a>
+| "foo"
+| <table>
+| " "
+| <tbody>
+| <tr>
+| <td>
+| "bar"
+| " "
+
+#data
+<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,7): unexpected-start-tag-ignored
+(1,15): unexpected-end-tag
+(1,23): unexpected-end-tag
+(1,33): unexpected-start-tag
+(1,99): expected-named-closing-tag-but-got-eof
+(1,99): eof-in-frameset
+#document
+| <html>
+| <head>
+| <frameset>
+| <frame>
+| <frameset>
+| <frame>
+| <noframes>
+| "</frameset><noframes>"
+
+#data
+<!DOCTYPE html><object></html>
+#errors
+(1,30): expected-body-in-scope
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <object>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests16.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests16.dat
new file mode 100644
index 000000000..121915777
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests16.dat
@@ -0,0 +1,2458 @@
+#data
+<!doctype html><script>
+#errors
+(1,23): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<!doctype html><script>a
+#errors
+(1,24): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "a"
+| <body>
+
+#data
+<!doctype html><script><
+#errors
+(1,24): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<"
+| <body>
+
+#data
+<!doctype html><script></
+#errors
+(1,25): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</"
+| <body>
+
+#data
+<!doctype html><script></S
+#errors
+(1,26): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</S"
+| <body>
+
+#data
+<!doctype html><script></SC
+#errors
+(1,27): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SC"
+| <body>
+
+#data
+<!doctype html><script></SCR
+#errors
+(1,28): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SCR"
+| <body>
+
+#data
+<!doctype html><script></SCRI
+#errors
+(1,29): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SCRI"
+| <body>
+
+#data
+<!doctype html><script></SCRIP
+#errors
+(1,30): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SCRIP"
+| <body>
+
+#data
+<!doctype html><script></SCRIPT
+#errors
+(1,31): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</SCRIPT"
+| <body>
+
+#data
+<!doctype html><script></SCRIPT
+#errors
+(1,32): expected-attribute-name-but-got-eof
+(1,32): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<!doctype html><script></s
+#errors
+(1,26): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</s"
+| <body>
+
+#data
+<!doctype html><script></sc
+#errors
+(1,27): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</sc"
+| <body>
+
+#data
+<!doctype html><script></scr
+#errors
+(1,28): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</scr"
+| <body>
+
+#data
+<!doctype html><script></scri
+#errors
+(1,29): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</scri"
+| <body>
+
+#data
+<!doctype html><script></scrip
+#errors
+(1,30): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</scrip"
+| <body>
+
+#data
+<!doctype html><script></script
+#errors
+(1,31): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "</script"
+| <body>
+
+#data
+<!doctype html><script></script
+#errors
+(1,32): expected-attribute-name-but-got-eof
+(1,32): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<!doctype html><script><!
+#errors
+(1,25): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!"
+| <body>
+
+#data
+<!doctype html><script><!a
+#errors
+(1,26): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!a"
+| <body>
+
+#data
+<!doctype html><script><!-
+#errors
+(1,26): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!-"
+| <body>
+
+#data
+<!doctype html><script><!-a
+#errors
+(1,27): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!-a"
+| <body>
+
+#data
+<!doctype html><script><!--
+#errors
+(1,27): expected-named-closing-tag-but-got-eof
+(1,27): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--"
+| <body>
+
+#data
+<!doctype html><script><!--a
+#errors
+(1,28): expected-named-closing-tag-but-got-eof
+(1,28): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--a"
+| <body>
+
+#data
+<!doctype html><script><!--<
+#errors
+(1,28): expected-named-closing-tag-but-got-eof
+(1,28): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<"
+| <body>
+
+#data
+<!doctype html><script><!--<a
+#errors
+(1,29): expected-named-closing-tag-but-got-eof
+(1,29): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<a"
+| <body>
+
+#data
+<!doctype html><script><!--</
+#errors
+(1,29): expected-named-closing-tag-but-got-eof
+(1,29): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--</"
+| <body>
+
+#data
+<!doctype html><script><!--</script
+#errors
+(1,35): expected-named-closing-tag-but-got-eof
+(1,35): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--</script"
+| <body>
+
+#data
+<!doctype html><script><!--</script
+#errors
+(1,36): expected-attribute-name-but-got-eof
+(1,36): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--"
+| <body>
+
+#data
+<!doctype html><script><!--<s
+#errors
+(1,29): expected-named-closing-tag-but-got-eof
+(1,29): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<s"
+| <body>
+
+#data
+<!doctype html><script><!--<script
+#errors
+(1,34): expected-named-closing-tag-but-got-eof
+(1,34): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script"
+| <body>
+
+#data
+<!doctype html><script><!--<script
+#errors
+(1,35): eof-in-script-in-script
+(1,35): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script "
+| <body>
+
+#data
+<!doctype html><script><!--<script <
+#errors
+(1,36): eof-in-script-in-script
+(1,36): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script <"
+| <body>
+
+#data
+<!doctype html><script><!--<script <a
+#errors
+(1,37): eof-in-script-in-script
+(1,37): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script <a"
+| <body>
+
+#data
+<!doctype html><script><!--<script </
+#errors
+(1,37): eof-in-script-in-script
+(1,37): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </"
+| <body>
+
+#data
+<!doctype html><script><!--<script </s
+#errors
+(1,38): eof-in-script-in-script
+(1,38): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </s"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script
+#errors
+(1,43): eof-in-script-in-script
+(1,43): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script"
+| <body>
+
+#data
+<!doctype html><script><!--<script </scripta
+#errors
+(1,44): eof-in-script-in-script
+(1,44): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </scripta"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script
+#errors
+(1,44): expected-named-closing-tag-but-got-eof
+(1,44): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<!doctype html><script><!--<script </script>
+#errors
+(1,44): expected-named-closing-tag-but-got-eof
+(1,44): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script>"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script/
+#errors
+(1,44): expected-named-closing-tag-but-got-eof
+(1,44): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script/"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script <
+#errors
+(1,45): expected-named-closing-tag-but-got-eof
+(1,45): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script <"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script <a
+#errors
+(1,46): expected-named-closing-tag-but-got-eof
+(1,46): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script <a"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </
+#errors
+(1,46): expected-named-closing-tag-but-got-eof
+(1,46): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script </"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </script
+#errors
+(1,52): expected-named-closing-tag-but-got-eof
+(1,52): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script </script"
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </script
+#errors
+(1,53): expected-attribute-name-but-got-eof
+(1,53): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </script/
+#errors
+(1,53): unexpected-EOF-after-solidus-in-tag
+(1,53): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<!doctype html><script><!--<script </script </script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<!doctype html><script><!--<script -
+#errors
+(1,36): eof-in-script-in-script
+(1,36): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -"
+| <body>
+
+#data
+<!doctype html><script><!--<script -a
+#errors
+(1,37): eof-in-script-in-script
+(1,37): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -a"
+| <body>
+
+#data
+<!doctype html><script><!--<script -<
+#errors
+(1,37): eof-in-script-in-script
+(1,37): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -<"
+| <body>
+
+#data
+<!doctype html><script><!--<script --
+#errors
+(1,37): eof-in-script-in-script
+(1,37): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --"
+| <body>
+
+#data
+<!doctype html><script><!--<script --a
+#errors
+(1,38): eof-in-script-in-script
+(1,38): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --a"
+| <body>
+
+#data
+<!doctype html><script><!--<script --<
+#errors
+(1,38): eof-in-script-in-script
+(1,38): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --<"
+| <body>
+
+#data
+<!doctype html><script><!--<script -->
+#errors
+(1,38): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<!doctype html><script><!--<script --><
+#errors
+(1,39): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --><"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></
+#errors
+(1,40): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --></"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></script
+#errors
+(1,46): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script --></script"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></script
+#errors
+(1,47): expected-attribute-name-but-got-eof
+(1,47): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></script/
+#errors
+(1,47): unexpected-EOF-after-solidus-in-tag
+(1,47): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<!doctype html><script><!--<script --></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<!doctype html><script><!--<script><\/script>--></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script><\/script>-->"
+| <body>
+
+#data
+<!doctype html><script><!--<script></scr'+'ipt>--></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt>-->"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>--><!--</script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>--><!--"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>-- ></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>-- >"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>- -></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>- ->"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>- - ></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>- - >"
+| <body>
+
+#data
+<!doctype html><script><!--<script></script><script></script>-></script>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>->"
+| <body>
+
+#data
+<!doctype html><script><!--<script>--!></script>X
+#errors
+(1,49): expected-named-closing-tag-but-got-eof
+(1,49): unexpected-EOF-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script>--!></script>X"
+| <body>
+
+#data
+<!doctype html><script><!--<scr'+'ipt></script>--></script>
+#errors
+(1,59): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<scr'+'ipt>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><script><!--<script></scr'+'ipt></script>X
+#errors
+(1,57): expected-named-closing-tag-but-got-eof
+(1,57): unexpected-eof-in-text-mode
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt></script>X"
+| <body>
+
+#data
+<!doctype html><style><!--<style></style>--></style>
+#errors
+(1,52): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--<style>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><style><!--</style>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "X"
+
+#data
+<!doctype html><style><!--...</style>...--></style>
+#errors
+(1,51): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <body>
+| "...-->"
+
+#data
+<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
+| <body>
+| "X"
+
+#data
+<!doctype html><style><!--...<style><!--...--!></style>--></style>
+#errors
+(1,66): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--...<style><!--...--!>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><style><!--...</style><!-- --><style>@import ...</style>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <!-- -->
+| <style>
+| "@import ..."
+| <body>
+
+#data
+<!doctype html><style>...<style><!--...</style><!-- --></style>
+#errors
+(1,63): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "...<style><!--..."
+| <!-- -->
+| <body>
+
+#data
+<!doctype html><style>...<!--[if IE]><style>...</style>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <style>
+| "...<!--[if IE]><style>..."
+| <body>
+| "X"
+
+#data
+<!doctype html><title><!--<title></title>--></title>
+#errors
+(1,52): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "<!--<title>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><title>&lt;/title></title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "</title>"
+| <body>
+
+#data
+<!doctype html><title>foo/title><link></head><body>X
+#errors
+(1,52): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "foo/title><link></head><body>X"
+| <body>
+
+#data
+<!doctype html><noscript><!--<noscript></noscript>--></noscript>
+#errors
+(1,64): unexpected-end-tag
+#script-on
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<!--<noscript>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><noscript><!--<noscript></noscript>--></noscript>
+#errors
+#script-off
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| <!-- <noscript></noscript> -->
+| <body>
+
+#data
+<!doctype html><noscript><!--</noscript>X<noscript>--></noscript>
+#errors
+#script-on
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<!--"
+| <body>
+| "X"
+| <noscript>
+| "-->"
+
+#data
+<!doctype html><noscript><!--</noscript>X<noscript>--></noscript>
+#errors
+#script-off
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| <!-- </noscript>X<noscript> -->
+| <body>
+
+#data
+<!doctype html><noscript><iframe></noscript>X
+#errors
+#script-on
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| "<iframe>"
+| <body>
+| "X"
+
+#data
+<!doctype html><noscript><iframe></noscript>X
+#errors
+ * (1,34) unexpected token in head noscript
+ * (1,46) unexpected EOF
+#script-off
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noscript>
+| <body>
+| <iframe>
+| "</noscript>X"
+
+#data
+<!doctype html><noframes><!--<noframes></noframes>--></noframes>
+#errors
+(1,64): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noframes>
+| "<!--<noframes>"
+| <body>
+| "-->"
+
+#data
+<!doctype html><noframes><body><script><!--...</script></body></noframes></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <noframes>
+| "<body><script><!--...</script></body>"
+| <body>
+
+#data
+<!doctype html><textarea><!--<textarea></textarea>--></textarea>
+#errors
+(1,64): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--<textarea>"
+| "-->"
+
+#data
+<!doctype html><textarea>&lt;/textarea></textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "</textarea>"
+
+#data
+<!doctype html><textarea>&lt;</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<"
+
+#data
+<!doctype html><textarea>a&lt;b</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "a<b"
+
+#data
+<!doctype html><iframe><!--<iframe></iframe>--></iframe>
+#errors
+(1,56): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "<!--<iframe>"
+| "-->"
+
+#data
+<!doctype html><iframe>...<!--X->...<!--/X->...</iframe>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "...<!--X->...<!--/X->..."
+
+#data
+<!doctype html><xmp><!--<xmp></xmp>--></xmp>
+#errors
+(1,44): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <xmp>
+| "<!--<xmp>"
+| "-->"
+
+#data
+<!doctype html><noembed><!--<noembed></noembed>--></noembed>
+#errors
+(1,60): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <noembed>
+| "<!--<noembed>"
+| "-->"
+
+#data
+<script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,8): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<script>a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,9): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "a"
+| <body>
+
+#data
+<script><
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,9): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<"
+| <body>
+
+#data
+<script></
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,10): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</"
+| <body>
+
+#data
+<script></S
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,11): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</S"
+| <body>
+
+#data
+<script></SC
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,12): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</SC"
+| <body>
+
+#data
+<script></SCR
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,13): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</SCR"
+| <body>
+
+#data
+<script></SCRI
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,14): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</SCRI"
+| <body>
+
+#data
+<script></SCRIP
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,15): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</SCRIP"
+| <body>
+
+#data
+<script></SCRIPT
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,16): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</SCRIPT"
+| <body>
+
+#data
+<script></SCRIPT
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,17): expected-attribute-name-but-got-eof
+(1,17): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<script></s
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,11): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</s"
+| <body>
+
+#data
+<script></sc
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,12): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</sc"
+| <body>
+
+#data
+<script></scr
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,13): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</scr"
+| <body>
+
+#data
+<script></scri
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,14): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</scri"
+| <body>
+
+#data
+<script></scrip
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,15): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</scrip"
+| <body>
+
+#data
+<script></script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,16): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</script"
+| <body>
+
+#data
+<script></script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,17): expected-attribute-name-but-got-eof
+(1,17): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<script><!
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,10): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!"
+| <body>
+
+#data
+<script><!a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,11): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!a"
+| <body>
+
+#data
+<script><!-
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,11): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!-"
+| <body>
+
+#data
+<script><!-a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,12): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!-a"
+| <body>
+
+#data
+<script><!--
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,12): expected-named-closing-tag-but-got-eof
+(1,12): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--"
+| <body>
+
+#data
+<script><!--a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,13): expected-named-closing-tag-but-got-eof
+(1,13): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--a"
+| <body>
+
+#data
+<script><!--<
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,13): expected-named-closing-tag-but-got-eof
+(1,13): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<"
+| <body>
+
+#data
+<script><!--<a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,14): expected-named-closing-tag-but-got-eof
+(1,14): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<a"
+| <body>
+
+#data
+<script><!--</
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,14): expected-named-closing-tag-but-got-eof
+(1,14): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--</"
+| <body>
+
+#data
+<script><!--</script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,20): expected-named-closing-tag-but-got-eof
+(1,20): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--</script"
+| <body>
+
+#data
+<script><!--</script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,21): expected-attribute-name-but-got-eof
+(1,21): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--"
+| <body>
+
+#data
+<script><!--<s
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,14): expected-named-closing-tag-but-got-eof
+(1,14): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<s"
+| <body>
+
+#data
+<script><!--<script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,19): expected-named-closing-tag-but-got-eof
+(1,19): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script"
+| <body>
+
+#data
+<script><!--<script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,20): eof-in-script-in-script
+(1,20): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script "
+| <body>
+
+#data
+<script><!--<script <
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,21): eof-in-script-in-script
+(1,21): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script <"
+| <body>
+
+#data
+<script><!--<script <a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,22): eof-in-script-in-script
+(1,22): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script <a"
+| <body>
+
+#data
+<script><!--<script </
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,22): eof-in-script-in-script
+(1,22): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </"
+| <body>
+
+#data
+<script><!--<script </s
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,23): eof-in-script-in-script
+(1,23): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </s"
+| <body>
+
+#data
+<script><!--<script </script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,28): eof-in-script-in-script
+(1,28): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script"
+| <body>
+
+#data
+<script><!--<script </scripta
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,29): eof-in-script-in-script
+(1,29): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </scripta"
+| <body>
+
+#data
+<script><!--<script </script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,29): expected-named-closing-tag-but-got-eof
+(1,29): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<script><!--<script </script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,29): expected-named-closing-tag-but-got-eof
+(1,29): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script>"
+| <body>
+
+#data
+<script><!--<script </script/
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,29): expected-named-closing-tag-but-got-eof
+(1,29): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script/"
+| <body>
+
+#data
+<script><!--<script </script <
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,30): expected-named-closing-tag-but-got-eof
+(1,30): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script <"
+| <body>
+
+#data
+<script><!--<script </script <a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,31): expected-named-closing-tag-but-got-eof
+(1,31): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script <a"
+| <body>
+
+#data
+<script><!--<script </script </
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,31): expected-named-closing-tag-but-got-eof
+(1,31): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script </"
+| <body>
+
+#data
+<script><!--<script </script </script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,37): expected-named-closing-tag-but-got-eof
+(1,37): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script </script"
+| <body>
+
+#data
+<script><!--<script </script </script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,38): expected-attribute-name-but-got-eof
+(1,38): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<script><!--<script </script </script/
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,38): unexpected-EOF-after-solidus-in-tag
+(1,38): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<script><!--<script </script </script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script </script "
+| <body>
+
+#data
+<script><!--<script -
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,21): eof-in-script-in-script
+(1,21): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -"
+| <body>
+
+#data
+<script><!--<script -a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,22): eof-in-script-in-script
+(1,22): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -a"
+| <body>
+
+#data
+<script><!--<script --
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,22): eof-in-script-in-script
+(1,22): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --"
+| <body>
+
+#data
+<script><!--<script --a
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,23): eof-in-script-in-script
+(1,23): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --a"
+| <body>
+
+#data
+<script><!--<script -->
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,23): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<script><!--<script --><
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,24): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --><"
+| <body>
+
+#data
+<script><!--<script --></
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,25): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --></"
+| <body>
+
+#data
+<script><!--<script --></script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,31): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script --></script"
+| <body>
+
+#data
+<script><!--<script --></script
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,32): expected-attribute-name-but-got-eof
+(1,32): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<script><!--<script --></script/
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,32): unexpected-EOF-after-solidus-in-tag
+(1,32): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<script><!--<script --></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script -->"
+| <body>
+
+#data
+<script><!--<script><\/script>--></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script><\/script>-->"
+| <body>
+
+#data
+<script><!--<script></scr'+'ipt>--></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt>-->"
+| <body>
+
+#data
+<script><!--<script></script><script></script></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>"
+| <body>
+
+#data
+<script><!--<script></script><script></script>--><!--</script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>--><!--"
+| <body>
+
+#data
+<script><!--<script></script><script></script>-- ></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>-- >"
+| <body>
+
+#data
+<script><!--<script></script><script></script>- -></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>- ->"
+| <body>
+
+#data
+<script><!--<script></script><script></script>- - ></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>- - >"
+| <body>
+
+#data
+<script><!--<script></script><script></script>-></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></script><script></script>->"
+| <body>
+
+#data
+<script><!--<script>--!></script>X
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,34): expected-named-closing-tag-but-got-eof
+(1,34): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script>--!></script>X"
+| <body>
+
+#data
+<script><!--<scr'+'ipt></script>--></script>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,44): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<scr'+'ipt>"
+| <body>
+| "-->"
+
+#data
+<script><!--<script></scr'+'ipt></script>X
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,42): expected-named-closing-tag-but-got-eof
+(1,42): unexpected-eof-in-text-mode
+#document
+| <html>
+| <head>
+| <script>
+| "<!--<script></scr'+'ipt></script>X"
+| <body>
+
+#data
+<style><!--<style></style>--></style>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,37): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--<style>"
+| <body>
+| "-->"
+
+#data
+<style><!--</style>X
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--"
+| <body>
+| "X"
+
+#data
+<style><!--...</style>...--></style>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,36): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <body>
+| "...-->"
+
+#data
+<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
+| <body>
+| "X"
+
+#data
+<style><!--...<style><!--...--!></style>--></style>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,51): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--...<style><!--...--!>"
+| <body>
+| "-->"
+
+#data
+<style><!--...</style><!-- --><style>@import ...</style>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| "<!--..."
+| <!-- -->
+| <style>
+| "@import ..."
+| <body>
+
+#data
+<style>...<style><!--...</style><!-- --></style>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,48): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <style>
+| "...<style><!--..."
+| <!-- -->
+| <body>
+
+#data
+<style>...<!--[if IE]><style>...</style>X
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| "...<!--[if IE]><style>..."
+| <body>
+| "X"
+
+#data
+<title><!--<title></title>--></title>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,37): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <title>
+| "<!--<title>"
+| <body>
+| "-->"
+
+#data
+<title>&lt;/title></title>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <title>
+| "</title>"
+| <body>
+
+#data
+<title>foo/title><link></head><body>X
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,37): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <title>
+| "foo/title><link></head><body>X"
+| <body>
+
+#data
+<noscript><!--<noscript></noscript>--></noscript>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,49): unexpected-end-tag
+#script-on
+#document
+| <html>
+| <head>
+| <noscript>
+| "<!--<noscript>"
+| <body>
+| "-->"
+
+#data
+<noscript><!--<noscript></noscript>--></noscript>
+#errors
+ * (1,11) missing DOCTYPE
+#script-off
+#document
+| <html>
+| <head>
+| <noscript>
+| <!-- <noscript></noscript> -->
+| <body>
+
+#data
+<noscript><!--</noscript>X<noscript>--></noscript>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#script-on
+#document
+| <html>
+| <head>
+| <noscript>
+| "<!--"
+| <body>
+| "X"
+| <noscript>
+| "-->"
+
+#data
+<noscript><!--</noscript>X<noscript>--></noscript>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#script-off
+#document
+| <html>
+| <head>
+| <noscript>
+| <!-- </noscript>X<noscript> -->
+| <body>
+
+#data
+<noscript><iframe></noscript>X
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#script-on
+#document
+| <html>
+| <head>
+| <noscript>
+| "<iframe>"
+| <body>
+| "X"
+
+#data
+<noscript><iframe></noscript>X
+#errors
+ * (1,11) missing DOCTYPE
+ * (1,19) unexpected token in head noscript
+ * (1,31) unexpected EOF
+#script-off
+#document
+| <html>
+| <head>
+| <noscript>
+| <body>
+| <iframe>
+| "</noscript>X"
+
+#data
+<noframes><!--<noframes></noframes>--></noframes>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,49): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <noframes>
+| "<!--<noframes>"
+| <body>
+| "-->"
+
+#data
+<noframes><body><script><!--...</script></body></noframes></html>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <noframes>
+| "<body><script><!--...</script></body>"
+| <body>
+
+#data
+<textarea><!--<textarea></textarea>--></textarea>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,49): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "<!--<textarea>"
+| "-->"
+
+#data
+<textarea>&lt;/textarea></textarea>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "</textarea>"
+
+#data
+<iframe><!--<iframe></iframe>--></iframe>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,41): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "<!--<iframe>"
+| "-->"
+
+#data
+<iframe>...<!--X->...<!--/X->...</iframe>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| "...<!--X->...<!--/X->..."
+
+#data
+<xmp><!--<xmp></xmp>--></xmp>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,29): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <xmp>
+| "<!--<xmp>"
+| "-->"
+
+#data
+<noembed><!--<noembed></noembed>--></noembed>
+#errors
+(1,9): expected-doctype-but-got-start-tag
+(1,45): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <noembed>
+| "<!--<noembed>"
+| "-->"
+
+#data
+<!doctype html><table>
+
+#errors
+(2,0): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| "
+"
+
+#data
+<!doctype html><table><td><span><font></span><span>
+#errors
+(1,26): unexpected-cell-in-table-body
+(1,45): unexpected-end-tag
+(1,51): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <span>
+| <font>
+| <font>
+| <span>
+
+#data
+<!doctype html><form><table></form><form></table></form>
+#errors
+(1,35): unexpected-end-tag-implies-table-voodoo
+(1,35): unexpected-end-tag
+(1,41): unexpected-form-in-table
+(1,56): unexpected-end-tag
+(1,56): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <table>
+| <form>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests17.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests17.dat
new file mode 100644
index 000000000..e49bcf031
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests17.dat
@@ -0,0 +1,179 @@
+#data
+<!doctype html><table><tbody><select><tr>
+#errors
+(1,37): unexpected-start-tag-implies-table-voodoo
+(1,41): unexpected-table-element-start-tag-in-select-in-table
+(1,41): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table><tr><select><td>
+#errors
+(1,34): unexpected-start-tag-implies-table-voodoo
+(1,38): unexpected-table-element-start-tag-in-select-in-table
+(1,38): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<!doctype html><table><tr><td><select><td>
+#errors
+(1,42): unexpected-table-element-start-tag-in-select-in-table
+(1,42): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <select>
+| <td>
+
+#data
+<!doctype html><table><tr><th><select><td>
+#errors
+(1,42): unexpected-table-element-start-tag-in-select-in-table
+(1,42): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <th>
+| <select>
+| <td>
+
+#data
+<!doctype html><table><caption><select><tr>
+#errors
+(1,43): unexpected-table-element-start-tag-in-select-in-table
+(1,43): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <select>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><select><tr>
+#errors
+(1,27): unexpected-start-tag-in-select
+(1,27): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><td>
+#errors
+(1,27): unexpected-start-tag-in-select
+(1,27): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><th>
+#errors
+(1,27): unexpected-start-tag-in-select
+(1,27): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><tbody>
+#errors
+(1,30): unexpected-start-tag-in-select
+(1,30): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><thead>
+#errors
+(1,30): unexpected-start-tag-in-select
+(1,30): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><tfoot>
+#errors
+(1,30): unexpected-start-tag-in-select
+(1,30): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><select><caption>
+#errors
+(1,32): unexpected-start-tag-in-select
+(1,32): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><table><tr></table>a
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| "a"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests18.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests18.dat
new file mode 100644
index 000000000..926bccb38
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests18.dat
@@ -0,0 +1,322 @@
+#data
+<!doctype html><plaintext></plaintext>
+#errors
+(1,38): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+
+#data
+<!doctype html><table><plaintext></plaintext>
+#errors
+(1,33): foster-parenting-start-tag
+(1,34): foster-parenting-character
+(1,35): foster-parenting-character
+(1,36): foster-parenting-character
+(1,37): foster-parenting-character
+(1,38): foster-parenting-character
+(1,39): foster-parenting-character
+(1,40): foster-parenting-character
+(1,41): foster-parenting-character
+(1,42): foster-parenting-character
+(1,43): foster-parenting-character
+(1,44): foster-parenting-character
+(1,45): foster-parenting-character
+(1,45): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+| <table>
+
+#data
+<!doctype html><table><tbody><plaintext></plaintext>
+#errors
+(1,40): foster-parenting-start-tag
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,41): foster-parenting-character
+(1,52): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+| <table>
+| <tbody>
+
+#data
+<!doctype html><table><tbody><tr><plaintext></plaintext>
+#errors
+(1,44): foster-parenting-start-tag
+(1,45): foster-parenting-character
+(1,46): foster-parenting-character
+(1,47): foster-parenting-character
+(1,48): foster-parenting-character
+(1,49): foster-parenting-character
+(1,50): foster-parenting-character
+(1,51): foster-parenting-character
+(1,52): foster-parenting-character
+(1,53): foster-parenting-character
+(1,54): foster-parenting-character
+(1,55): foster-parenting-character
+(1,56): foster-parenting-character
+(1,56): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table><td><plaintext></plaintext>
+#errors
+(1,26): unexpected-cell-in-table-body
+(1,49): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <plaintext>
+| "</plaintext>"
+
+#data
+<!doctype html><table><caption><plaintext></plaintext>
+#errors
+(1,54): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <plaintext>
+| "</plaintext>"
+
+#data
+<!doctype html><table><tr><style></script></style>abc
+#errors
+(1,51): foster-parenting-character
+(1,52): foster-parenting-character
+(1,53): foster-parenting-character
+(1,53): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "abc"
+| <table>
+| <tbody>
+| <tr>
+| <style>
+| "</script>"
+
+#data
+<!doctype html><table><tr><script></style></script>abc
+#errors
+(1,52): foster-parenting-character
+(1,53): foster-parenting-character
+(1,54): foster-parenting-character
+(1,54): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "abc"
+| <table>
+| <tbody>
+| <tr>
+| <script>
+| "</style>"
+
+#data
+<!doctype html><table><caption><style></script></style>abc
+#errors
+(1,58): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <style>
+| "</script>"
+| "abc"
+
+#data
+<!doctype html><table><td><style></script></style>abc
+#errors
+(1,26): unexpected-cell-in-table-body
+(1,53): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <style>
+| "</script>"
+| "abc"
+
+#data
+<!doctype html><select><script></style></script>abc
+#errors
+(1,51): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <script>
+| "</style>"
+| "abc"
+
+#data
+<!doctype html><table><select><script></style></script>abc
+#errors
+(1,30): unexpected-start-tag-implies-table-voodoo
+(1,58): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <script>
+| "</style>"
+| "abc"
+| <table>
+
+#data
+<!doctype html><table><tr><select><script></style></script>abc
+#errors
+(1,34): unexpected-start-tag-implies-table-voodoo
+(1,62): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <script>
+| "</style>"
+| "abc"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><frameset></frameset><noframes>abc
+#errors
+(1,49): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <noframes>
+| "abc"
+
+#data
+<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <noframes>
+| "abc"
+| <!-- abc -->
+
+#data
+<!doctype html><frameset></frameset></html><noframes>abc
+#errors
+(1,56): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <noframes>
+| "abc"
+
+#data
+<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <noframes>
+| "abc"
+| <!-- abc -->
+
+#data
+<!doctype html><table><tr></tbody><tfoot>
+#errors
+(1,41): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <tfoot>
+
+#data
+<!doctype html><table><td><svg></svg>abc<td>
+#errors
+(1,26): unexpected-cell-in-table-body
+(1,44): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| "abc"
+| <td>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests19.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests19.dat
new file mode 100644
index 000000000..02158c8bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests19.dat
@@ -0,0 +1,1524 @@
+#data
+<!doctype html><math><mn DefinitionUrl="foo">
+#errors
+(1,45): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mn>
+| definitionURL="foo"
+
+#data
+<!doctype html><html></p><!--foo-->
+#errors
+(1,25): end-tag-after-implied-root
+#document
+| <!DOCTYPE html>
+| <html>
+| <!-- foo -->
+| <head>
+| <body>
+
+#data
+<!doctype html><head></head></p><!--foo-->
+#errors
+(1,32): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <!-- foo -->
+| <body>
+
+#data
+<!doctype html><body><p><pre>
+#errors
+(1,29): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <pre>
+
+#data
+<!doctype html><body><p><listing>
+#errors
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <listing>
+
+#data
+<!doctype html><p><plaintext>
+#errors
+(1,29): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <plaintext>
+
+#data
+<!doctype html><p><h1>
+#errors
+(1,22): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <h1>
+
+#data
+<!doctype html><form><isindex>
+#errors
+(1,30): deprecated-tag
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+
+#data
+<!doctype html><isindex action="POST">
+#errors
+(1,38): deprecated-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| action="POST"
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<!doctype html><isindex prompt="this is isindex">
+#errors
+(1,49): deprecated-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "this is isindex"
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<!doctype html><isindex type="hidden">
+#errors
+(1,38): deprecated-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| type="hidden"
+| <hr>
+
+#data
+<!doctype html><isindex name="foo">
+#errors
+(1,35): deprecated-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| <hr>
+
+#data
+<!doctype html><ruby><p><rp>
+#errors
+(1,28): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <p>
+| <rp>
+
+#data
+<!doctype html><ruby><div><span><rp>
+#errors
+(1,36): XXX-undefined-error
+(1,36): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <span>
+| <rp>
+
+#data
+<!doctype html><ruby><div><p><rp>
+#errors
+(1,33): XXX-undefined-error
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <p>
+| <rp>
+
+#data
+<!doctype html><ruby><p><rt>
+#errors
+(1,28): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <p>
+| <rt>
+
+#data
+<!doctype html><ruby><div><span><rt>
+#errors
+(1,36): XXX-undefined-error
+(1,36): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <span>
+| <rt>
+
+#data
+<!doctype html><ruby><div><p><rt>
+#errors
+(1,33): XXX-undefined-error
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <p>
+| <rt>
+
+#data
+<html><ruby>a<rb>b<rt></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rb>
+| "b"
+| <rt>
+
+#data
+<html><ruby>a<rp>b<rt></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rp>
+| "b"
+| <rt>
+
+#data
+<html><ruby>a<rt>b<rt></ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rt>
+| "b"
+| <rt>
+
+#data
+<html><ruby>a<rtc>b<rt>c<rb>d</ruby></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| "a"
+| <rtc>
+| "b"
+| <rt>
+| "c"
+| <rb>
+| "d"
+
+#data
+<!doctype html><math/><foo>
+#errors
+(1,27): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <foo>
+
+#data
+<!doctype html><svg/><foo>
+#errors
+(1,26): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <foo>
+
+#data
+<!doctype html><div></body><!--foo-->
+#errors
+(1,27): expected-one-end-tag-but-got-another
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| <!-- foo -->
+
+#data
+<!doctype html><h1><div><h3><span></h1>foo
+#errors
+(1,39): end-tag-too-early
+(1,42): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <h1>
+| <div>
+| <h3>
+| <span>
+| "foo"
+
+#data
+<!doctype html><p></h3>foo
+#errors
+(1,23): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "foo"
+
+#data
+<!doctype html><h3><li>abc</h2>foo
+#errors
+(1,31): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <h3>
+| <li>
+| "abc"
+| "foo"
+
+#data
+<!doctype html><table>abc<!--foo-->
+#errors
+(1,23): foster-parenting-character
+(1,24): foster-parenting-character
+(1,25): foster-parenting-character
+(1,35): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "abc"
+| <table>
+| <!-- foo -->
+
+#data
+<!doctype html><table> <!--foo-->
+#errors
+(1,34): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| " "
+| <!-- foo -->
+
+#data
+<!doctype html><table> b <!--foo-->
+#errors
+(1,23): foster-parenting-character
+(1,24): foster-parenting-character
+(1,25): foster-parenting-character
+(1,35): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| " b "
+| <table>
+| <!-- foo -->
+
+#data
+<!doctype html><select><option><option>
+#errors
+(1,39): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <option>
+
+#data
+<!doctype html><select><option></optgroup>
+#errors
+(1,42): unexpected-end-tag-in-select
+(1,42): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+
+#data
+<!doctype html><select><option></optgroup>
+#errors
+(1,42): unexpected-end-tag-in-select
+(1,42): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+
+#data
+<!doctype html><dd><optgroup><dd>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <dd>
+| <optgroup>
+| <dd>
+
+#data
+<!doctype html><p><math><mi><p><h1>
+#errors
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mi>
+| <p>
+| <h1>
+
+#data
+<!doctype html><p><math><mo><p><h1>
+#errors
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mo>
+| <p>
+| <h1>
+
+#data
+<!doctype html><p><math><mn><p><h1>
+#errors
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mn>
+| <p>
+| <h1>
+
+#data
+<!doctype html><p><math><ms><p><h1>
+#errors
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math ms>
+| <p>
+| <h1>
+
+#data
+<!doctype html><p><math><mtext><p><h1>
+#errors
+(1,38): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mtext>
+| <p>
+| <h1>
+
+#data
+<!doctype html><frameset></noframes>
+#errors
+(1,36): unexpected-end-tag-in-frameset
+(1,36): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><html c=d><body></html><html a=b>
+#errors
+(1,48): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| a="b"
+| c="d"
+| <head>
+| <body>
+
+#data
+<!doctype html><html c=d><frameset></frameset></html><html a=b>
+#errors
+(1,63): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| a="b"
+| c="d"
+| <head>
+| <frameset>
+
+#data
+<!doctype html><html><frameset></frameset></html><!--foo-->
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <!-- foo -->
+
+#data
+<!doctype html><html><frameset></frameset></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| " "
+
+#data
+<!doctype html><html><frameset></frameset></html>abc
+#errors
+(1,50): expected-eof-but-got-char
+(1,51): expected-eof-but-got-char
+(1,52): expected-eof-but-got-char
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><html><frameset></frameset></html><p>
+#errors
+(1,52): expected-eof-but-got-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><html><frameset></frameset></html></p>
+#errors
+(1,53): expected-eof-but-got-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<html><frameset></frameset></html><!doctype html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,49): unexpected-doctype
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><body><frameset>
+#errors
+(1,31): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!doctype html><p><frameset><frame>
+#errors
+(1,28): unexpected-start-tag
+(1,35): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><p>a<frameset>
+#errors
+(1,29): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "a"
+
+#data
+<!doctype html><p> <frameset><frame>
+#errors
+(1,29): unexpected-start-tag
+(1,36): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><pre><frameset>
+#errors
+(1,30): unexpected-start-tag
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+
+#data
+<!doctype html><listing><frameset>
+#errors
+(1,34): unexpected-start-tag
+(1,34): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <listing>
+
+#data
+<!doctype html><li><frameset>
+#errors
+(1,29): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <li>
+
+#data
+<!doctype html><dd><frameset>
+#errors
+(1,29): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <dd>
+
+#data
+<!doctype html><dt><frameset>
+#errors
+(1,29): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <dt>
+
+#data
+<!doctype html><button><frameset>
+#errors
+(1,33): unexpected-start-tag
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <button>
+
+#data
+<!doctype html><applet><frameset>
+#errors
+(1,33): unexpected-start-tag
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <applet>
+
+#data
+<!doctype html><marquee><frameset>
+#errors
+(1,34): unexpected-start-tag
+(1,34): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <marquee>
+
+#data
+<!doctype html><object><frameset>
+#errors
+(1,33): unexpected-start-tag
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <object>
+
+#data
+<!doctype html><table><frameset>
+#errors
+(1,32): unexpected-start-tag-implies-table-voodoo
+(1,32): unexpected-start-tag
+(1,32): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+
+#data
+<!doctype html><area><frameset>
+#errors
+(1,31): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <area>
+
+#data
+<!doctype html><basefont><frameset>
+#errors
+(1,35): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <basefont>
+| <frameset>
+
+#data
+<!doctype html><bgsound><frameset>
+#errors
+(1,34): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <bgsound>
+| <frameset>
+
+#data
+<!doctype html><br><frameset>
+#errors
+(1,29): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <br>
+
+#data
+<!doctype html><embed><frameset>
+#errors
+(1,32): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <embed>
+
+#data
+<!doctype html><img><frameset>
+#errors
+(1,30): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <img>
+
+#data
+<!doctype html><input><frameset>
+#errors
+(1,32): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <input>
+
+#data
+<!doctype html><keygen><frameset>
+#errors
+(1,33): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <keygen>
+
+#data
+<!doctype html><wbr><frameset>
+#errors
+(1,30): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <wbr>
+
+#data
+<!doctype html><hr><frameset>
+#errors
+(1,29): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <hr>
+
+#data
+<!doctype html><textarea></textarea><frameset>
+#errors
+(1,46): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+
+#data
+<!doctype html><xmp></xmp><frameset>
+#errors
+(1,36): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <xmp>
+
+#data
+<!doctype html><iframe></iframe><frameset>
+#errors
+(1,42): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <iframe>
+
+#data
+<!doctype html><select></select><frameset>
+#errors
+(1,42): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!doctype html><svg></svg><frameset><frame>
+#errors
+(1,36): unexpected-start-tag
+(1,43): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><math></math><frameset><frame>
+#errors
+(1,38): unexpected-start-tag
+(1,45): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><svg><foreignObject><div> <frameset><frame>
+#errors
+(1,51): unexpected-start-tag
+(1,58): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<!doctype html><svg>a</svg><frameset><frame>
+#errors
+(1,37): unexpected-start-tag
+(1,44): unexpected-start-tag-ignored
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "a"
+
+#data
+<!doctype html><svg> </svg><frameset><frame>
+#errors
+(1,37): unexpected-start-tag
+(1,44): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| <frame>
+
+#data
+<html>aaa<frameset></frameset>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,19): unexpected-start-tag
+(1,30): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| "aaa"
+
+#data
+<html> a <frameset></frameset>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,19): unexpected-start-tag
+(1,30): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| "a "
+
+#data
+<!doctype html><div><frameset>
+#errors
+(1,30): unexpected-start-tag
+(1,30): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><div><body><frameset>
+#errors
+(1,26): unexpected-start-tag
+(1,36): unexpected-start-tag
+(1,36): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<!doctype html><p><math></p>a
+#errors
+(1,28): unexpected-end-tag
+(1,28): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| "a"
+
+#data
+<!doctype html><p><math><mn><span></p>a
+#errors
+(1,38): unexpected-end-tag
+(1,39): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <math math>
+| <math mn>
+| <span>
+| <p>
+| "a"
+
+#data
+<!doctype html><math></html>
+#errors
+(1,28): unexpected-end-tag
+(1,28): expected-one-end-tag-but-got-another
+(1,28): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+
+#data
+<!doctype html><meta charset="ascii">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <meta>
+| charset="ascii"
+| <body>
+
+#data
+<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <meta>
+| content="text/html;charset=ascii"
+| http-equiv="content-type"
+| <body>
+
+#data
+<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8">
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -->
+| <meta>
+| charset="utf8"
+| <body>
+
+#data
+<!doctype html><html a=b><head></head><html c=d>
+#errors
+(1,48): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| a="b"
+| c="d"
+| <head>
+| <body>
+
+#data
+<!doctype html><image/>
+#errors
+(1,23): image-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <img>
+
+#data
+<!doctype html>a<i>b<table>c<b>d</i>e</b>f
+#errors
+(1,28): foster-parenting-character
+(1,31): foster-parenting-start-tag
+(1,32): foster-parenting-character
+(1,36): foster-parenting-end-tag
+(1,36): adoption-agency-1.3
+(1,37): foster-parenting-character
+(1,41): foster-parenting-end-tag
+(1,42): foster-parenting-character
+(1,42): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "a"
+| <i>
+| "bc"
+| <b>
+| "de"
+| "f"
+| <table>
+
+#data
+<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f
+#errors
+(1,25): foster-parenting-start-tag
+(1,26): foster-parenting-character
+(1,29): foster-parenting-start-tag
+(1,30): foster-parenting-character
+(1,35): foster-parenting-start-tag
+(1,36): foster-parenting-character
+(1,39): foster-parenting-start-tag
+(1,40): foster-parenting-character
+(1,44): foster-parenting-end-tag
+(1,44): adoption-agency-1.3
+(1,44): adoption-agency-1.3
+(1,45): foster-parenting-character
+(1,49): foster-parenting-end-tag
+(1,49): adoption-agency-1.3
+(1,49): adoption-agency-1.3
+(1,50): foster-parenting-character
+(1,50): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <b>
+| "b"
+| <b>
+| <div>
+| <b>
+| <i>
+| "c"
+| <a>
+| "d"
+| <a>
+| "e"
+| <a>
+| "f"
+| <table>
+
+#data
+<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f
+#errors
+(1,37): adoption-agency-1.3
+(1,37): adoption-agency-1.3
+(1,42): adoption-agency-1.3
+(1,42): adoption-agency-1.3
+(1,43): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <b>
+| "b"
+| <b>
+| <div>
+| <b>
+| <i>
+| "c"
+| <a>
+| "d"
+| <a>
+| "e"
+| <a>
+| "f"
+
+#data
+<!doctype html><table><i>a<b>b<div>c</i>
+#errors
+(1,25): foster-parenting-start-tag
+(1,26): foster-parenting-character
+(1,29): foster-parenting-start-tag
+(1,30): foster-parenting-character
+(1,35): foster-parenting-start-tag
+(1,36): foster-parenting-character
+(1,40): foster-parenting-end-tag
+(1,40): adoption-agency-1.3
+(1,40): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <b>
+| "b"
+| <b>
+| <div>
+| <i>
+| "c"
+| <table>
+
+#data
+<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f
+#errors
+(1,25): foster-parenting-start-tag
+(1,26): foster-parenting-character
+(1,29): foster-parenting-start-tag
+(1,30): foster-parenting-character
+(1,35): foster-parenting-start-tag
+(1,36): foster-parenting-character
+(1,39): foster-parenting-start-tag
+(1,40): foster-parenting-character
+(1,44): foster-parenting-end-tag
+(1,44): adoption-agency-1.3
+(1,44): adoption-agency-1.3
+(1,45): foster-parenting-character
+(1,49): foster-parenting-end-tag
+(1,44): adoption-agency-1.3
+(1,44): adoption-agency-1.3
+(1,50): foster-parenting-character
+(1,50): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <b>
+| "b"
+| <b>
+| <div>
+| <b>
+| <i>
+| "c"
+| <a>
+| "d"
+| <a>
+| "e"
+| <a>
+| "f"
+| <table>
+
+#data
+<!doctype html><table><i>a<div>b<tr>c<b>d</i>e
+#errors
+(1,25): foster-parenting-start-tag
+(1,26): foster-parenting-character
+(1,31): foster-parenting-start-tag
+(1,32): foster-parenting-character
+(1,37): foster-parenting-character
+(1,40): foster-parenting-start-tag
+(1,41): foster-parenting-character
+(1,45): foster-parenting-end-tag
+(1,45): adoption-agency-1.3
+(1,46): foster-parenting-character
+(1,46): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <i>
+| "a"
+| <div>
+| "b"
+| <i>
+| "c"
+| <b>
+| "d"
+| <b>
+| "e"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><table><td><table><i>a<div>b<b>c</i>d
+#errors
+(1,26): unexpected-cell-in-table-body
+(1,36): foster-parenting-start-tag
+(1,37): foster-parenting-character
+(1,42): foster-parenting-start-tag
+(1,43): foster-parenting-character
+(1,46): foster-parenting-start-tag
+(1,47): foster-parenting-character
+(1,51): foster-parenting-end-tag
+(1,51): adoption-agency-1.3
+(1,51): adoption-agency-1.3
+(1,52): foster-parenting-character
+(1,52): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <i>
+| "a"
+| <div>
+| <i>
+| "b"
+| <b>
+| "c"
+| <b>
+| "d"
+| <table>
+
+#data
+<!doctype html><body><bgsound>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <bgsound>
+
+#data
+<!doctype html><body><basefont>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <basefont>
+
+#data
+<!doctype html><a><b></a><basefont>
+#errors
+(1,25): adoption-agency-1.3
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <basefont>
+
+#data
+<!doctype html><a><b></a><bgsound>
+#errors
+(1,25): adoption-agency-1.3
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <bgsound>
+
+#data
+<!doctype html><figcaption><article></figcaption>a
+#errors
+(1,49): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <figcaption>
+| <article>
+| "a"
+
+#data
+<!doctype html><summary><article></summary>a
+#errors
+(1,43): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <summary>
+| <article>
+| "a"
+
+#data
+<!doctype html><p><a><plaintext>b
+#errors
+(1,32): unexpected-end-tag
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <a>
+| <plaintext>
+| <a>
+| "b"
+
+#data
+<!DOCTYPE html><div>a<a></div>b<p>c</p>d
+#errors
+(1,30): end-tag-too-early
+(1,40): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| "a"
+| <a>
+| <a>
+| "b"
+| <p>
+| "c"
+| "d"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests2.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests2.dat
new file mode 100644
index 000000000..41953160f
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests2.dat
@@ -0,0 +1,799 @@
+#data
+<!DOCTYPE html>Test
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "Test"
+
+#data
+<textarea>test</div>test
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,24): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "test</div>test"
+
+#data
+<table><td>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,11): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><td>test</tbody></table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "test"
+
+#data
+<frame>test
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,7): unexpected-start-tag-ignored
+#document
+| <html>
+| <head>
+| <body>
+| "test"
+
+#data
+<!DOCTYPE html><frameset>test
+#errors
+(1,29): unexpected-char-in-frameset
+(1,29): unexpected-char-in-frameset
+(1,29): unexpected-char-in-frameset
+(1,29): unexpected-char-in-frameset
+(1,29): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><frameset> te st
+#errors
+(1,29): unexpected-char-in-frameset
+(1,29): unexpected-char-in-frameset
+(1,29): unexpected-char-in-frameset
+(1,29): unexpected-char-in-frameset
+(1,29): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| " "
+
+#data
+<!DOCTYPE html><frameset></frameset> te st
+#errors
+(1,29): unexpected-char-after-frameset
+(1,29): unexpected-char-after-frameset
+(1,29): unexpected-char-after-frameset
+(1,29): unexpected-char-after-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+| " "
+
+#data
+<!DOCTYPE html><frameset><!DOCTYPE html>
+#errors
+(1,40): unexpected-doctype
+(1,40): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><font><p><b>test</font>
+#errors
+(1,38): adoption-agency-1.3
+(1,38): adoption-agency-1.3
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <font>
+| <p>
+| <font>
+| <b>
+| "test"
+
+#data
+<!DOCTYPE html><dt><div><dd>
+#errors
+(1,28): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <dt>
+| <div>
+| <dd>
+
+#data
+<script></x
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,11): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <script>
+| "</x"
+| <body>
+
+#data
+<table><plaintext><td>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,18): unexpected-start-tag-implies-table-voodoo
+(1,22): foster-parenting-character-in-table
+(1,22): foster-parenting-character-in-table
+(1,22): foster-parenting-character-in-table
+(1,22): foster-parenting-character-in-table
+(1,22): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "<td>"
+| <table>
+
+#data
+<plaintext></plaintext>
+#errors
+(1,11): expected-doctype-but-got-start-tag
+(1,23): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <plaintext>
+| "</plaintext>"
+
+#data
+<!DOCTYPE html><table><tr>TEST
+#errors
+(1,30): foster-parenting-character-in-table
+(1,30): foster-parenting-character-in-table
+(1,30): foster-parenting-character-in-table
+(1,30): foster-parenting-character-in-table
+(1,30): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "TEST"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4>
+#errors
+(1,37): unexpected-start-tag
+(1,53): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| t1="1"
+| t2="2"
+| t3="3"
+| t4="4"
+
+#data
+</b test
+#errors
+(1,8): eof-in-attribute-name
+(1,8): expected-doctype-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html></b test<b &=&amp>X
+#errors
+(1,24): invalid-character-in-attribute-name
+(1,32): named-entity-without-semicolon
+(1,33): attributes-in-end-tag
+(1,33): unexpected-end-tag-before-html
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+
+#data
+<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt
+#errors
+(1,9): need-space-after-doctype
+(1,54): expected-named-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| type="text/x-foobar;baz"
+| "X</SCRipt"
+| <body>
+
+#data
+&
+#errors
+(1,1): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "&"
+
+#data
+&#
+#errors
+(1,2): expected-numeric-entity
+(1,2): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "&#"
+
+#data
+&#X
+#errors
+(1,3): expected-numeric-entity
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "&#X"
+
+#data
+&#x
+#errors
+(1,3): expected-numeric-entity
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "&#x"
+
+#data
+&#45
+#errors
+(1,4): numeric-entity-without-semicolon
+(1,4): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "-"
+
+#data
+&x-test
+#errors
+(1,2): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "&x-test"
+
+#data
+<!doctypehtml><p><li>
+#errors
+(1,9): need-space-after-doctype
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <li>
+
+#data
+<!doctypehtml><p><dt>
+#errors
+(1,9): need-space-after-doctype
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <dt>
+
+#data
+<!doctypehtml><p><dd>
+#errors
+(1,9): need-space-after-doctype
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <dd>
+
+#data
+<!doctypehtml><p><form>
+#errors
+(1,9): need-space-after-doctype
+(1,23): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <form>
+
+#data
+<!DOCTYPE html><p></P>X
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| "X"
+
+#data
+&AMP
+#errors
+(1,4): named-entity-without-semicolon
+(1,4): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "&"
+
+#data
+&AMp;
+#errors
+(1,3): expected-named-entity
+(1,3): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "&AMp;"
+
+#data
+<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY>
+#errors
+(1,110): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly>
+
+#data
+<!DOCTYPE html>X</body>X
+#errors
+(1,24): unexpected-char-after-body
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "XX"
+
+#data
+<!DOCTYPE html><!-- X
+#errors
+(1,21): eof-in-comment
+#document
+| <!DOCTYPE html>
+| <!-- X -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><table><caption>test TEST</caption><td>test
+#errors
+(1,54): unexpected-cell-in-table-body
+(1,58): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| "test TEST"
+| <tbody>
+| <tr>
+| <td>
+| "test"
+
+#data
+<!DOCTYPE html><select><option><optgroup>
+#errors
+(1,41): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| <optgroup>
+
+#data
+<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option>
+#errors
+(1,68): unexpected-select-in-select
+(1,76): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <optgroup>
+| <option>
+| <option>
+| <option>
+
+#data
+<!DOCTYPE html><select><optgroup><option><optgroup>
+#errors
+(1,51): eof-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <optgroup>
+| <option>
+| <optgroup>
+
+#data
+<!DOCTYPE html><datalist><option>foo</datalist>bar
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <datalist>
+| <option>
+| "foo"
+| "bar"
+
+#data
+<!DOCTYPE html><font><input><input></font>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <font>
+| <input>
+| <input>
+
+#data
+<!DOCTYPE html><!-- XXX - XXX -->
+#errors
+#document
+| <!DOCTYPE html>
+| <!-- XXX - XXX -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><!-- XXX - XXX
+#errors
+(1,29): eof-in-comment
+#document
+| <!DOCTYPE html>
+| <!-- XXX - XXX -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><!-- XXX - XXX - XXX -->
+#errors
+#document
+| <!DOCTYPE html>
+| <!-- XXX - XXX - XXX -->
+| <html>
+| <head>
+| <body>
+
+#data
+<isindex test=x name=x>
+#errors
+(1,23): expected-doctype-but-got-start-tag
+(1,23): deprecated-tag
+#document
+| <html>
+| <head>
+| <body>
+| <form>
+| <hr>
+| <label>
+| "This is a searchable index. Enter search keywords: "
+| <input>
+| name="isindex"
+| test="x"
+| <hr>
+
+#data
+test
+test
+#errors
+(2,4): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "test
+test"
+
+#data
+<!DOCTYPE html><body><title>test</body></title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "test</body>"
+
+#data
+<!DOCTYPE html><body><title>X</title><meta name=z><link rel=foo><style>
+x { content:"</style" } </style>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "X"
+| <meta>
+| name="z"
+| <link>
+| rel="foo"
+| <style>
+| "
+x { content:"</style" } "
+
+#data
+<!DOCTYPE html><select><optgroup></optgroup></select>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <optgroup>
+
+#data
+
+
+#errors
+(2,1): expected-doctype-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html> <html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><script>
+</script> <title>x</title> </head>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <script>
+| "
+"
+| " "
+| <title>
+| "x"
+| " "
+| <body>
+
+#data
+<!DOCTYPE html><html><body><html id=x>
+#errors
+(1,38): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| id="x"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html>X</body><html id="x">
+#errors
+(1,36): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| id="x"
+| <head>
+| <body>
+| "X"
+
+#data
+<!DOCTYPE html><head><html id=x>
+#errors
+(1,32): non-html-root
+#document
+| <!DOCTYPE html>
+| <html>
+| id="x"
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html>X</html>X
+#errors
+(1,24): expected-eof-but-got-char
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "XX"
+
+#data
+<!DOCTYPE html>X</html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X "
+
+#data
+<!DOCTYPE html>X</html><p>X
+#errors
+(1,26): expected-eof-but-got-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <p>
+| "X"
+
+#data
+<!DOCTYPE html>X<p/x/y/z>
+#errors
+(1,19): unexpected-character-after-solidus-in-tag
+(1,21): unexpected-character-after-solidus-in-tag
+(1,23): unexpected-character-after-solidus-in-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <p>
+| x=""
+| y=""
+| z=""
+
+#data
+<!DOCTYPE html><!--x--
+#errors
+(1,22): eof-in-comment-double-dash
+#document
+| <!DOCTYPE html>
+| <!-- x -->
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE html><table><tr><td></p></table>
+#errors
+(1,34): unexpected-end-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <p>
+
+#data
+<!DOCTYPE <!DOCTYPE HTML>><!--<!--x-->-->
+#errors
+(1,20): expected-space-or-right-bracket-in-doctype
+(1,25): unknown-doctype
+(1,35): unexpected-char-in-comment
+#document
+| <!DOCTYPE <!doctype>
+| <html>
+| <head>
+| <body>
+| ">"
+| <!-- <!--x -->
+| "-->"
+
+#data
+<!doctype html><div><form></form><div></div></div>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| <form>
+| <div>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests20.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests20.dat
new file mode 100644
index 000000000..52c5acdc6
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests20.dat
@@ -0,0 +1,516 @@
+#data
+<!doctype html><p><button><button>
+#errors
+(1,34): unexpected-start-tag-implies-end-tag
+(1,34): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <button>
+
+#data
+<!doctype html><p><button><address>
+#errors
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <address>
+
+#data
+<!doctype html><p><button><blockquote>
+#errors
+(1,38): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <blockquote>
+
+#data
+<!doctype html><p><button><menu>
+#errors
+(1,32): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <menu>
+
+#data
+<!doctype html><p><button><p>
+#errors
+(1,29): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <p>
+
+#data
+<!doctype html><p><button><ul>
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <ul>
+
+#data
+<!doctype html><p><button><h1>
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <h1>
+
+#data
+<!doctype html><p><button><h6>
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <h6>
+
+#data
+<!doctype html><p><button><listing>
+#errors
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <listing>
+
+#data
+<!doctype html><p><button><pre>
+#errors
+(1,31): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <pre>
+
+#data
+<!doctype html><p><button><form>
+#errors
+(1,32): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <form>
+
+#data
+<!doctype html><p><button><li>
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <li>
+
+#data
+<!doctype html><p><button><dd>
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <dd>
+
+#data
+<!doctype html><p><button><dt>
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <dt>
+
+#data
+<!doctype html><p><button><plaintext>
+#errors
+(1,37): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <plaintext>
+
+#data
+<!doctype html><p><button><table>
+#errors
+(1,33): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <table>
+
+#data
+<!doctype html><p><button><hr>
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <hr>
+
+#data
+<!doctype html><p><button><xmp>
+#errors
+(1,31): expected-named-closing-tag-but-got-eof
+(1,31): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <xmp>
+
+#data
+<!doctype html><p><button></p>
+#errors
+(1,30): unexpected-end-tag
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <button>
+| <p>
+
+#data
+<!doctype html><address><button></address>a
+#errors
+(1,42): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <address>
+| <button>
+| "a"
+
+#data
+<!doctype html><address><button></address>a
+#errors
+(1,42): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <address>
+| <button>
+| "a"
+
+#data
+<p><table></p>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,14): unexpected-end-tag-implies-table-voodoo
+(1,14): unexpected-end-tag
+(1,14): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <p>
+| <table>
+
+#data
+<!doctype html><svg>
+#errors
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<!doctype html><p><figcaption>
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <figcaption>
+
+#data
+<!doctype html><p><summary>
+#errors
+(1,27): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <summary>
+
+#data
+<!doctype html><form><table><form>
+#errors
+(1,34): unexpected-form-in-table
+(1,34): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <table>
+
+#data
+<!doctype html><table><form><form>
+#errors
+(1,28): unexpected-form-in-table
+(1,34): unexpected-form-in-table
+(1,34): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <form>
+
+#data
+<!doctype html><table><form></table><form>
+#errors
+(1,28): unexpected-form-in-table
+(1,42): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <form>
+
+#data
+<!doctype html><svg><foreignObject><p>
+#errors
+(1,38): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg foreignObject>
+| <p>
+
+#data
+<!doctype html><svg><title>abc
+#errors
+(1,30): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg title>
+| "abc"
+
+#data
+<option><span><option>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,22): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <option>
+| <span>
+| <option>
+
+#data
+<option><option>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <option>
+| <option>
+
+#data
+<math><annotation-xml><div>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,27): unexpected-html-element-in-foreign-content
+(1,27): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <div>
+
+#data
+<math><annotation-xml encoding="application/svg+xml"><div>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,58): unexpected-html-element-in-foreign-content
+(1,58): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="application/svg+xml"
+| <div>
+
+#data
+<math><annotation-xml encoding="application/xhtml+xml"><div>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,60): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="application/xhtml+xml"
+| <div>
+
+#data
+<math><annotation-xml encoding="aPPlication/xhtmL+xMl"><div>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,60): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="aPPlication/xhtmL+xMl"
+| <div>
+
+#data
+<math><annotation-xml encoding="text/html"><div>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,48): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="text/html"
+| <div>
+
+#data
+<math><annotation-xml encoding="Text/htmL"><div>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,48): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding="Text/htmL"
+| <div>
+
+#data
+<math><annotation-xml encoding=" text/html "><div>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,50): unexpected-html-element-in-foreign-content
+(1,50): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| encoding=" text/html "
+| <div>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests21.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests21.dat
new file mode 100644
index 000000000..d384a5556
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests21.dat
@@ -0,0 +1,305 @@
+#data
+<svg><![CDATA[foo]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo"
+
+#data
+<math><![CDATA[foo]]>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,21): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| "foo"
+
+#data
+<div><![CDATA[foo]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,7): expected-dashes-or-doctype
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <!-- [CDATA[foo]] -->
+
+#data
+<svg><![CDATA[foo
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo"
+
+#data
+<svg><![CDATA[foo
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo"
+
+#data
+<svg><![CDATA[
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,14): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<svg><![CDATA[]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+
+#data
+<svg><![CDATA[]] >]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,21): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]] >"
+
+#data
+<svg><![CDATA[]] >]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,21): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]] >"
+
+#data
+<svg><![CDATA[]]
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]]"
+
+#data
+<svg><![CDATA[]
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,15): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]"
+
+#data
+<svg><![CDATA[]>a
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "]>a"
+
+#data
+<!DOCTYPE html><svg><![CDATA[foo]]]>
+#errors
+(1,36): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo]"
+
+#data
+<!DOCTYPE html><svg><![CDATA[foo]]]]>
+#errors
+(1,37): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo]]"
+
+#data
+<!DOCTYPE html><svg><![CDATA[foo]]]]]>
+#errors
+(1,38): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "foo]]]"
+
+#data
+<svg><foreignObject><div><![CDATA[foo]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,27): expected-dashes-or-doctype
+(1,40): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg foreignObject>
+| <div>
+| <!-- [CDATA[foo]] -->
+
+#data
+<svg><![CDATA[<svg>]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,22): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>"
+
+#data
+<svg><![CDATA[</svg>a]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,24): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "</svg>a"
+
+#data
+<svg><![CDATA[<svg>a
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>a"
+
+#data
+<svg><![CDATA[</svg>a
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,21): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "</svg>a"
+
+#data
+<svg><![CDATA[<svg>]]><path>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,28): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>"
+| <svg path>
+
+#data
+<svg><![CDATA[<svg>]]></path>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,29): unexpected-end-tag
+(1,29): unexpected-end-tag
+(1,29): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>"
+
+#data
+<svg><![CDATA[<svg>]]><!--path-->
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>"
+| <!-- path -->
+
+#data
+<svg><![CDATA[<svg>]]>path
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,26): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<svg>path"
+
+#data
+<svg><![CDATA[<!--svg-->]]>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,27): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| "<!--svg-->"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests22.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests22.dat
new file mode 100644
index 000000000..31e6d9e33
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests22.dat
@@ -0,0 +1,190 @@
+#data
+<a><b><big><em><strong><div>X</a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,33): adoption-agency-1.3
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <big>
+| <em>
+| <strong>
+| <big>
+| <em>
+| <strong>
+| <div>
+| <a>
+| "X"
+
+#data
+<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8>A</a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,91): adoption-agency-1.3
+(1,91): adoption-agency-1.3
+(1,91): adoption-agency-1.3
+(1,91): adoption-agency-1.3
+(1,91): adoption-agency-1.3
+(1,91): adoption-agency-1.3
+(1,91): adoption-agency-1.3
+(1,91): adoption-agency-1.3
+(1,91): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <b>
+| <div>
+| id="1"
+| <a>
+| <div>
+| id="2"
+| <a>
+| <div>
+| id="3"
+| <a>
+| <div>
+| id="4"
+| <a>
+| <div>
+| id="5"
+| <a>
+| <div>
+| id="6"
+| <a>
+| <div>
+| id="7"
+| <a>
+| <div>
+| id="8"
+| <a>
+| "A"
+
+#data
+<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8><div id=9>A</a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,101): adoption-agency-1.3
+(1,101): adoption-agency-1.3
+(1,101): adoption-agency-1.3
+(1,101): adoption-agency-1.3
+(1,101): adoption-agency-1.3
+(1,101): adoption-agency-1.3
+(1,101): adoption-agency-1.3
+(1,101): adoption-agency-1.3
+(1,101): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <b>
+| <div>
+| id="1"
+| <a>
+| <div>
+| id="2"
+| <a>
+| <div>
+| id="3"
+| <a>
+| <div>
+| id="4"
+| <a>
+| <div>
+| id="5"
+| <a>
+| <div>
+| id="6"
+| <a>
+| <div>
+| id="7"
+| <a>
+| <div>
+| id="8"
+| <a>
+| <div>
+| id="9"
+| "A"
+
+#data
+<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8><div id=9><div id=10>A</a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,112): adoption-agency-1.3
+(1,112): adoption-agency-1.3
+(1,112): adoption-agency-1.3
+(1,112): adoption-agency-1.3
+(1,112): adoption-agency-1.3
+(1,112): adoption-agency-1.3
+(1,112): adoption-agency-1.3
+(1,112): adoption-agency-1.3
+(1,112): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <b>
+| <b>
+| <div>
+| id="1"
+| <a>
+| <div>
+| id="2"
+| <a>
+| <div>
+| id="3"
+| <a>
+| <div>
+| id="4"
+| <a>
+| <div>
+| id="5"
+| <a>
+| <div>
+| id="6"
+| <a>
+| <div>
+| id="7"
+| <a>
+| <div>
+| id="8"
+| <a>
+| <div>
+| id="9"
+| <div>
+| id="10"
+| "A"
+
+#data
+<cite><b><cite><i><cite><i><cite><i><div>X</b>TEST
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,46): adoption-agency-1.3
+(1,50): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <cite>
+| <b>
+| <cite>
+| <i>
+| <cite>
+| <i>
+| <cite>
+| <i>
+| <i>
+| <i>
+| <div>
+| <b>
+| "X"
+| "TEST"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests23.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests23.dat
new file mode 100644
index 000000000..49e4a4ace
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests23.dat
@@ -0,0 +1,168 @@
+#data
+<p><font size=4><font color=red><font size=4><font size=4><font size=4><font size=4><font size=4><font color=red><p>X
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,116): unexpected-end-tag
+(1,117): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| size="4"
+| <font>
+| color="red"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| color="red"
+| <p>
+| <font>
+| color="red"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| color="red"
+| "X"
+
+#data
+<p><font size=4><font size=4><font size=4><font size=4><p>X
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,58): unexpected-end-tag
+(1,59): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| "X"
+
+#data
+<p><font size=4><font size=4><font size=4><font size="5"><font size=4><p>X
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,73): unexpected-end-tag
+(1,74): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="5"
+| <font>
+| size="4"
+| <p>
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="5"
+| <font>
+| size="4"
+| "X"
+
+#data
+<p><font size=4 id=a><font size=4 id=b><font size=4><font size=4><p>X
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,68): unexpected-end-tag
+(1,69): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <font>
+| id="a"
+| size="4"
+| <font>
+| id="b"
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| <p>
+| <font>
+| id="a"
+| size="4"
+| <font>
+| id="b"
+| size="4"
+| <font>
+| size="4"
+| <font>
+| size="4"
+| "X"
+
+#data
+<p><b id=a><b id=a><b id=a><b><object><b id=a><b id=a>X</object><p>Y
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,64): end-tag-too-early
+(1,67): unexpected-end-tag
+(1,68): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <b>
+| id="a"
+| <b>
+| id="a"
+| <b>
+| id="a"
+| <b>
+| <object>
+| <b>
+| id="a"
+| <b>
+| id="a"
+| "X"
+| <p>
+| <b>
+| id="a"
+| <b>
+| id="a"
+| <b>
+| id="a"
+| <b>
+| "Y"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests24.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests24.dat
new file mode 100644
index 000000000..f6dc7eb48
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests24.dat
@@ -0,0 +1,79 @@
+#data
+<!DOCTYPE html>&NotEqualTilde;
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "≂̸"
+
+#data
+<!DOCTYPE html>&NotEqualTilde;A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "≂̸A"
+
+#data
+<!DOCTYPE html>&ThickSpace;
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "âŸâ€Š"
+
+#data
+<!DOCTYPE html>&ThickSpace;A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "âŸâ€ŠA"
+
+#data
+<!DOCTYPE html>&NotSubset;
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "⊂⃒"
+
+#data
+<!DOCTYPE html>&NotSubset;A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "⊂⃒A"
+
+#data
+<!DOCTYPE html>&Gopf;
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "ð”¾"
+
+#data
+<!DOCTYPE html>&Gopf;A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "ð”¾A"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests25.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests25.dat
new file mode 100644
index 000000000..103574ee4
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests25.dat
@@ -0,0 +1,232 @@
+#data
+<!DOCTYPE html><body><foo>A
+#errors
+(1,27): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <foo>
+| "A"
+
+#data
+<!DOCTYPE html><body><area>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <area>
+| "A"
+
+#data
+<!DOCTYPE html><body><base>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <base>
+| "A"
+
+#data
+<!DOCTYPE html><body><basefont>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <basefont>
+| "A"
+
+#data
+<!DOCTYPE html><body><bgsound>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <bgsound>
+| "A"
+
+#data
+<!DOCTYPE html><body><br>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <br>
+| "A"
+
+#data
+<!DOCTYPE html><body><col>A
+#errors
+(1,26): unexpected-start-tag-ignored
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "A"
+
+#data
+<!DOCTYPE html><body><command>A
+#errors
+eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <command>
+| "A"
+
+#data
+<!DOCTYPE html><body><menuitem>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <menuitem>
+| "A"
+
+#data
+<!DOCTYPE html><body><embed>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <embed>
+| "A"
+
+#data
+<!DOCTYPE html><body><frame>A
+#errors
+(1,28): unexpected-start-tag-ignored
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "A"
+
+#data
+<!DOCTYPE html><body><hr>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <hr>
+| "A"
+
+#data
+<!DOCTYPE html><body><img>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <img>
+| "A"
+
+#data
+<!DOCTYPE html><body><input>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <input>
+| "A"
+
+#data
+<!DOCTYPE html><body><keygen>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <keygen>
+| "A"
+
+#data
+<!DOCTYPE html><body><link>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <link>
+| "A"
+
+#data
+<!DOCTYPE html><body><meta>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <meta>
+| "A"
+
+#data
+<!DOCTYPE html><body><param>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <param>
+| "A"
+
+#data
+<!DOCTYPE html><body><source>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <source>
+| "A"
+
+#data
+<!DOCTYPE html><body><track>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <track>
+| "A"
+
+#data
+<!DOCTYPE html><body><wbr>A
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <wbr>
+| "A"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests26.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests26.dat
new file mode 100644
index 000000000..8964624a4
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests26.dat
@@ -0,0 +1,388 @@
+#data
+<!DOCTYPE html><body><a href='#1'><nobr>1<nobr></a><br><a href='#2'><nobr>2<nobr></a><br><a href='#3'><nobr>3<nobr></a>
+#errors
+(1,47): unexpected-start-tag-implies-end-tag
+(1,51): adoption-agency-1.3
+(1,74): unexpected-start-tag-implies-end-tag
+(1,74): adoption-agency-1.3
+(1,81): unexpected-start-tag-implies-end-tag
+(1,85): adoption-agency-1.3
+(1,108): unexpected-start-tag-implies-end-tag
+(1,108): adoption-agency-1.3
+(1,115): unexpected-start-tag-implies-end-tag
+(1,119): adoption-agency-1.3
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <a>
+| href="#1"
+| <nobr>
+| "1"
+| <nobr>
+| <nobr>
+| <br>
+| <a>
+| href="#2"
+| <a>
+| href="#2"
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| <br>
+| <a>
+| href="#3"
+| <a>
+| href="#3"
+| <nobr>
+| "3"
+| <nobr>
+
+#data
+<!DOCTYPE html><body><b><nobr>1<nobr></b><i><nobr>2<nobr></i>3
+#errors
+(1,37): unexpected-start-tag-implies-end-tag
+(1,41): adoption-agency-1.3
+(1,50): unexpected-start-tag-implies-end-tag
+(1,50): adoption-agency-1.3
+(1,57): unexpected-start-tag-implies-end-tag
+(1,61): adoption-agency-1.3
+(1,62): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <nobr>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+
+#data
+<!DOCTYPE html><body><b><nobr>1<table><nobr></b><i><nobr>2<nobr></i>3
+#errors
+(1,44): foster-parenting-start-tag
+(1,48): foster-parenting-end-tag
+(1,48): adoption-agency-1.3
+(1,51): foster-parenting-start-tag
+(1,57): foster-parenting-start-tag
+(1,57): nobr-already-in-scope
+(1,57): adoption-agency-1.2
+(1,58): foster-parenting-character
+(1,64): foster-parenting-start-tag
+(1,64): nobr-already-in-scope
+(1,68): foster-parenting-end-tag
+(1,68): adoption-agency-1.2
+(1,69): foster-parenting-character
+(1,69): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+| <table>
+
+#data
+<!DOCTYPE html><body><b><nobr>1<table><tr><td><nobr></b><i><nobr>2<nobr></i>3
+#errors
+(1,56): unexpected-end-tag
+(1,65): unexpected-start-tag-implies-end-tag
+(1,65): adoption-agency-1.3
+(1,72): unexpected-start-tag-implies-end-tag
+(1,76): adoption-agency-1.3
+(1,77): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+
+#data
+<!DOCTYPE html><body><b><nobr>1<div><nobr></b><i><nobr>2<nobr></i>3
+#errors
+(1,42): unexpected-start-tag-implies-end-tag
+(1,42): adoption-agency-1.3
+(1,46): adoption-agency-1.3
+(1,46): adoption-agency-1.3
+(1,55): unexpected-start-tag-implies-end-tag
+(1,55): adoption-agency-1.3
+(1,62): unexpected-start-tag-implies-end-tag
+(1,66): adoption-agency-1.3
+(1,67): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <div>
+| <b>
+| <nobr>
+| <nobr>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+
+#data
+<!DOCTYPE html><body><b><nobr>1<nobr></b><div><i><nobr>2<nobr></i>3
+#errors
+(1,37): unexpected-start-tag-implies-end-tag
+(1,41): adoption-agency-1.3
+(1,55): unexpected-start-tag-implies-end-tag
+(1,55): adoption-agency-1.3
+(1,62): unexpected-start-tag-implies-end-tag
+(1,66): adoption-agency-1.3
+(1,67): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <nobr>
+| <div>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+| <nobr>
+| <nobr>
+| "3"
+
+#data
+<!DOCTYPE html><body><b><nobr>1<nobr><ins></b><i><nobr>
+#errors
+(1,37): unexpected-start-tag-implies-end-tag
+(1,46): adoption-agency-1.3
+(1,55): unexpected-start-tag-implies-end-tag
+(1,55): adoption-agency-1.3
+(1,55): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <nobr>
+| <ins>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+
+#data
+<!DOCTYPE html><body><b><nobr>1<ins><nobr></b><i>2
+#errors
+(1,42): unexpected-start-tag-implies-end-tag
+(1,42): adoption-agency-1.3
+(1,46): adoption-agency-1.3
+(1,50): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| <nobr>
+| "1"
+| <ins>
+| <nobr>
+| <nobr>
+| <i>
+| "2"
+
+#data
+<!DOCTYPE html><body><b>1<nobr></b><i><nobr>2</i>
+#errors
+(1,35): adoption-agency-1.3
+(1,44): unexpected-start-tag-implies-end-tag
+(1,44): adoption-agency-1.3
+(1,49): adoption-agency-1.3
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <b>
+| "1"
+| <nobr>
+| <nobr>
+| <i>
+| <i>
+| <nobr>
+| "2"
+
+#data
+<p><code x</code></p>
+
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,11): invalid-character-in-attribute-name
+(1,12): unexpected-character-after-solidus-in-tag
+(1,21): unexpected-end-tag
+(2,0): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <code>
+| code=""
+| x<=""
+| <code>
+| code=""
+| x<=""
+| "
+"
+
+#data
+<!DOCTYPE html><svg><foreignObject><p><i></p>a
+#errors
+(1,45): unexpected-end-tag
+(1,46): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg foreignObject>
+| <p>
+| <i>
+| <i>
+| "a"
+
+#data
+<!DOCTYPE html><table><tr><td><svg><foreignObject><p><i></p>a
+#errors
+(1,60): unexpected-end-tag
+(1,61): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <svg svg>
+| <svg foreignObject>
+| <p>
+| <i>
+| <i>
+| "a"
+
+#data
+<!DOCTYPE html><math><mtext><p><i></p>a
+#errors
+(1,38): unexpected-end-tag
+(1,39): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mtext>
+| <p>
+| <i>
+| <i>
+| "a"
+
+#data
+<!DOCTYPE html><table><tr><td><math><mtext><p><i></p>a
+#errors
+(1,53): unexpected-end-tag
+(1,54): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <math math>
+| <math mtext>
+| <p>
+| <i>
+| <i>
+| "a"
+
+#data
+<!DOCTYPE html><body><div><!/div>a
+#errors
+(1,28): expected-dashes-or-doctype
+(1,34): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <div>
+| <!-- /div -->
+| "a"
+
+#data
+<button><p><button>
+#errors
+Line 1 Col 8 Unexpected start tag (button). Expected DOCTYPE.
+Line 1 Col 19 Unexpected start tag (button) implies end tag (button).
+Line 1 Col 19 Expected closing tag. Unexpected end of file.
+#document
+| <html>
+| <head>
+| <body>
+| <button>
+| <p>
+| <button>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests3.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests3.dat
new file mode 100644
index 000000000..c7583d99e
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests3.dat
@@ -0,0 +1,305 @@
+#data
+<head></head><style></style>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,20): unexpected-start-tag-out-of-my-head
+#document
+| <html>
+| <head>
+| <style>
+| <body>
+
+#data
+<head></head><script></script>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,21): unexpected-start-tag-out-of-my-head
+#document
+| <html>
+| <head>
+| <script>
+| <body>
+
+#data
+<head></head><!-- --><style></style><!-- --><script></script>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,28): unexpected-start-tag-out-of-my-head
+(1,52): unexpected-start-tag-out-of-my-head
+#document
+| <html>
+| <head>
+| <style>
+| <script>
+| <!-- -->
+| <!-- -->
+| <body>
+
+#data
+<head></head><!-- -->x<style></style><!-- --><script></script>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <!-- -->
+| <body>
+| "x"
+| <style>
+| <!-- -->
+| <script>
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>
+</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>
+foo</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "foo"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>
+
+foo</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "
+foo"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>
+foo
+</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "foo
+"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>x</pre><span>
+</span></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "x"
+| <span>
+| "
+"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>x
+y</pre></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "x
+y"
+
+#data
+<!DOCTYPE html><html><head></head><body><pre>x<div>
+y</pre></body></html>
+#errors
+(2,7): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "x"
+| <div>
+| "
+y"
+
+#data
+<!DOCTYPE html><pre>&#x0a;&#x0a;A</pre>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <pre>
+| "
+A"
+
+#data
+<!DOCTYPE html><HTML><META><HEAD></HEAD></HTML>
+#errors
+(1,33): two-heads-are-not-better-than-one
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <meta>
+| <body>
+
+#data
+<!DOCTYPE html><HTML><HEAD><head></HEAD></HTML>
+#errors
+(1,33): two-heads-are-not-better-than-one
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<textarea>foo<span>bar</span><i>baz
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,35): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "foo<span>bar</span><i>baz"
+
+#data
+<title>foo<span>bar</em><i>baz
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,30): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <title>
+| "foo<span>bar</em><i>baz"
+| <body>
+
+#data
+<!DOCTYPE html><textarea>
+</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+
+#data
+<!DOCTYPE html><textarea>
+foo</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "foo"
+
+#data
+<!DOCTYPE html><textarea>
+
+foo</textarea>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <textarea>
+| "
+foo"
+
+#data
+<!DOCTYPE html><html><head></head><body><ul><li><div><p><li></ul></body></html>
+#errors
+(1,60): end-tag-too-early
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| <div>
+| <p>
+| <li>
+
+#data
+<!doctype html><nobr><nobr><nobr>
+#errors
+(1,27): unexpected-start-tag-implies-end-tag
+(1,33): unexpected-start-tag-implies-end-tag
+(1,33): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <nobr>
+| <nobr>
+| <nobr>
+
+#data
+<!doctype html><nobr><nobr></nobr><nobr>
+#errors
+(1,27): unexpected-start-tag-implies-end-tag
+(1,40): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <nobr>
+| <nobr>
+| <nobr>
+
+#data
+<!doctype html><html><body><p><table></table></body></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <table>
+
+#data
+<p><table></table>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <table>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests4.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests4.dat
new file mode 100644
index 000000000..0a6174c36
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests4.dat
@@ -0,0 +1,58 @@
+#data
+direct div content
+#errors
+#document-fragment
+div
+#document
+| "direct div content"
+
+#data
+direct textarea content
+#errors
+#document-fragment
+textarea
+#document
+| "direct textarea content"
+
+#data
+textarea content with <em>pseudo</em> <foo>markup
+#errors
+#document-fragment
+textarea
+#document
+| "textarea content with <em>pseudo</em> <foo>markup"
+
+#data
+this is &#x0043;DATA inside a <style> element
+#errors
+#document-fragment
+style
+#document
+| "this is &#x0043;DATA inside a <style> element"
+
+#data
+</plaintext>
+#errors
+#document-fragment
+plaintext
+#document
+| "</plaintext>"
+
+#data
+setting html's innerHTML
+#errors
+#document-fragment
+html
+#document
+| <head>
+| <body>
+| "setting html's innerHTML"
+
+#data
+<title>setting head's innerHTML</title>
+#errors
+#document-fragment
+head
+#document
+| <title>
+| "setting head's innerHTML"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests5.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests5.dat
new file mode 100644
index 000000000..1ef8cae42
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests5.dat
@@ -0,0 +1,210 @@
+#data
+<style> <!-- </style>x
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| " <!-- "
+| <body>
+| "x"
+
+#data
+<style> <!-- </style> --> </style>x
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,34): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <style>
+| " <!-- "
+| " "
+| <body>
+| "--> x"
+
+#data
+<style> <!--> </style>x
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| " <!--> "
+| <body>
+| "x"
+
+#data
+<style> <!---> </style>x
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| " <!---> "
+| <body>
+| "x"
+
+#data
+<iframe> <!---> </iframe>x
+#errors
+(1,8): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| " <!---> "
+| "x"
+
+#data
+<iframe> <!--- </iframe>->x</iframe> --> </iframe>x
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,36): unexpected-end-tag
+(1,50): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <iframe>
+| " <!--- "
+| "->x --> x"
+
+#data
+<script> <!-- </script> --> </script>x
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,37): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <script>
+| " <!-- "
+| " "
+| <body>
+| "--> x"
+
+#data
+<title> <!-- </title> --> </title>x
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,34): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <title>
+| " <!-- "
+| " "
+| <body>
+| "--> x"
+
+#data
+<textarea> <!--- </textarea>->x</textarea> --> </textarea>x
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,42): unexpected-end-tag
+(1,58): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <textarea>
+| " <!--- "
+| "->x --> x"
+
+#data
+<style> <!</-- </style>x
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <style>
+| " <!</-- "
+| <body>
+| "x"
+
+#data
+<p><xmp></xmp>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <xmp>
+
+#data
+<xmp> <!-- > --> </xmp>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <xmp>
+| " <!-- > --> "
+
+#data
+<title>&amp;</title>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <title>
+| "&"
+| <body>
+
+#data
+<title><!--&amp;--></title>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <title>
+| "<!--&-->"
+| <body>
+
+#data
+<title><!--</title>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <title>
+| "<!--"
+| <body>
+
+#data
+<noscript><!--</noscript>--></noscript>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,39): unexpected-end-tag
+#script-on
+#document
+| <html>
+| <head>
+| <noscript>
+| "<!--"
+| <body>
+| "-->"
+
+#data
+<noscript><!--</noscript>--></noscript>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#script-off
+#document
+| <html>
+| <head>
+| <noscript>
+| <!-- </noscript> -->
+| <body>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests6.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests6.dat
new file mode 100644
index 000000000..d774fc234
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests6.dat
@@ -0,0 +1,659 @@
+#data
+<!doctype html></head> <head>
+#errors
+(1,29): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| " "
+| <body>
+
+#data
+<!doctype html><form><div></form><div>
+#errors
+(1,33): end-tag-too-early-ignored
+(1,38): expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <form>
+| <div>
+| <div>
+
+#data
+<!doctype html><title>&amp;</title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "&"
+| <body>
+
+#data
+<!doctype html><title><!--&amp;--></title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "<!--&-->"
+| <body>
+
+#data
+<!doctype>
+#errors
+(1,9): need-space-after-doctype
+(1,10): expected-doctype-name-but-got-right-bracket
+(1,10): unknown-doctype
+#document
+| <!DOCTYPE >
+| <html>
+| <head>
+| <body>
+
+#data
+<!---x
+#errors
+(1,6): eof-in-comment
+(1,6): expected-doctype-but-got-eof
+#document
+| <!-- -x -->
+| <html>
+| <head>
+| <body>
+
+#data
+<body>
+<div>
+#errors
+(1,6): unexpected-start-tag
+(2,5): expected-closing-tag-but-got-eof
+#document-fragment
+div
+#document
+| "
+"
+| <div>
+
+#data
+<frameset></frameset>
+foo
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(2,1): unexpected-char-after-frameset
+(2,2): unexpected-char-after-frameset
+(2,3): unexpected-char-after-frameset
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+
+#data
+<frameset></frameset>
+<noframes>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(2,10): expected-named-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+| <noframes>
+
+#data
+<frameset></frameset>
+<div>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(2,5): unexpected-start-tag-after-frameset
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+
+#data
+<frameset></frameset>
+</html>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+
+#data
+<frameset></frameset>
+</div>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(2,6): unexpected-end-tag-after-frameset
+#document
+| <html>
+| <head>
+| <frameset>
+| "
+"
+
+#data
+<form><form>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,12): unexpected-start-tag
+(1,12): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <form>
+
+#data
+<button><button>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,16): unexpected-start-tag-implies-end-tag
+(1,16): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <button>
+| <button>
+
+#data
+<table><tr><td></th>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,20): unexpected-end-tag
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><caption><td>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,20): unexpected-cell-in-table-body
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><caption><div>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,21): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <div>
+
+#data
+</caption><div>
+#errors
+(1,10): XXX-undefined-error
+(1,15): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <div>
+
+#data
+<table><caption><div></caption>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,31): expected-one-end-tag-but-got-another
+(1,31): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <div>
+
+#data
+<table><caption></table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+
+#data
+</table><div>
+#errors
+(1,8): unexpected-end-tag
+(1,13): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <div>
+
+#data
+<table><caption></body></col></colgroup></html></tbody></td></tfoot></th></thead></tr>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,23): unexpected-end-tag
+(1,29): unexpected-end-tag
+(1,40): unexpected-end-tag
+(1,47): unexpected-end-tag
+(1,55): unexpected-end-tag
+(1,60): unexpected-end-tag
+(1,68): unexpected-end-tag
+(1,73): unexpected-end-tag
+(1,81): unexpected-end-tag
+(1,86): unexpected-end-tag
+(1,86): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+
+#data
+<table><caption><div></div>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,27): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <div>
+
+#data
+<table><tr><td></body></caption></col></colgroup></html>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,22): unexpected-end-tag
+(1,32): unexpected-end-tag
+(1,38): unexpected-end-tag
+(1,49): unexpected-end-tag
+(1,56): unexpected-end-tag
+(1,56): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+</table></tbody></tfoot></thead></tr><div>
+#errors
+(1,8): unexpected-end-tag
+(1,16): unexpected-end-tag
+(1,24): unexpected-end-tag
+(1,32): unexpected-end-tag
+(1,37): unexpected-end-tag
+(1,42): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <div>
+
+#data
+<table><colgroup>foo
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,18): foster-parenting-character-in-table
+(1,19): foster-parenting-character-in-table
+(1,20): foster-parenting-character-in-table
+(1,20): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| "foo"
+| <table>
+| <colgroup>
+
+#data
+foo<col>
+#errors
+(1,1): unexpected-character-in-colgroup
+(1,2): unexpected-character-in-colgroup
+(1,3): unexpected-character-in-colgroup
+#document-fragment
+colgroup
+#document
+| <col>
+
+#data
+<table><colgroup></col>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,23): no-end-tag
+(1,23): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <colgroup>
+
+#data
+<frameset><div>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,15): unexpected-start-tag-in-frameset
+(1,15): eof-in-frameset
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+</frameset><frame>
+#errors
+(1,11): unexpected-frameset-in-frameset-innerhtml
+#document-fragment
+frameset
+#document
+| <frame>
+
+#data
+<frameset></div>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+(1,16): unexpected-end-tag-in-frameset
+(1,16): eof-in-frameset
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+</body><div>
+#errors
+(1,7): unexpected-close-tag
+(1,12): expected-closing-tag-but-got-eof
+#document-fragment
+body
+#document
+| <div>
+
+#data
+<table><tr><div>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,16): unexpected-start-tag-implies-table-voodoo
+(1,16): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <table>
+| <tbody>
+| <tr>
+
+#data
+</tr><td>
+#errors
+(1,5): unexpected-end-tag
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+</tbody></tfoot></thead><td>
+#errors
+(1,8): unexpected-end-tag
+(1,16): unexpected-end-tag
+(1,24): unexpected-end-tag
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<table><tr><div><td>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,16): foster-parenting-start-tag
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<caption><col><colgroup><tbody><tfoot><thead><tr>
+#errors
+(1,9): unexpected-start-tag
+(1,14): unexpected-start-tag
+(1,24): unexpected-start-tag
+(1,31): unexpected-start-tag
+(1,38): unexpected-start-tag
+(1,45): unexpected-start-tag
+#document-fragment
+tbody
+#document
+| <tr>
+
+#data
+<table><tbody></thead>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,22): unexpected-end-tag-in-table-body
+(1,22): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+
+#data
+</table><tr>
+#errors
+(1,8): unexpected-end-tag
+#document-fragment
+tbody
+#document
+| <tr>
+
+#data
+<table><tbody></body></caption></col></colgroup></html></td></th></tr>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,21): unexpected-end-tag-in-table-body
+(1,31): unexpected-end-tag-in-table-body
+(1,37): unexpected-end-tag-in-table-body
+(1,48): unexpected-end-tag-in-table-body
+(1,55): unexpected-end-tag-in-table-body
+(1,60): unexpected-end-tag-in-table-body
+(1,65): unexpected-end-tag-in-table-body
+(1,70): unexpected-end-tag-in-table-body
+(1,70): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+
+#data
+<table><tbody></div>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,20): unexpected-end-tag-implies-table-voodoo
+(1,20): end-tag-too-early
+(1,20): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+
+#data
+<table><table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,14): unexpected-start-tag-implies-end-tag
+(1,14): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <table>
+
+#data
+<table></body></caption></col></colgroup></html></tbody></td></tfoot></th></thead></tr>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,14): unexpected-end-tag
+(1,24): unexpected-end-tag
+(1,30): unexpected-end-tag
+(1,41): unexpected-end-tag
+(1,48): unexpected-end-tag
+(1,56): unexpected-end-tag
+(1,61): unexpected-end-tag
+(1,69): unexpected-end-tag
+(1,74): unexpected-end-tag
+(1,82): unexpected-end-tag
+(1,87): unexpected-end-tag
+(1,87): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+
+#data
+</table><tr>
+#errors
+(1,8): unexpected-end-tag
+#document-fragment
+table
+#document
+| <tbody>
+| <tr>
+
+#data
+<body></body></html>
+#errors
+(1,20): unexpected-end-tag-after-body-innerhtml
+#document-fragment
+html
+#document
+| <head>
+| <body>
+
+#data
+<html><frameset></frameset></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
+| " "
+
+#data
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"><html></html>
+#errors
+#document
+| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "">
+| <html>
+| <head>
+| <body>
+
+#data
+<param><frameset></frameset>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,17): unexpected-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+<source><frameset></frameset>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,18): unexpected-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+<track><frameset></frameset>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,17): unexpected-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+</html><frameset></frameset>
+#errors
+(1,7): expected-doctype-but-got-end-tag
+(1,17): expected-eof-but-got-start-tag
+(1,17): unexpected-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
+
+#data
+</body><frameset></frameset>
+#errors
+(1,7): expected-doctype-but-got-end-tag
+(1,17): unexpected-start-tag-after-body
+(1,17): unexpected-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests7.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests7.dat
new file mode 100644
index 000000000..d941d3d70
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests7.dat
@@ -0,0 +1,403 @@
+#data
+<!doctype html><body><title>X</title>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "X"
+
+#data
+<!doctype html><table><title>X</title></table>
+#errors
+(1,29): unexpected-start-tag-implies-table-voodoo
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <title>
+| "X"
+| <table>
+
+#data
+<!doctype html><head></head><title>X</title>
+#errors
+(1,35): unexpected-start-tag-out-of-my-head
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "X"
+| <body>
+
+#data
+<!doctype html></head><title>X</title>
+#errors
+(1,29): unexpected-start-tag-out-of-my-head
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <title>
+| "X"
+| <body>
+
+#data
+<!doctype html><table><meta></table>
+#errors
+(1,28): unexpected-start-tag-implies-table-voodoo
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <meta>
+| <table>
+
+#data
+<!doctype html><table>X<tr><td><table> <meta></table></table>
+#errors
+unexpected text in table
+(1,45): unexpected-start-tag-implies-table-voodoo
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <meta>
+| <table>
+| " "
+
+#data
+<!doctype html><html> <head>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!doctype html> <head>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!doctype html><table><style> <tr>x </style> </table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <style>
+| " <tr>x "
+| " "
+
+#data
+<!doctype html><table><TBODY><script> <tr>x </script> </table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <script>
+| " <tr>x "
+| " "
+
+#data
+<!doctype html><p><applet><p>X</p></applet>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <p>
+| <applet>
+| <p>
+| "X"
+
+#data
+<!doctype html><listing>
+X</listing>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <listing>
+| "X"
+
+#data
+<!doctype html><select><input>X
+#errors
+(1,30): unexpected-input-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <input>
+| "X"
+
+#data
+<!doctype html><select><select>X
+#errors
+(1,31): unexpected-select-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "X"
+
+#data
+<!doctype html><table><input type=hidDEN></table>
+#errors
+(1,41): unexpected-hidden-input-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table>X<input type=hidDEN></table>
+#errors
+(1,23): foster-parenting-character
+(1,42): unexpected-hidden-input-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| "X"
+| <table>
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table> <input type=hidDEN></table>
+#errors
+(1,43): unexpected-hidden-input-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| " "
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table> <input type='hidDEN'></table>
+#errors
+(1,45): unexpected-hidden-input-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| " "
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table><input type=" hidden"><input type=hidDEN></table>
+#errors
+(1,44): unexpected-start-tag-implies-table-voodoo
+(1,63): unexpected-hidden-input-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <input>
+| type=" hidden"
+| <table>
+| <input>
+| type="hidDEN"
+
+#data
+<!doctype html><table><select>X<tr>
+#errors
+(1,30): unexpected-start-tag-implies-table-voodoo
+(1,35): unexpected-table-element-start-tag-in-select-in-table
+(1,35): eof-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "X"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!doctype html><select>X</select>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "X"
+
+#data
+<!DOCTYPE hTmL><html></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<!DOCTYPE HTML><html></html>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+
+#data
+<body>X</body></body>
+#errors
+(1,21): unexpected-end-tag-after-body
+#document-fragment
+html
+#document
+| <head>
+| <body>
+| "X"
+
+#data
+<div><p>a</x> b
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,13): unexpected-end-tag
+(1,15): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <p>
+| "a b"
+
+#data
+<table><tr><td><code></code> </table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <code>
+| " "
+
+#data
+<table><b><tr><td>aaa</td></tr>bbb</table>ccc
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,10): foster-parenting-start-tag
+(1,32): foster-parenting-character
+(1,33): foster-parenting-character
+(1,34): foster-parenting-character
+(1,45): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <b>
+| "bbb"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "aaa"
+| <b>
+| "ccc"
+
+#data
+A<table><tr> B</tr> B</table>
+#errors
+(1,1): expected-doctype-but-got-chars
+(1,13): foster-parenting-character
+(1,14): foster-parenting-character
+(1,20): foster-parenting-character
+(1,21): foster-parenting-character
+#document
+| <html>
+| <head>
+| <body>
+| "A B B"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+A<table><tr> B</tr> </em>C</table>
+#errors
+(1,1): expected-doctype-but-got-chars
+(1,13): foster-parenting-character
+(1,14): foster-parenting-character
+(1,20): foster-parenting-character
+(1,25): unexpected-end-tag
+(1,26): foster-parenting-character
+#document
+| <html>
+| <head>
+| <body>
+| "A BC"
+| <table>
+| <tbody>
+| <tr>
+| " "
+
+#data
+<select><keygen>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,16): unexpected-input-in-select
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <keygen>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests8.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests8.dat
new file mode 100644
index 000000000..33dd96d33
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests8.dat
@@ -0,0 +1,151 @@
+#data
+<div>
+<div></div>
+</span>x
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(3,7): unexpected-end-tag
+(3,8): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "
+"
+| <div>
+| "
+x"
+
+#data
+<div>x<div></div>
+</span>x
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(2,7): unexpected-end-tag
+(2,8): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "x"
+| <div>
+| "
+x"
+
+#data
+<div>x<div></div>x</span>x
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,25): unexpected-end-tag
+(1,26): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "x"
+| <div>
+| "xx"
+
+#data
+<div>x<div></div>y</span>z
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,25): unexpected-end-tag
+(1,26): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "x"
+| <div>
+| "yz"
+
+#data
+<table><div>x<div></div>x</span>x
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,12): foster-parenting-start-tag
+(1,13): foster-parenting-character
+(1,18): foster-parenting-start-tag
+(1,24): foster-parenting-end-tag
+(1,25): foster-parenting-start-tag
+(1,32): foster-parenting-end-tag
+(1,32): unexpected-end-tag
+(1,33): foster-parenting-character
+(1,33): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "x"
+| <div>
+| "xx"
+| <table>
+
+#data
+x<table>x
+#errors
+(1,1): expected-doctype-but-got-chars
+(1,9): foster-parenting-character
+(1,9): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| "xx"
+| <table>
+
+#data
+x<table><table>x
+#errors
+(1,1): expected-doctype-but-got-chars
+(1,15): unexpected-start-tag-implies-end-tag
+(1,16): foster-parenting-character
+(1,16): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| "x"
+| <table>
+| "x"
+| <table>
+
+#data
+<b>a<div></div><div></b>y
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,24): adoption-agency-1.3
+(1,25): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| "a"
+| <div>
+| <div>
+| <b>
+| "y"
+
+#data
+<a><div><p></a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,15): adoption-agency-1.3
+(1,15): adoption-agency-1.3
+(1,15): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <div>
+| <a>
+| <p>
+| <a>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests9.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests9.dat
new file mode 100644
index 000000000..f8d04b23b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests9.dat
@@ -0,0 +1,472 @@
+#data
+<!DOCTYPE html><math></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+
+#data
+<!DOCTYPE html><body><math></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+
+#data
+<!DOCTYPE html><math><mi>
+#errors
+(1,25) expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+
+#data
+<!DOCTYPE html><math><annotation-xml><svg><u>
+#errors
+(1,45) unexpected-html-element-in-foreign-content
+(1,45) expected-closing-tag-but-got-eof
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math annotation-xml>
+| <svg svg>
+| <u>
+
+#data
+<!DOCTYPE html><body><select><math></math></select>
+#errors
+(1,35) unexpected-start-tag-in-select
+(1,42) unexpected-end-tag-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+
+#data
+<!DOCTYPE html><body><select><option><math></math></option></select>
+#errors
+(1,43) unexpected-start-tag-in-select
+(1,50) unexpected-end-tag-in-select
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+
+#data
+<!DOCTYPE html><body><table><math></math></table>
+#errors
+(1,34) unexpected-start-tag-implies-table-voodoo
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <table>
+
+#data
+<!DOCTYPE html><body><table><math><mi>foo</mi></math></table>
+#errors
+(1,34) foster-parenting-start-token
+(1,39) foster-parenting-character
+(1,40) foster-parenting-character
+(1,41) foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <table>
+
+#data
+<!DOCTYPE html><body><table><math><mi>foo</mi><mi>bar</mi></math></table>
+#errors
+(1,34) foster-parenting-start-tag
+(1,39) foster-parenting-character
+(1,40) foster-parenting-character
+(1,41) foster-parenting-character
+(1,51) foster-parenting-character
+(1,52) foster-parenting-character
+(1,53) foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <table>
+
+#data
+<!DOCTYPE html><body><table><tbody><math><mi>foo</mi><mi>bar</mi></math></tbody></table>
+#errors
+(1,41) foster-parenting-start-tag
+(1,46) foster-parenting-character
+(1,47) foster-parenting-character
+(1,48) foster-parenting-character
+(1,58) foster-parenting-character
+(1,59) foster-parenting-character
+(1,60) foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <table>
+| <tbody>
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><math><mi>foo</mi><mi>bar</mi></math></tr></tbody></table>
+#errors
+(1,45) foster-parenting-start-tag
+(1,50) foster-parenting-character
+(1,51) foster-parenting-character
+(1,52) foster-parenting-character
+(1,62) foster-parenting-character
+(1,63) foster-parenting-character
+(1,64) foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><td><math><mi>foo</mi><mi>bar</mi></math></td></tr></tbody></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+
+#data
+<!DOCTYPE html><body><table><tbody><tr><td><math><mi>foo</mi><mi>bar</mi></math><p>baz</td></tr></tbody></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi></math><p>baz</caption></table>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
+#errors
+(1,70) unexpected-html-element-in-foreign-content
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi>baz</table><p>quux
+#errors
+(1,78) unexpected-end-tag
+(1,78) expected-one-end-tag-but-got-another
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <caption>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| "baz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><colgroup><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
+#errors
+(1,44) foster-parenting-start-tag
+(1,49) foster-parenting-character
+(1,50) foster-parenting-character
+(1,51) foster-parenting-character
+(1,61) foster-parenting-character
+(1,62) foster-parenting-character
+(1,63) foster-parenting-character
+(1,71) unexpected-html-element-in-foreign-content
+(1,71) foster-parenting-start-tag
+(1,63) foster-parenting-character
+(1,63) foster-parenting-character
+(1,63) foster-parenting-character
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+| <table>
+| <colgroup>
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><tr><td><select><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
+#errors
+(1,50) unexpected-start-tag-in-select
+(1,54) unexpected-start-tag-in-select
+(1,62) unexpected-end-tag-in-select
+(1,66) unexpected-start-tag-in-select
+(1,74) unexpected-end-tag-in-select
+(1,77) unexpected-start-tag-in-select
+(1,88) unexpected-table-element-end-tag-in-select-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <select>
+| "foobarbaz"
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body><table><select><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
+#errors
+(1,36) unexpected-start-tag-implies-table-voodoo
+(1,42) unexpected-start-tag-in-select
+(1,46) unexpected-start-tag-in-select
+(1,54) unexpected-end-tag-in-select
+(1,58) unexpected-start-tag-in-select
+(1,66) unexpected-end-tag-in-select
+(1,69) unexpected-start-tag-in-select
+(1,80) unexpected-table-element-end-tag-in-select-in-table
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <select>
+| "foobarbaz"
+| <table>
+| <p>
+| "quux"
+
+#data
+<!DOCTYPE html><body></body></html><math><mi>foo</mi><mi>bar</mi><p>baz
+#errors
+(1,41) expected-eof-but-got-start-tag
+(1,68) unexpected-html-element-in-foreign-content
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><body></body><math><mi>foo</mi><mi>bar</mi><p>baz
+#errors
+(1,34) unexpected-start-tag-after-body
+(1,61) unexpected-html-element-in-foreign-content
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mi>
+| "foo"
+| <math mi>
+| "bar"
+| <p>
+| "baz"
+
+#data
+<!DOCTYPE html><frameset><math><mi></mi><mi></mi><p><span>
+#errors
+(1,31) unexpected-start-tag-in-frameset
+(1,35) unexpected-start-tag-in-frameset
+(1,40) unexpected-end-tag-in-frameset
+(1,44) unexpected-start-tag-in-frameset
+(1,49) unexpected-end-tag-in-frameset
+(1,52) unexpected-start-tag-in-frameset
+(1,58) unexpected-start-tag-in-frameset
+(1,58) eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><frameset></frameset><math><mi></mi><mi></mi><p><span>
+#errors
+(1,42) unexpected-start-tag-after-frameset
+(1,46) unexpected-start-tag-after-frameset
+(1,51) unexpected-end-tag-after-frameset
+(1,55) unexpected-start-tag-after-frameset
+(1,60) unexpected-end-tag-after-frameset
+(1,63) unexpected-start-tag-after-frameset
+(1,69) unexpected-start-tag-after-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!DOCTYPE html><body xlink:href=foo><math xlink:href=foo></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| <math math>
+| xlink href="foo"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo></mi></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <math math>
+| <math mi>
+| xlink href="foo"
+| xml lang="en"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo /></math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <math math>
+| <math mi>
+| xlink href="foo"
+| xml lang="en"
+
+#data
+<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo />bar</math>
+#errors
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| xlink:href="foo"
+| xml:lang="en"
+| <math math>
+| <math mi>
+| xlink href="foo"
+| xml lang="en"
+| "bar"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests_innerHTML_1.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests_innerHTML_1.dat
new file mode 100644
index 000000000..9c5c18220
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tests_innerHTML_1.dat
@@ -0,0 +1,891 @@
+#data
+<body><span>
+#errors
+(1,6): unexpected-start-tag
+(1,12): expected-closing-tag-but-got-eof
+#document-fragment
+body
+#document
+| <span>
+
+#data
+<span><body>
+#errors
+(1,12): unexpected-start-tag
+(1,12): expected-closing-tag-but-got-eof
+#document-fragment
+body
+#document
+| <span>
+
+#data
+<span><body>
+#errors
+(1,12): unexpected-start-tag
+(1,12): expected-closing-tag-but-got-eof
+#document-fragment
+div
+#document
+| <span>
+
+#data
+<body><span>
+#errors
+(1,12): expected-closing-tag-but-got-eof
+#document-fragment
+html
+#document
+| <head>
+| <body>
+| <span>
+
+#data
+<frameset><span>
+#errors
+(1,10): unexpected-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document-fragment
+body
+#document
+| <span>
+
+#data
+<span><frameset>
+#errors
+(1,16): unexpected-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document-fragment
+body
+#document
+| <span>
+
+#data
+<span><frameset>
+#errors
+(1,16): unexpected-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document-fragment
+div
+#document
+| <span>
+
+#data
+<frameset><span>
+#errors
+(1,16): unexpected-start-tag-in-frameset
+(1,16): eof-in-frameset
+#document-fragment
+html
+#document
+| <head>
+| <frameset>
+
+#data
+<table><tr>
+#errors
+(1,7): unexpected-start-tag
+#document-fragment
+table
+#document
+| <tbody>
+| <tr>
+
+#data
+</table><tr>
+#errors
+(1,8): unexpected-end-tag
+#document-fragment
+table
+#document
+| <tbody>
+| <tr>
+
+#data
+<a>
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+(1,3): eof-in-table
+#document-fragment
+table
+#document
+| <a>
+
+#data
+<a>
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+(1,3): eof-in-table
+#document-fragment
+table
+#document
+| <a>
+
+#data
+<a><caption>a
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+(1,13): expected-closing-tag-but-got-eof
+#document-fragment
+table
+#document
+| <a>
+| <caption>
+| "a"
+
+#data
+<a><colgroup><col>
+#errors
+(1,3): foster-parenting-start-token
+(1,18): expected-closing-tag-but-got-eof
+#document-fragment
+table
+#document
+| <a>
+| <colgroup>
+| <col>
+
+#data
+<a><tbody><tr>
+#errors
+(1,3): foster-parenting-start-tag
+#document-fragment
+table
+#document
+| <a>
+| <tbody>
+| <tr>
+
+#data
+<a><tfoot><tr>
+#errors
+(1,3): foster-parenting-start-tag
+#document-fragment
+table
+#document
+| <a>
+| <tfoot>
+| <tr>
+
+#data
+<a><thead><tr>
+#errors
+(1,3): foster-parenting-start-tag
+#document-fragment
+table
+#document
+| <a>
+| <thead>
+| <tr>
+
+#data
+<a><tr>
+#errors
+(1,3): foster-parenting-start-tag
+#document-fragment
+table
+#document
+| <a>
+| <tbody>
+| <tr>
+
+#data
+<a><th>
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+(1,7): unexpected-cell-in-table-body
+#document-fragment
+table
+#document
+| <a>
+| <tbody>
+| <tr>
+| <th>
+
+#data
+<a><td>
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+(1,7): unexpected-cell-in-table-body
+#document-fragment
+table
+#document
+| <a>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table></table><tbody>
+#errors
+(1,22): unexpected-start-tag
+#document-fragment
+caption
+#document
+| <table>
+
+#data
+</table><span>
+#errors
+(1,8): unexpected-end-tag
+(1,14): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+
+#data
+<span></table>
+#errors
+(1,14): unexpected-end-tag
+(1,14): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+
+#data
+</caption><span>
+#errors
+(1,10): XXX-undefined-error
+(1,16): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+
+#data
+<span></caption><span>
+#errors
+(1,16): XXX-undefined-error
+(1,22): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><caption><span>
+#errors
+(1,15): unexpected-start-tag
+(1,21): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><col><span>
+#errors
+(1,11): unexpected-start-tag
+(1,17): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><colgroup><span>
+#errors
+(1,16): unexpected-start-tag
+(1,22): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><html><span>
+#errors
+(1,12): non-html-root
+(1,18): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><tbody><span>
+#errors
+(1,13): unexpected-start-tag
+(1,19): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><td><span>
+#errors
+(1,10): unexpected-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><tfoot><span>
+#errors
+(1,13): unexpected-start-tag
+(1,19): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><thead><span>
+#errors
+(1,13): unexpected-start-tag
+(1,19): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><th><span>
+#errors
+(1,10): unexpected-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span><tr><span>
+#errors
+(1,10): unexpected-start-tag
+(1,16): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+<span></table><span>
+#errors
+(1,14): unexpected-end-tag
+(1,20): expected-closing-tag-but-got-eof
+#document-fragment
+caption
+#document
+| <span>
+| <span>
+
+#data
+</colgroup><col>
+#errors
+(1,11): XXX-undefined-error
+#document-fragment
+colgroup
+#document
+| <col>
+
+#data
+<a><col>
+#errors
+(1,3): XXX-undefined-error
+#document-fragment
+colgroup
+#document
+| <col>
+
+#data
+<caption><a>
+#errors
+(1,9): XXX-undefined-error
+(1,12): unexpected-start-tag-implies-table-voodoo
+(1,12): eof-in-table
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<col><a>
+#errors
+(1,5): XXX-undefined-error
+(1,8): unexpected-start-tag-implies-table-voodoo
+(1,8): eof-in-table
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<colgroup><a>
+#errors
+(1,10): XXX-undefined-error
+(1,13): unexpected-start-tag-implies-table-voodoo
+(1,13): eof-in-table
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<tbody><a>
+#errors
+(1,7): XXX-undefined-error
+(1,10): unexpected-start-tag-implies-table-voodoo
+(1,10): eof-in-table
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<tfoot><a>
+#errors
+(1,7): XXX-undefined-error
+(1,10): unexpected-start-tag-implies-table-voodoo
+(1,10): eof-in-table
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<thead><a>
+#errors
+(1,7): XXX-undefined-error
+(1,10): unexpected-start-tag-implies-table-voodoo
+(1,10): eof-in-table
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+</table><a>
+#errors
+(1,8): XXX-undefined-error
+(1,11): unexpected-start-tag-implies-table-voodoo
+(1,11): eof-in-table
+#document-fragment
+tbody
+#document
+| <a>
+
+#data
+<a><tr>
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+#document-fragment
+tbody
+#document
+| <a>
+| <tr>
+
+#data
+<a><td>
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+(1,7): unexpected-cell-in-table-body
+#document-fragment
+tbody
+#document
+| <a>
+| <tr>
+| <td>
+
+#data
+<a><td>
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+(1,7): unexpected-cell-in-table-body
+#document-fragment
+tbody
+#document
+| <a>
+| <tr>
+| <td>
+
+#data
+<a><td>
+#errors
+(1,3): unexpected-start-tag-implies-table-voodoo
+(1,7): unexpected-cell-in-table-body
+#document-fragment
+tbody
+#document
+| <a>
+| <tr>
+| <td>
+
+#data
+<td><table><tbody><a><tr>
+#errors
+(1,4): unexpected-cell-in-table-body
+(1,21): unexpected-start-tag-implies-table-voodoo
+(1,25): eof-in-table
+#document-fragment
+tbody
+#document
+| <tr>
+| <td>
+| <a>
+| <table>
+| <tbody>
+| <tr>
+
+#data
+</tr><td>
+#errors
+(1,5): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<td><table><a><tr></tr><tr>
+#errors
+(1,14): unexpected-start-tag-implies-table-voodoo
+(1,27): eof-in-table
+#document-fragment
+tr
+#document
+| <td>
+| <a>
+| <table>
+| <tbody>
+| <tr>
+| <tr>
+
+#data
+<caption><td>
+#errors
+(1,9): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<col><td>
+#errors
+(1,5): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<colgroup><td>
+#errors
+(1,10): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<tbody><td>
+#errors
+(1,7): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<tfoot><td>
+#errors
+(1,7): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<thead><td>
+#errors
+(1,7): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<tr><td>
+#errors
+(1,4): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+</table><td>
+#errors
+(1,8): XXX-undefined-error
+#document-fragment
+tr
+#document
+| <td>
+
+#data
+<td><table></table><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+| <table>
+| <td>
+
+#data
+<td><table></table><td>
+#errors
+#document-fragment
+tr
+#document
+| <td>
+| <table>
+| <td>
+
+#data
+<caption><a>
+#errors
+(1,9): XXX-undefined-error
+(1,12): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<col><a>
+#errors
+(1,5): XXX-undefined-error
+(1,8): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<colgroup><a>
+#errors
+(1,10): XXX-undefined-error
+(1,13): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<tbody><a>
+#errors
+(1,7): XXX-undefined-error
+(1,10): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<tfoot><a>
+#errors
+(1,7): XXX-undefined-error
+(1,10): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<th><a>
+#errors
+(1,4): XXX-undefined-error
+(1,7): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<thead><a>
+#errors
+(1,7): XXX-undefined-error
+(1,10): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<tr><a>
+#errors
+(1,4): XXX-undefined-error
+(1,7): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</table><a>
+#errors
+(1,8): XXX-undefined-error
+(1,11): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</tbody><a>
+#errors
+(1,8): XXX-undefined-error
+(1,11): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</td><a>
+#errors
+(1,5): unexpected-end-tag
+(1,8): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</tfoot><a>
+#errors
+(1,8): XXX-undefined-error
+(1,11): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</thead><a>
+#errors
+(1,8): XXX-undefined-error
+(1,11): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</th><a>
+#errors
+(1,5): unexpected-end-tag
+(1,8): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+</tr><a>
+#errors
+(1,5): XXX-undefined-error
+(1,8): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <a>
+
+#data
+<table><td><td>
+#errors
+(1,11): unexpected-cell-in-table-body
+(1,15): expected-closing-tag-but-got-eof
+#document-fragment
+td
+#document
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| <td>
+
+#data
+</select><option>
+#errors
+(1,9): XXX-undefined-error
+(1,17): eof-in-select
+#document-fragment
+select
+#document
+| <option>
+
+#data
+<input><option>
+#errors
+(1,7): unexpected-input-in-select
+(1,15): eof-in-select
+#document-fragment
+select
+#document
+| <option>
+
+#data
+<keygen><option>
+#errors
+(1,8): unexpected-input-in-select
+(1,16): eof-in-select
+#document-fragment
+select
+#document
+| <option>
+
+#data
+<textarea><option>
+#errors
+(1,10): unexpected-input-in-select
+(1,18): eof-in-select
+#document-fragment
+select
+#document
+| <option>
+
+#data
+</html><!--abc-->
+#errors
+(1,7): unexpected-end-tag-after-body-innerhtml
+#document-fragment
+html
+#document
+| <head>
+| <body>
+| <!-- abc -->
+
+#data
+</frameset><frame>
+#errors
+(1,11): unexpected-frameset-in-frameset-innerhtml
+#document-fragment
+frameset
+#document
+| <frame>
+
+#data
+#errors
+#document-fragment
+html
+#document
+| <head>
+| <body>
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tricky01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tricky01.dat
new file mode 100644
index 000000000..753502a26
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/tricky01.dat
@@ -0,0 +1,336 @@
+#data
+<b><p>Bold </b> Not bold</p>
+Also not bold.
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,15): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <b>
+| <p>
+| <b>
+| "Bold "
+| " Not bold"
+| "
+Also not bold."
+
+#data
+<html>
+<font color=red><i>Italic and Red<p>Italic and Red </font> Just italic.</p> Italic only.</i> Plain
+<p>I should not be red. <font color=red>Red. <i>Italic and red.</p>
+<p>Italic and red. </i> Red.</font> I should not be red.</p>
+<b>Bold <i>Bold and italic</b> Only Italic </i> Plain
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(2,58): adoption-agency-1.3
+(3,67): unexpected-end-tag
+(4,23): adoption-agency-1.3
+(4,35): adoption-agency-1.3
+(5,30): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <font>
+| color="red"
+| <i>
+| "Italic and Red"
+| <i>
+| <p>
+| <font>
+| color="red"
+| "Italic and Red "
+| " Just italic."
+| " Italic only."
+| " Plain
+"
+| <p>
+| "I should not be red. "
+| <font>
+| color="red"
+| "Red. "
+| <i>
+| "Italic and red."
+| <font>
+| color="red"
+| <i>
+| "
+"
+| <p>
+| <font>
+| color="red"
+| <i>
+| "Italic and red. "
+| " Red."
+| " I should not be red."
+| "
+"
+| <b>
+| "Bold "
+| <i>
+| "Bold and italic"
+| <i>
+| " Only Italic "
+| " Plain"
+
+#data
+<html><body>
+<p><font size="7">First paragraph.</p>
+<p>Second paragraph.</p></font>
+<b><p><i>Bold and Italic</b> Italic</p>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(2,38): unexpected-end-tag
+(4,28): adoption-agency-1.3
+(4,28): adoption-agency-1.3
+(4,39): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| "
+"
+| <p>
+| <font>
+| size="7"
+| "First paragraph."
+| <font>
+| size="7"
+| "
+"
+| <p>
+| "Second paragraph."
+| "
+"
+| <b>
+| <p>
+| <b>
+| <i>
+| "Bold and Italic"
+| <i>
+| " Italic"
+
+#data
+<html>
+<dl>
+<dt><b>Boo
+<dd>Goo?
+</dl>
+</html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(4,4): end-tag-too-early
+(5,5): end-tag-too-early
+(6,7): expected-one-end-tag-but-got-another
+#document
+| <html>
+| <head>
+| <body>
+| <dl>
+| "
+"
+| <dt>
+| <b>
+| "Boo
+"
+| <dd>
+| <b>
+| "Goo?
+"
+| <b>
+| "
+"
+
+#data
+<html><body>
+<label><a><div>Hello<div>World</div></a></label>
+</body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(2,40): adoption-agency-1.3
+(2,48): unexpected-end-tag
+(3,7): expected-one-end-tag-but-got-another
+#document
+| <html>
+| <head>
+| <body>
+| "
+"
+| <label>
+| <a>
+| <div>
+| <a>
+| "Hello"
+| <div>
+| "World"
+| "
+"
+
+#data
+<table><center> <font>a</center> <img> <tr><td> </td> </tr> </table>
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,15): foster-parenting-start-tag
+(1,16): foster-parenting-character
+(1,22): foster-parenting-start-tag
+(1,23): foster-parenting-character
+(1,32): foster-parenting-end-tag
+(1,32): end-tag-too-early
+(1,33): foster-parenting-character
+(1,38): foster-parenting-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <center>
+| " "
+| <font>
+| "a"
+| <font>
+| <img>
+| " "
+| <table>
+| " "
+| <tbody>
+| <tr>
+| <td>
+| " "
+| " "
+| " "
+
+#data
+<table><tr><p><a><p>You should see this text.
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,14): unexpected-start-tag-implies-table-voodoo
+(1,17): unexpected-start-tag-implies-table-voodoo
+(1,20): unexpected-start-tag-implies-table-voodoo
+(1,20): closing-non-current-p-element
+(1,21): foster-parenting-character
+(1,22): foster-parenting-character
+(1,23): foster-parenting-character
+(1,24): foster-parenting-character
+(1,25): foster-parenting-character
+(1,26): foster-parenting-character
+(1,27): foster-parenting-character
+(1,28): foster-parenting-character
+(1,29): foster-parenting-character
+(1,30): foster-parenting-character
+(1,31): foster-parenting-character
+(1,32): foster-parenting-character
+(1,33): foster-parenting-character
+(1,34): foster-parenting-character
+(1,35): foster-parenting-character
+(1,36): foster-parenting-character
+(1,37): foster-parenting-character
+(1,38): foster-parenting-character
+(1,39): foster-parenting-character
+(1,40): foster-parenting-character
+(1,41): foster-parenting-character
+(1,42): foster-parenting-character
+(1,43): foster-parenting-character
+(1,44): foster-parenting-character
+(1,45): foster-parenting-character
+(1,45): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| <a>
+| <p>
+| <a>
+| "You should see this text."
+| <table>
+| <tbody>
+| <tr>
+
+#data
+<TABLE>
+<TR>
+<CENTER><CENTER><TD></TD></TR><TR>
+<FONT>
+<TABLE><tr></tr></TABLE>
+</P>
+<a></font><font></a>
+This page contains an insanely badly-nested tag sequence.
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(3,8): unexpected-start-tag-implies-table-voodoo
+(3,16): unexpected-start-tag-implies-table-voodoo
+(4,6): unexpected-start-tag-implies-table-voodoo
+(4,6): unexpected character token in table (the newline)
+(5,7): unexpected-start-tag-implies-end-tag
+(6,4): unexpected p end tag
+(7,10): adoption-agency-1.3
+(7,20): adoption-agency-1.3
+(8,57): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <center>
+| <center>
+| <font>
+| "
+"
+| <table>
+| "
+"
+| <tbody>
+| <tr>
+| "
+"
+| <td>
+| <tr>
+| "
+"
+| <table>
+| <tbody>
+| <tr>
+| <font>
+| "
+"
+| <p>
+| "
+"
+| <a>
+| <a>
+| <font>
+| <font>
+| "
+This page contains an insanely badly-nested tag sequence."
+
+#data
+<html>
+<body>
+<b><nobr><div>This text is in a div inside a nobr</nobr>More text that should not be in the nobr, i.e., the
+nobr should have closed the div inside it implicitly. </b><pre>A pre tag outside everything else.</pre>
+</body>
+</html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(3,56): adoption-agency-1.3
+(4,58): adoption-agency-1.3
+(5,7): expected-one-end-tag-but-got-another
+#document
+| <html>
+| <head>
+| <body>
+| "
+"
+| <b>
+| <nobr>
+| <div>
+| <b>
+| <nobr>
+| "This text is in a div inside a nobr"
+| "More text that should not be in the nobr, i.e., the
+nobr should have closed the div inside it implicitly. "
+| <pre>
+| "A pre tag outside everything else."
+| "
+
+"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/webkit01.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/webkit01.dat
new file mode 100644
index 000000000..c480accd9
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/webkit01.dat
@@ -0,0 +1,705 @@
+#data
+Test
+#errors
+(1,4): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "Test"
+
+#data
+<div></div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+
+#data
+<div>Test</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "Test"
+
+#data
+<di
+#errors
+(1,3): eof-in-tag-name
+(1,3): expected-doctype-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<div>Hello</div>
+<script>
+console.log("PASS");
+</script>
+<div>Bye</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "Hello"
+| "
+"
+| <script>
+| "
+console.log("PASS");
+"
+| "
+"
+| <div>
+| "Bye"
+
+#data
+<div foo="bar">Hello</div>
+#errors
+(1,15): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| foo="bar"
+| "Hello"
+
+#data
+<div>Hello</div>
+<script>
+console.log("FOO<span>BAR</span>BAZ");
+</script>
+<div>Bye</div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| "Hello"
+| "
+"
+| <script>
+| "
+console.log("FOO<span>BAR</span>BAZ");
+"
+| "
+"
+| <div>
+| "Bye"
+
+#data
+<foo bar="baz"></foo><potato quack="duck"></potato>
+#errors
+(1,15): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| bar="baz"
+| <potato>
+| quack="duck"
+
+#data
+<foo bar="baz"><potato quack="duck"></potato></foo>
+#errors
+(1,15): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| bar="baz"
+| <potato>
+| quack="duck"
+
+#data
+<foo></foo bar="baz"><potato></potato quack="duck">
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,21): attributes-in-end-tag
+(1,51): attributes-in-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| <potato>
+
+#data
+</ tttt>
+#errors
+(1,2): expected-closing-tag-but-got-char
+(1,8): expected-doctype-but-got-eof
+#document
+| <!-- tttt -->
+| <html>
+| <head>
+| <body>
+
+#data
+<div FOO ><img><img></div>
+#errors
+(1,10): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| foo=""
+| <img>
+| <img>
+
+#data
+<p>Test</p<p>Test2</p>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,13): unexpected-end-tag
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| "TestTest2"
+
+#data
+<rdar://problem/6869687>
+#errors
+(1,7): unexpected-character-after-solidus-in-tag
+(1,8): unexpected-character-after-solidus-in-tag
+(1,16): unexpected-character-after-solidus-in-tag
+(1,24): expected-doctype-but-got-start-tag
+(1,24): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <rdar:>
+| 6869687=""
+| problem=""
+
+#data
+<A>test< /A>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,8): expected-tag-name
+(1,12): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| "test< /A>"
+
+#data
+&lt;
+#errors
+(1,4): expected-doctype-but-got-chars
+#document
+| <html>
+| <head>
+| <body>
+| "<"
+
+#data
+<body foo='bar'><body foo='baz' yo='mama'>
+#errors
+(1,16): expected-doctype-but-got-start-tag
+(1,42): unexpected-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| foo="bar"
+| yo="mama"
+
+#data
+<body></br foo="bar"></body>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,21): attributes-in-end-tag
+(1,21): unexpected-end-tag-treated-as
+#document
+| <html>
+| <head>
+| <body>
+| <br>
+
+#data
+<bdy><br foo="bar"></body>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,26): expected-one-end-tag-but-got-another
+#document
+| <html>
+| <head>
+| <body>
+| <bdy>
+| <br>
+| foo="bar"
+
+#data
+<body></body></br foo="bar">
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,28): attributes-in-end-tag
+(1,28): unexpected-end-tag-after-body
+(1,28): unexpected-end-tag-treated-as
+#document
+| <html>
+| <head>
+| <body>
+| <br>
+
+#data
+<bdy></body><br foo="bar">
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,12): expected-one-end-tag-but-got-another
+(1,26): unexpected-start-tag-after-body
+(1,26): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <bdy>
+| <br>
+| foo="bar"
+
+#data
+<html><body></body></html><!-- Hi there -->
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <!-- Hi there -->
+
+#data
+<html><body></body></html>x<!-- Hi there -->
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,27): expected-eof-but-got-char
+#document
+| <html>
+| <head>
+| <body>
+| "x"
+| <!-- Hi there -->
+
+#data
+<html><body></body></html>x<!-- Hi there --></html><!-- Again -->
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,27): expected-eof-but-got-char
+#document
+| <html>
+| <head>
+| <body>
+| "x"
+| <!-- Hi there -->
+| <!-- Again -->
+
+#data
+<html><body></body></html>x<!-- Hi there --></body></html><!-- Again -->
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,27): expected-eof-but-got-char
+#document
+| <html>
+| <head>
+| <body>
+| "x"
+| <!-- Hi there -->
+| <!-- Again -->
+
+#data
+<html><body><ruby><div><rp>xx</rp></div></ruby></body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,27): XXX-undefined-error
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <rp>
+| "xx"
+
+#data
+<html><body><ruby><div><rt>xx</rt></div></ruby></body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,27): XXX-undefined-error
+#document
+| <html>
+| <head>
+| <body>
+| <ruby>
+| <div>
+| <rt>
+| "xx"
+
+#data
+<html><frameset><!--1--><noframes>A</noframes><!--2--></frameset><!--3--><noframes>B</noframes><!--4--></html><!--5--><noframes>C</noframes><!--6-->
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <frameset>
+| <!-- 1 -->
+| <noframes>
+| "A"
+| <!-- 2 -->
+| <!-- 3 -->
+| <noframes>
+| "B"
+| <!-- 4 -->
+| <noframes>
+| "C"
+| <!-- 5 -->
+| <!-- 6 -->
+
+#data
+<select><option>A<select><option>B<select><option>C<select><option>D<select><option>E<select><option>F<select><option>G<select>
+#errors
+(1,8): expected-doctype-but-got-start-tag
+(1,25): unexpected-select-in-select
+(1,59): unexpected-select-in-select
+(1,93): unexpected-select-in-select
+(1,127): unexpected-select-in-select
+(1,127): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <select>
+| <option>
+| "A"
+| <option>
+| "B"
+| <select>
+| <option>
+| "C"
+| <option>
+| "D"
+| <select>
+| <option>
+| "E"
+| <option>
+| "F"
+| <select>
+| <option>
+| "G"
+
+#data
+<dd><dd><dt><dt><dd><li><li>
+#errors
+(1,4): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <dd>
+| <dd>
+| <dt>
+| <dt>
+| <dd>
+| <li>
+| <li>
+
+#data
+<div><b></div><div><nobr>a<nobr>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,14): end-tag-too-early
+(1,32): unexpected-start-tag-implies-end-tag
+(1,32): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <b>
+| <div>
+| <b>
+| <nobr>
+| "a"
+| <nobr>
+
+#data
+<head></head>
+<body></body>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| "
+"
+| <body>
+
+#data
+<head></head> <style></style>ddd
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,21): unexpected-start-tag-out-of-my-head
+#document
+| <html>
+| <head>
+| <style>
+| " "
+| <body>
+| "ddd"
+
+#data
+<kbd><table></kbd><col><select><tr>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,18): unexpected-end-tag-implies-table-voodoo
+(1,18): unexpected-end-tag
+(1,31): unexpected-start-tag-implies-table-voodoo
+(1,35): unexpected-table-element-start-tag-in-select-in-table
+(1,35): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| <kbd>
+| <select>
+| <table>
+| <colgroup>
+| <col>
+| <tbody>
+| <tr>
+
+#data
+<kbd><table></kbd><col><select><tr></table><div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,18): unexpected-end-tag-implies-table-voodoo
+(1,18): unexpected-end-tag
+(1,31): unexpected-start-tag-implies-table-voodoo
+(1,35): unexpected-table-element-start-tag-in-select-in-table
+(1,48): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <kbd>
+| <select>
+| <table>
+| <colgroup>
+| <col>
+| <tbody>
+| <tr>
+| <div>
+
+#data
+<a><li><style></style><title></title></a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,41): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <li>
+| <a>
+| <style>
+| <title>
+
+#data
+<font></p><p><meta><title></title></font>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,10): unexpected-end-tag
+(1,41): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <font>
+| <p>
+| <p>
+| <font>
+| <meta>
+| <title>
+
+#data
+<a><center><title></title><a>
+#errors
+(1,3): expected-doctype-but-got-start-tag
+(1,29): unexpected-start-tag-implies-end-tag
+(1,29): adoption-agency-1.3
+(1,29): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <a>
+| <center>
+| <a>
+| <title>
+| <a>
+
+#data
+<svg><title><div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,17): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg title>
+| <div>
+
+#data
+<svg><title><rect><div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,23): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg title>
+| <rect>
+| <div>
+
+#data
+<svg><title><svg><div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,22): unexpected-html-element-in-foreign-content
+(1,22): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg title>
+| <svg svg>
+| <div>
+
+#data
+<img <="" FAIL>
+#errors
+(1,6): invalid-character-in-attribute-name
+(1,15): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <img>
+| <=""
+| fail=""
+
+#data
+<ul><li><div id='foo'/>A</li><li>B<div>C</div></li></ul>
+#errors
+(1,4): expected-doctype-but-got-start-tag
+(1,23): non-void-element-with-trailing-solidus
+(1,29): end-tag-too-early
+#document
+| <html>
+| <head>
+| <body>
+| <ul>
+| <li>
+| <div>
+| id="foo"
+| "A"
+| <li>
+| "B"
+| <div>
+| "C"
+
+#data
+<svg><em><desc></em>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,9): unexpected-html-element-in-foreign-content
+(1,20): adoption-agency-1.3
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <em>
+| <desc>
+
+#data
+<svg><tfoot></mi><td>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+(1,17): unexpected-end-tag
+(1,17): unexpected-end-tag
+(1,21): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <svg svg>
+| <svg tfoot>
+| <svg td>
+
+#data
+<math><mrow><mrow><mn>1</mn></mrow><mi>a</mi></mrow></math>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <math math>
+| <math mrow>
+| <math mrow>
+| <math mn>
+| "1"
+| <math mi>
+| "a"
+
+#data
+<!doctype html><input type="hidden"><frameset>
+#errors
+(1,46): unexpected-start-tag
+(1,46): eof-in-frameset
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <frameset>
+
+#data
+<!doctype html><input type="button"><frameset>
+#errors
+(1,46): unexpected-start-tag
+#document
+| <!DOCTYPE html>
+| <html>
+| <head>
+| <body>
+| <input>
+| type="button"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/webkit02.dat b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/webkit02.dat
new file mode 100644
index 000000000..1ce90dfe1
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/tree-construction/webkit02.dat
@@ -0,0 +1,134 @@
+#data
+<foo bar=qux/>
+#errors
+(1,14): expected-doctype-but-got-start-tag
+(1,14): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <foo>
+| bar="qux/"
+
+#data
+<p id="status"><noscript><strong>A</strong></noscript><span>B</span></p>
+#errors
+(1,15): expected-doctype-but-got-start-tag
+#script-on
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| id="status"
+| <noscript>
+| "<strong>A</strong>"
+| <span>
+| "B"
+
+#data
+<p id="status"><noscript><strong>A</strong></noscript><span>B</span></p>
+#errors
+(1,15): expected-doctype-but-got-start-tag
+#script-off
+#document
+| <html>
+| <head>
+| <body>
+| <p>
+| id="status"
+| <noscript>
+| <strong>
+| "A"
+| <span>
+| "B"
+
+#data
+<div><sarcasm><div></div></sarcasm></div>
+#errors
+(1,5): expected-doctype-but-got-start-tag
+#document
+| <html>
+| <head>
+| <body>
+| <div>
+| <sarcasm>
+| <div>
+
+#data
+<html><body><img src="" border="0" alt="><div>A</div></body></html>
+#errors
+(1,6): expected-doctype-but-got-start-tag
+(1,67): eof-in-attribute-value-double-quote
+#document
+| <html>
+| <head>
+| <body>
+
+#data
+<table><td></tbody>A
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,20): foster-parenting-character
+(1,20): eof-in-table
+#document
+| <html>
+| <head>
+| <body>
+| "A"
+| <table>
+| <tbody>
+| <tr>
+| <td>
+
+#data
+<table><td></thead>A
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,19): XXX-undefined-error
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "A"
+
+#data
+<table><td></tfoot>A
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,11): unexpected-cell-in-table-body
+(1,19): XXX-undefined-error
+(1,20): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <tbody>
+| <tr>
+| <td>
+| "A"
+
+#data
+<table><thead><td></tbody>A
+#errors
+(1,7): expected-doctype-but-got-start-tag
+(1,18): unexpected-cell-in-table-body
+(1,26): XXX-undefined-error
+(1,27): expected-closing-tag-but-got-eof
+#document
+| <html>
+| <head>
+| <body>
+| <table>
+| <thead>
+| <tr>
+| <td>
+| "A"
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/attributes.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/attributes.test
new file mode 100755
index 000000000..2815976bf
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/attributes.test
@@ -0,0 +1,1035 @@
+{"tests": [
+
+{"description": "allowed 'class' attribute on <span>",
+"input": "<span class>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'contenteditable' attribute on <span>",
+"input": "<span contenteditable>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'contextmenu' attribute on <span>",
+"input": "<span contextmenu>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'dir' attribute on <span>",
+"input": "<span dir>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'draggable' attribute on <span>",
+"input": "<span draggable>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'id' attribute on <span>",
+"input": "<span id>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'irrelevant' attribute on <span>",
+"input": "<span irrelevant>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'lang' attribute on <span>",
+"input": "<span lang>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ref' attribute on <span>",
+"input": "<span ref>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'tabindex' attribute on <span>",
+"input": "<span tabindex>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'template' attribute on <span>",
+"input": "<span template>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'title' attribute on <span>",
+"input": "<span title>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onabort' attribute on <span>",
+"input": "<span onabort>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onbeforeunload' attribute on <span>",
+"input": "<span onbeforeunload>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onblur' attribute on <span>",
+"input": "<span onblur>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onchange' attribute on <span>",
+"input": "<span onchange>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onclick' attribute on <span>",
+"input": "<span onclick>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'oncontextmenu' attribute on <span>",
+"input": "<span oncontextmenu>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ondblclick' attribute on <span>",
+"input": "<span ondblclick>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ondrag' attribute on <span>",
+"input": "<span ondrag>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ondragend' attribute on <span>",
+"input": "<span ondragend>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ondragenter' attribute on <span>",
+"input": "<span ondragenter>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ondragleave' attribute on <span>",
+"input": "<span ondragleave>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ondragover' attribute on <span>",
+"input": "<span ondragover>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ondragstart' attribute on <span>",
+"input": "<span ondragstart>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ondrop' attribute on <span>",
+"input": "<span ondrop>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onerror' attribute on <span>",
+"input": "<span onerror>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onfocus' attribute on <span>",
+"input": "<span onfocus>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onkeydown' attribute on <span>",
+"input": "<span onkeydown>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onkeypress' attribute on <span>",
+"input": "<span onkeypress>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onkeyup' attribute on <span>",
+"input": "<span onkeyup>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onload' attribute on <span>",
+"input": "<span onload>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onmessage' attribute on <span>",
+"input": "<span onmessage>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onmousedown' attribute on <span>",
+"input": "<span onmousedown>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onmousemove' attribute on <span>",
+"input": "<span onmousemove>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onmouseout' attribute on <span>",
+"input": "<span onmouseout>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onmouseover' attribute on <span>",
+"input": "<span onmouseover>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onmouseup' attribute on <span>",
+"input": "<span onmouseup>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onmousewheel' attribute on <span>",
+"input": "<span onmousewheel>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onresize' attribute on <span>",
+"input": "<span onresize>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onscroll' attribute on <span>",
+"input": "<span onscroll>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onselect' attribute on <span>",
+"input": "<span onselect>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onsubmit' attribute on <span>",
+"input": "<span onsubmit>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onunload' attribute on <span>",
+"input": "<span onunload>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <command>",
+"input": "<command type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'label' attribute on <command>",
+"input": "<command label>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'icon' attribute on <command>",
+"input": "<command icon>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'hidden' attribute on <command>",
+"input": "<command hidden>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'disabled' attribute on <command>",
+"input": "<command disabled>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'checked' attribute on <command>",
+"input": "<command checked>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'radiogroup' attribute on <command>",
+"input": "<command radiogroup>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'default' attribute on <command>",
+"input": "<command default>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'value' attribute on <meter>",
+"input": "<meter value>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'min' attribute on <meter>",
+"input": "<meter min>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'low' attribute on <meter>",
+"input": "<meter low>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'high' attribute on <meter>",
+"input": "<meter high>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'max' attribute on <meter>",
+"input": "<meter max>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'optimum' attribute on <meter>",
+"input": "<meter optimum>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'cite' attribute on <ins>",
+"input": "<ins cite>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'datetime' attribute on <ins>",
+"input": "<ins datetime>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'multiple' attribute on <datagrid>",
+"input": "<datagrid multiple>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'disabled' attribute on <datagrid>",
+"input": "<datagrid disabled>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'name' attribute on <meta>",
+"input": "<meta name>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'http-equiv' attribute on <meta>",
+"input": "<meta http-equiv>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'content' attribute on <meta>",
+"input": "<meta content>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'charset' attribute on <meta>",
+"input": "<meta charset>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'src' attribute on <video>",
+"input": "<video src>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'autoplay' attribute on <video>",
+"input": "<video autoplay>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'start' attribute on <video>",
+"input": "<video start>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'loopstart' attribute on <video>",
+"input": "<video loopstart>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'loopend' attribute on <video>",
+"input": "<video loopend>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'end' attribute on <video>",
+"input": "<video end>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'loopcount' attribute on <video>",
+"input": "<video loopcount>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'controls' attribute on <video>",
+"input": "<video controls>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'style' attribute on <font>",
+"input": "<font style>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'media' attribute on <style>",
+"input": "<style media>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <style>",
+"input": "<style type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'scoped' attribute on <style>",
+"input": "<style scoped>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'alt' attribute on <img>",
+"input": "<img alt>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'src' attribute on <img>",
+"input": "<img src>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'usemap' attribute on <img>",
+"input": "<img usemap>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ismap' attribute on <img>",
+"input": "<img ismap>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'height' attribute on <img>",
+"input": "<img height>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'width' attribute on <img>",
+"input": "<img width>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'alt' attribute on <area>",
+"input": "<area alt>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'coords' attribute on <area>",
+"input": "<area coords>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'shape' attribute on <area>",
+"input": "<area shape>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'href' attribute on <area>",
+"input": "<area href>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'target' attribute on <area>",
+"input": "<area target>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ping' attribute on <area>",
+"input": "<area ping>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'rel' attribute on <area>",
+"input": "<area rel>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'media' attribute on <area>",
+"input": "<area media>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'hreflang' attribute on <area>",
+"input": "<area hreflang>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <area>",
+"input": "<area type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <menu>",
+"input": "<menu type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'label' attribute on <menu>",
+"input": "<menu label>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'autosubmit' attribute on <menu>",
+"input": "<menu autosubmit>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'name' attribute on <param>",
+"input": "<param name>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'value' attribute on <param>",
+"input": "<param value>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'value' attribute on <li>",
+"input": "<li value>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'src' attribute on <source>",
+"input": "<source src>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <source>",
+"input": "<source type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'media' attribute on <source>",
+"input": "<source media>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'xmlns' attribute on <html>",
+"input": "<html xmlns>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'open' attribute on <details>",
+"input": "<details open>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'colspan' attribute on <th>",
+"input": "<th colspan>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'rowspan' attribute on <th>",
+"input": "<th rowspan>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'scope' attribute on <th>",
+"input": "<th scope>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'value' attribute on <progress>",
+"input": "<progress value>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'max' attribute on <progress>",
+"input": "<progress max>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'colspan' attribute on <td>",
+"input": "<td colspan>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'rowspan' attribute on <td>",
+"input": "<td rowspan>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'cite' attribute on <blockquote>",
+"input": "<blockquote cite>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'data' attribute on <object>",
+"input": "<object data>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <object>",
+"input": "<object type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'usemap' attribute on <object>",
+"input": "<object usemap>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'height' attribute on <object>",
+"input": "<object height>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'width' attribute on <object>",
+"input": "<object width>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'height' attribute on <canvas>",
+"input": "<canvas height>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'width' attribute on <canvas>",
+"input": "<canvas width>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'href' attribute on <base>",
+"input": "<base href>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'target' attribute on <base>",
+"input": "<base target>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'href' attribute on <link>",
+"input": "<link href>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'rel' attribute on <link>",
+"input": "<link rel>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'media' attribute on <link>",
+"input": "<link media>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'hreflang' attribute on <link>",
+"input": "<link hreflang>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <link>",
+"input": "<link type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'href' attribute on <a>",
+"input": "<a href>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'target' attribute on <a>",
+"input": "<a target>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'ping' attribute on <a>",
+"input": "<a ping>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'rel' attribute on <a>",
+"input": "<a rel>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'media' attribute on <a>",
+"input": "<a media>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'hreflang' attribute on <a>",
+"input": "<a hreflang>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <a>",
+"input": "<a type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'src' attribute on <event-source>",
+"input": "<event-source src>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'start' attribute on <ol>",
+"input": "<ol start>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'span' attribute on <colgroup>",
+"input": "<colgroup span>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'src' attribute on <script>",
+"input": "<script src>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'defer' attribute on <script>",
+"input": "<script defer>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'async' attribute on <script>",
+"input": "<script async>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <script>",
+"input": "<script type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'cite' attribute on <q>",
+"input": "<q cite>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'cite' attribute on <del>",
+"input": "<del cite>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'datetime' attribute on <del>",
+"input": "<del datetime>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'src' attribute on <iframe>",
+"input": "<iframe src>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'datetime' attribute on <time>",
+"input": "<time datetime>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'src' attribute on <audio>",
+"input": "<audio src>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'autoplay' attribute on <audio>",
+"input": "<audio autoplay>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'start' attribute on <audio>",
+"input": "<audio start>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'loopstart' attribute on <audio>",
+"input": "<audio loopstart>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'loopend' attribute on <audio>",
+"input": "<audio loopend>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'end' attribute on <audio>",
+"input": "<audio end>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'loopcount' attribute on <audio>",
+"input": "<audio loopcount>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'controls' attribute on <audio>",
+"input": "<audio controls>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'span' attribute on <col>",
+"input": "<col span>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'disabled' attribute on <fieldset>",
+"input": "<fieldset disabled>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'form' attribute on <fieldset>",
+"input": "<fieldset form>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onsubmit' attribute on <form>",
+"input": "<form onsubmit>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'name' attribute on <form>",
+"input": "<form name>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onreset' attribute on <form>",
+"input": "<form onreset>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'accept' attribute on <form>",
+"input": "<form accept>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'replace' attribute on <form>",
+"input": "<form replace>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'accept-charset' attribute on <form>",
+"input": "<form accept-charset>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'action' attribute on <form>",
+"input": "<form action>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'data' attribute on <form>",
+"input": "<form data>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'method' attribute on <form>",
+"input": "<form method>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'enctype' attribute on <form>",
+"input": "<form enctype>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'rows' attribute on <textarea>",
+"input": "<textarea rows>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'name' attribute on <textarea>",
+"input": "<textarea name>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'form' attribute on <textarea>",
+"input": "<textarea form>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'required' attribute on <textarea>",
+"input": "<textarea required>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'cols' attribute on <textarea>",
+"input": "<textarea cols>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'accept' attribute on <textarea>",
+"input": "<textarea accept>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'disabled' attribute on <textarea>",
+"input": "<textarea disabled>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'readonly' attribute on <textarea>",
+"input": "<textarea readonly>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'wrap' attribute on <textarea>",
+"input": "<textarea wrap>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'autofocus' attribute on <textarea>",
+"input": "<textarea autofocus>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'maxlength' attribute on <textarea>",
+"input": "<textarea maxlength>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'a' attribute on <datalist>",
+"input": "<datalist a>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "allowed 'd' attribute on <datalist>",
+"input": "<datalist d>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "allowed 't' attribute on <datalist>",
+"input": "<datalist t>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "allowed 'action' attribute on <button>",
+"input": "<button action>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'enctype' attribute on <button>",
+"input": "<button enctype>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'method' attribute on <button>",
+"input": "<button method>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'replace' attribute on <button>",
+"input": "<button replace>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'template' attribute on <button>",
+"input": "<button template>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'name' attribute on <button>",
+"input": "<button name>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'form' attribute on <button>",
+"input": "<button form>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'value' attribute on <button>",
+"input": "<button value>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'disabled' attribute on <button>",
+"input": "<button disabled>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'autofocus' attribute on <button>",
+"input": "<button autofocus>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'type' attribute on <button>",
+"input": "<button type>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'accesskey' attribute on <label>",
+"input": "<label accesskey>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'form' attribute on <label>",
+"input": "<label form>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'for' attribute on <label>",
+"input": "<label for>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'disabled' attribute on <optgroup>",
+"input": "<optgroup disabled>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'label' attribute on <optgroup>",
+"input": "<optgroup label>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onforminput' attribute on <output>",
+"input": "<output onforminput>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'onformchange' attribute on <output>",
+"input": "<output onformchange>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'name' attribute on <output>",
+"input": "<output name>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'form' attribute on <output>",
+"input": "<output form>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'for' attribute on <output>",
+"input": "<output for>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'multiple' attribute on <select>",
+"input": "<select multiple>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'name' attribute on <select>",
+"input": "<select name>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'form' attribute on <select>",
+"input": "<select form>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'accesskey' attribute on <select>",
+"input": "<select accesskey>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'disabled' attribute on <select>",
+"input": "<select disabled>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'autofocus' attribute on <select>",
+"input": "<select autofocus>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'data' attribute on <select>",
+"input": "<select data>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'size' attribute on <select>",
+"input": "<select size>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'selected' attribute on <option>",
+"input": "<option selected>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'value' attribute on <option>",
+"input": "<option value>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'label' attribute on <option>",
+"input": "<option label>",
+"fail-if": "unknown-attribute"},
+
+{"description": "allowed 'disabled' attribute on <option>",
+"input": "<option disabled>",
+"fail-if": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <command>",
+"input": "<command foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <meter>",
+"input": "<meter foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <ins>",
+"input": "<ins foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <datagrid>",
+"input": "<datagrid foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <meta>",
+"input": "<meta foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <video>",
+"input": "<video foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <font>",
+"input": "<font foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <style>",
+"input": "<style foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <img>",
+"input": "<img foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <area>",
+"input": "<area foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <menu>",
+"input": "<menu foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <param>",
+"input": "<param foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <li>",
+"input": "<li foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <source>",
+"input": "<source foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <html>",
+"input": "<html foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <details>",
+"input": "<details foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <th>",
+"input": "<th foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <progress>",
+"input": "<progress foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <td>",
+"input": "<td foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <blockquote>",
+"input": "<blockquote foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <object>",
+"input": "<object foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <canvas>",
+"input": "<canvas foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <base>",
+"input": "<base foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <link>",
+"input": "<link foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <a>",
+"input": "<a foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <event-source>",
+"input": "<event-source foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <ol>",
+"input": "<ol foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <colgroup>",
+"input": "<colgroup foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <script>",
+"input": "<script foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <q>",
+"input": "<q foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <del>",
+"input": "<del foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <iframe>",
+"input": "<iframe foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <time>",
+"input": "<time foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <audio>",
+"input": "<audio foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <col>",
+"input": "<col foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "missing required 'href' attribute on <link>",
+"input": "<link rel>",
+"fail-unless": "missing-required-attribute"},
+
+{"description": "missing required 'rel' attribute on <link>",
+"input": "<link href>",
+"fail-unless": "missing-required-attribute"},
+
+{"description": "missing required 'dir' attribute on <bdo>",
+"input": "<bdo>",
+"fail-unless": "missing-required-attribute"},
+
+{"description": "missing required 'src' attribute on <img>",
+"input": "<img>",
+"fail-unless": "missing-required-attribute"},
+
+{"description": "missing required 'src' attribute on <embed>",
+"input": "<embed>",
+"fail-unless": "missing-required-attribute"},
+
+{"description": "missing required 'name' attribute on <param>",
+"input": "<param value>",
+"fail-unless": "missing-required-attribute"},
+
+{"description": "missing required 'value' attribute on <param>",
+"input": "<param name>",
+"fail-unless": "missing-required-attribute"},
+
+{"description": "missing required 'src' attribute on <source>",
+"input": "<source>",
+"fail-unless": "missing-required-attribute"},
+
+{"description": "missing required 'id' attribute on <map>",
+"input": "<map>",
+"fail-unless": "missing-required-attribute"}
+
+]}
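attributes.test above, like the base-href-attribute.test file that follows, is plain JSON: a top-level "tests" array whose entries carry a "description", an "input" markup fragment, and either a "fail-if" error code (the case fails if the validator reports it) or a "fail-unless" code (the case fails unless the validator reports it). A minimal sketch of a loader, assuming well-formed files; the function name and the tuple shape it yields are illustrative:

# Sketch: iterate the cases in an html5lib validator .test file.
import json

def load_validator_tests(path):
    with open(path, encoding="utf-8") as fh:
        suite = json.load(fh)
    for test in suite["tests"]:
        if "fail-if" in test:
            condition, code = "fail-if", test["fail-if"]
        else:
            condition, code = "fail-unless", test["fail-unless"]
        yield test["description"], test["input"], condition, code

For example, the first entry above would yield ("allowed 'class' attribute on <span>", "<span class>", "fail-if", "unknown-attribute").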
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/base-href-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/base-href-attribute.test
new file mode 100755
index 000000000..83a626a79
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/base-href-attribute.test
@@ -0,0 +1,787 @@
+{"tests": [
+
+{"description": "base href contains invalid URI due to leading space",
+"input": "<base href=' http://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to trailing space",
+"input": "<base href='http://www.example.com/ '",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to space in scheme",
+"input": "<base href='ht tp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to space in domain",
+"input": "<base href='http://www.example. com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to space in path",
+"input": "<base href='http://www.example.com/a b'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to space in fragment",
+"input": "<base href='http://www.example.com/a#b c'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to space in query",
+"input": "<base href='http://www.example.com/a?b c'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to leading tab",
+"input": "<base href='\thttp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to trailing tab",
+"input": "<base href='http://www.example.com/\t'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to tab in scheme",
+"input": "<base href='ht\ttp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to tab in domain",
+"input": "<base href='http://www.example.\tcom/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to tab in path",
+"input": "<base href='http://www.example.com/a\tb'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to tab in fragment",
+"input": "<base href='http://www.example.com/a#b\tc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to tab in query",
+"input": "<base href='http://www.example.com/a?b\tc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to leading LF",
+"input": "<base href='\nhttp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LF in scheme",
+"input": "<base href='ht\ntp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LF in domain",
+"input": "<base href='http://www.example.\ncom/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LF in path",
+"input": "<base href='http://www.example.com/a\nb'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LF in fragment",
+"input": "<base href='http://www.example.com/a#b\nc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LF in query",
+"input": "<base href='http://www.example.com/a?b\nc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to leading LT",
+"input": "<base href='\u000Bhttp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to trailing LT",
+"input": "<base href='http://www.example.com/\u000B'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LT in scheme",
+"input": "<base href='ht\u000Btp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LT in domain",
+"input": "<base href='http://www.example.\u000Bcom/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LT in path",
+"input": "<base href='http://www.example.com/a\u000Bb'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LT in fragment",
+"input": "<base href='http://www.example.com/a#b\u000Bc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to LT in query",
+"input": "<base href='http://www.example.com/a?b\u000Bc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to leading FF",
+"input": "<base href='\u000Chttp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to trailing FF",
+"input": "<base href='http://www.example.com/\u000C'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to FF in scheme",
+"input": "<base href='ht\u000Ctp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to FF in domain",
+"input": "<base href='http://www.example.\u000Ccom/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to FF in path",
+"input": "<base href='http://www.example.com/a\u000Cb'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to FF in fragment",
+"input": "<base href='http://www.example.com/a#b\u000Cc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to FF in query",
+"input": "<base href='http://www.example.com/a?b\u000Cc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to leading CR",
+"input": "<base href='\rhttp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to CR in scheme",
+"input": "<base href='ht\rtp://www.example.com/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to CR in domain",
+"input": "<base href='http://www.example.\rcom/'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to CR in path",
+"input": "<base href='http://www.example.com/a\rb'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to CR in fragment",
+"input": "<base href='http://www.example.com/a#b\rc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains invalid URI due to CR in query",
+"input": "<base href='http://www.example.com/a?b\rc'",
+"fail-unless": "invalid-uri-char"},
+
+{"description": "base href contains valid URI scheme 'ftp'",
+"input": "<base href='ftp://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'http'",
+"input": "<base href='http://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'gopher'",
+"input": "<base href='gopher://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'mailto'",
+"input": "<base href='mailto://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'news'",
+"input": "<base href='news://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'nntp'",
+"input": "<base href='nntp://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'telnet'",
+"input": "<base href='telnet://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'wais'",
+"input": "<base href='wais://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'file'",
+"input": "<base href='file://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'prospero'",
+"input": "<base href='prospero://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'z39.50s'",
+"input": "<base href='z39.50s://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'z39.50r'",
+"input": "<base href='z39.50r://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'cid'",
+"input": "<base href='cid://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'mid'",
+"input": "<base href='mid://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'vemmi'",
+"input": "<base href='vemmi://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'service'",
+"input": "<base href='service://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'imap'",
+"input": "<base href='imap://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'nfs'",
+"input": "<base href='nfs://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'acap'",
+"input": "<base href='acap://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'rtsp'",
+"input": "<base href='rtsp://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'tip'",
+"input": "<base href='tip://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'pop'",
+"input": "<base href='pop://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'data'",
+"input": "<base href='data://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'dav'",
+"input": "<base href='dav://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'opaquelocktoken'",
+"input": "<base href='opaquelocktoken://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'sip'",
+"input": "<base href='sip://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'sips'",
+"input": "<base href='sips://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'tel'",
+"input": "<base href='tel://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'fax'",
+"input": "<base href='fax://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'modem'",
+"input": "<base href='modem://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'ldap'",
+"input": "<base href='ldap://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'https'",
+"input": "<base href='https://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'soap.beep'",
+"input": "<base href='soap.beep://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'soap.beeps'",
+"input": "<base href='soap.beeps://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'xmlrpc.beep'",
+"input": "<base href='xmlrpc.beep://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'xmlrpc.beeps'",
+"input": "<base href='xmlrpc.beeps://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'urn'",
+"input": "<base href='urn://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'go'",
+"input": "<base href='go://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'h323'",
+"input": "<base href='h323://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'ipp'",
+"input": "<base href='ipp://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'tftp'",
+"input": "<base href='tftp://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'mupdate'",
+"input": "<base href='mupdate://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'pres'",
+"input": "<base href='pres://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'im'",
+"input": "<base href='im://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'mtqp'",
+"input": "<base href='mtqp://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'iris.beep'",
+"input": "<base href='iris.beep://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'dict'",
+"input": "<base href='dict://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'snmp'",
+"input": "<base href='snmp://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'crid'",
+"input": "<base href='crid://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'tag'",
+"input": "<base href='tag://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'dns'",
+"input": "<base href='dns://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'info'",
+"input": "<base href='info://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'javascript'",
+"input": "<base href='javascript:foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'FTP'",
+"input": "<base href='FTP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'HTTP'",
+"input": "<base href='HTTP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'GOPHER'",
+"input": "<base href='GOPHER://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'MAILTO'",
+"input": "<base href='MAILTO://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'NEWS'",
+"input": "<base href='NEWS://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'NNTP'",
+"input": "<base href='NNTP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'TELNET'",
+"input": "<base href='TELNET://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'WAIS'",
+"input": "<base href='WAIS://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'FILE'",
+"input": "<base href='FILE://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'PROSPERO'",
+"input": "<base href='PROSPERO://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'Z39.50S'",
+"input": "<base href='Z39.50S://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'Z39.50R'",
+"input": "<base href='Z39.50R://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'CID'",
+"input": "<base href='CID://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'MID'",
+"input": "<base href='MID://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'VEMMI'",
+"input": "<base href='VEMMI://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'SERVICE'",
+"input": "<base href='SERVICE://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'IMAP'",
+"input": "<base href='IMAP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'NFS'",
+"input": "<base href='NFS://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'ACAP'",
+"input": "<base href='ACAP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'RTSP'",
+"input": "<base href='RTSP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'TIP'",
+"input": "<base href='TIP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'POP'",
+"input": "<base href='POP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'DATA'",
+"input": "<base href='DATA://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'DAV'",
+"input": "<base href='DAV://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'OPAQUELOCKTOKEN'",
+"input": "<base href='OPAQUELOCKTOKEN://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'SIP'",
+"input": "<base href='SIP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'SIPS'",
+"input": "<base href='SIPS://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'TEL'",
+"input": "<base href='TEL://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'FAX'",
+"input": "<base href='FAX://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'MODEM'",
+"input": "<base href='MODEM://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'LDAP'",
+"input": "<base href='LDAP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'HTTPS'",
+"input": "<base href='HTTPS://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'SOAP.BEEP'",
+"input": "<base href='SOAP.BEEP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'SOAP.BEEPS'",
+"input": "<base href='SOAP.BEEPS://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'XMLRPC.BEEP'",
+"input": "<base href='XMLRPC.BEEP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'XMLRPC.BEEPS'",
+"input": "<base href='XMLRPC.BEEPS://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'URN'",
+"input": "<base href='URN://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'GO'",
+"input": "<base href='GO://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'H323'",
+"input": "<base href='H323://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'IPP'",
+"input": "<base href='IPP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'TFTP'",
+"input": "<base href='TFTP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'MUPDATE'",
+"input": "<base href='MUPDATE://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'PRES'",
+"input": "<base href='PRES://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'IM'",
+"input": "<base href='IM://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'MTQP'",
+"input": "<base href='MTQP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'IRIS.BEEP'",
+"input": "<base href='IRIS.BEEP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'DICT'",
+"input": "<base href='DICT://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'SNMP'",
+"input": "<base href='SNMP://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'CRID'",
+"input": "<base href='CRID://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'TAG'",
+"input": "<base href='TAG://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'DNS'",
+"input": "<base href='DNS://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'INFO'",
+"input": "<base href='INFO://foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'JAVASCRIPT'",
+"input": "<base href='JAVASCRIPT:foo'",
+"fail-if": "invalid-scheme"},
+
+{"description": "base href contains valid URI scheme 'foo'",
+"input": "<base href='foo:bar'",
+"fail-unless": "invalid-scheme"},
+
+{"description": "base href contains valid URI 'g'",
+"input": "<base href='g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI './g'",
+"input": "<base href='./g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g/'",
+"input": "<base href='g/'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '/g'",
+"input": "<base href='/g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '//g'",
+"input": "<base href='//g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '?y'",
+"input": "<base href='?y'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g?y'",
+"input": "<base href='g?y'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '#s'",
+"input": "<base href='#s'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g#s'",
+"input": "<base href='g#s'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g?y#s'",
+"input": "<base href='g?y#s'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI ';x'",
+"input": "<base href=';x'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g;x'",
+"input": "<base href='g;x'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g;x?y#s'",
+"input": "<base href='g;x?y#s'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '.'",
+"input": "<base href='.'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI './'",
+"input": "<base href='./'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '..'",
+"input": "<base href='..'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '../'",
+"input": "<base href='../'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '../g'",
+"input": "<base href='../g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '../..'",
+"input": "<base href='../..'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '../../'",
+"input": "<base href='../../'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '../../g'",
+"input": "<base href='../../g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '../../../g'",
+"input": "<base href='../../../g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '../../../../g'",
+"input": "<base href='../../../../g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '/./g'",
+"input": "<base href='/./g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '/../g'",
+"input": "<base href='/../g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g.'",
+"input": "<base href='g.'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '.g'",
+"input": "<base href='.g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g..'",
+"input": "<base href='g..'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI '..g'",
+"input": "<base href='..g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI './../g'",
+"input": "<base href='./../g'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI './g/.'",
+"input": "<base href='./g/.'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g/./h'",
+"input": "<base href='g/./h'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g/../h'",
+"input": "<base href='g/../h'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g;x=1/./y'",
+"input": "<base href='g;x=1/./y'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g;x=1/../y'",
+"input": "<base href='g;x=1/../y'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g?y/./x'",
+"input": "<base href='g?y/./x'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g?y/../x'",
+"input": "<base href='g?y/../x'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g#s/./x'",
+"input": "<base href='g#s/./x'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'g#s/../x'",
+"input": "<base href='g#s/../x'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI './g:h'",
+"input": "<base href='./g:h'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://www.w%33.org'",
+"input": "<base href='http://www.w%33.org'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://r%C3%A4ksm%C3%B6rg%C3%A5s.josefsson.org'",
+"input": "<base href='http://r%C3%A4ksm%C3%B6rg%C3%A5s.josefsson.org'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://xn--rksmrgs-5wao1o.josefsson.org'",
+"input": "<base href='http://xn--rksmrgs-5wao1o.josefsson.org'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://%E7%B4%8D%E8%B1%86.w3.mag.keio.ac.jp'",
+"input": "<base href='http://%E7%B4%8D%E8%B1%86.w3.mag.keio.ac.jp'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://xn--99zt52a.w3.mag.keio.ac.jp'",
+"input": "<base href='http://xn--99zt52a.w3.mag.keio.ac.jp'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://www.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3%81%9F%E3%82%8A%E3%81%AA%E3%81%84.w3.mag.keio.ac.jp/'",
+"input": "<base href='http://www.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3%81%9F%E3%82%8A%E3%81%AA%E3%81%84.w3.mag.keio.ac.jp/'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/'",
+"input": "<base href='http://www.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3%81%9F%E3%82%8A%E3%81%AA%E3%81%84.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3%81%9F%E3%82%8A%E3%81%AA%E3%81%84.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3%81%9F%E3%82%8A%E3%81%AA%E3%81%84.w3.mag.keio.ac.jp/'",
+"input": "<base href='http://%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3%81%9F%E3%82%8A%E3%81%AA%E3%81%84.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3%81%9F%E3%82%8A%E3%81%AA%E3%81%84.%E3%81%BB%E3%82%93%E3%81%A8%E3%81%86%E3%81%AB%E3%81%AA%E3%81%8C%E3%81%84%E3%82%8F%E3%81%91%E3%81%AE%E3%82%8F%E3%81%8B%E3%82%89%E3%81%AA%E3%81%84%E3%81%A9%E3%82%81%E3%81%84%E3%82%93%E3%82%81%E3%81%84%E3%81%AE%E3%82%89%E3%81%B9%E3%82%8B%E3%81%BE%E3%81%A0%E3%81%AA%E3%81%8C%E3%81%8F%E3%81%97%E3%81%AA%E3%81%84%E3%81%A8%E3%81%9F%E3%82%8A%E3%81%AA%E3%81%84.w3.mag.keio.ac.jp/'",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "base href contains valid URI 'http://xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3fg11amb5gzdb4wi9bya3kc6lra.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3fg11amb5gzdb4wi9bya3kc6lra.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/'",
+"input": "<base href='http://xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3fg11amb5gzdb4wi9bya3kc6lra.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3fg11amb5gzdb4wi9bya3kc6lra.xn--n8jaaaaai5bhf7as8fsfk3jnknefdde3fg11amb5gzdb4wi9bya3kc6lra.w3.mag.keio.ac.jp/'",
+"fail-if": "invalid-attribute-value"}
+
+]}
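
All of the base-href cases above share the same four-key fixture shape: each entry pairs an "input" markup fragment with one validator error code, where "fail-if" means the case fails if that code is reported and "fail-unless" means it fails unless that code is reported. As a rough illustration only (the runner below and its validate callable are hypothetical stand-ins, not html5lib API), a harness for this format could look like:

```python
# Minimal sketch of a runner for the ".test" fixtures in this patch.
# validate(fragment) is an assumed callable returning the error codes
# (e.g. "invalid-scheme") reported for a markup fragment; not real API.
import json

def run_fixture_file(path, validate):
    """Yield (description, passed) for every case in one .test file."""
    with open(path) as f:
        cases = json.load(f)["tests"]
    for case in cases:
        codes = set(validate(case["input"]))
        if "fail-if" in case:
            passed = case["fail-if"] not in codes   # error must NOT be reported
        else:
            passed = case["fail-unless"] in codes   # error MUST be reported
        yield case["description"], passed
```

Each .test file is a self-contained JSON document, so a fixture such as base-href-attribute.test can be exercised on its own.
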
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/base-target-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/base-target-attribute.test
new file mode 100755
index 000000000..6d4db20b2
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/base-target-attribute.test
@@ -0,0 +1,35 @@
+{"tests": [
+
+{"description": "valid base target attribute '_self'",
+"input": "<base target=_self>",
+"fail-if": "invalid-browsing-context"},
+
+{"description": "valid base target attribute '_parent'",
+"input": "<base target=_parent>",
+"fail-if": "invalid-browsing-context"},
+
+{"description": "valid base target attribute '_top'",
+"input": "<base target=_top>",
+"fail-if": "invalid-browsing-context"},
+
+{"description": "valid base target attribute '_blank'",
+"input": "<base target=_blank>",
+"fail-if": "invalid-browsing-context"},
+
+{"description": "valid base target attribute 'foo'",
+"input": "<base target=foo>",
+"fail-if": "invalid-browsing-context"},
+
+{"description": "base target attribute may be blank",
+"input": "<base target>",
+"fail-if": "invalid-browsing-context"},
+
+{"description": "invalid base target attribute '_'",
+"input": "<base target=_>",
+"fail-unless": "invalid-browsing-context"},
+
+{"description": "invalid base target attribute '_foo'",
+"input": "<base target=_foo>",
+"fail-unless": "invalid-browsing-context"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/blockquote-cite-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/blockquote-cite-attribute.test
new file mode 100755
index 000000000..153c6e6af
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/blockquote-cite-attribute.test
@@ -0,0 +1,7 @@
+{"tests": [
+
+{"description": "blockquote cite contains invalid URI due to space in domain",
+"input": "<blockquote cite='http://www.example. com/'",
+"fail-unless": "invalid-uri-char"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/classattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/classattribute.test
new file mode 100755
index 000000000..0fe703fcd
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/classattribute.test
@@ -0,0 +1,152 @@
+{"tests": [
+
+{"description": "valid single class attribute value",
+"input": "<span class=a>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading space",
+"input": "<span class=' a'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with trailing space",
+"input": "<span class='a '>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading and trailing space",
+"input": "<span class=' a '>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading tab",
+"input": "<span class='\ta'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with trailing tab",
+"input": "<span class='a\t'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading and trailing tab",
+"input": "<span class='\ta\t'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading LF",
+"input": "<span class='\na'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with trailing LF",
+"input": "<span class='a\n'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading and trailing LF",
+"input": "<span class='\na\n'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading LT",
+"input": "<span class='\u000Ba'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with trailing LT",
+"input": "<span class='a\u000B'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading and trailing LT",
+"input": "<span class='\u000Ba\u000B'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading FF",
+"input": "<span class='\u000Ca'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with trailing FF",
+"input": "<span class='a\u000C'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading and trailing FF",
+"input": "<span class='\u000Ca\u000C'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading CR",
+"input": "<span class='\ra'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with trailing CR",
+"input": "<span class='a\r'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid single class attribute value with leading and trailing CR",
+"input": "<span class='\ra\r'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid double class attribute value separated by space",
+"input": "<span class='a b'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid double class attribute value separated by tab",
+"input": "<span class='a\tb'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid double class attribute value separated by LF",
+"input": "<span class='a\nb'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid double class attribute value separated by LT",
+"input": "<span class='a\u000Bb'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid double class attribute value separated by FF",
+"input": "<span class='a\u000Cb'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid double class attribute value separated by CR",
+"input": "<span class='a\rb'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "invalid duplicated class attribute value separated by space",
+"input": "<span class='a a'>",
+"fail-unless": "invalid-attribute-value"},
+
+{"description": "invalid duplicated class attribute value separated by tab",
+"input": "<span class='a\ta'>",
+"fail-unless": "invalid-attribute-value"},
+
+{"description": "invalid duplicated class attribute value separated by LF",
+"input": "<span class='a\na'>",
+"fail-unless": "invalid-attribute-value"},
+
+{"description": "invalid duplicated class attribute value separated by LT",
+"input": "<span class='a\u000Ba'>",
+"fail-unless": "invalid-attribute-value"},
+
+{"description": "invalid duplicated class attribute value separated by FF",
+"input": "<span class='a\u000Ca'>",
+"fail-unless": "invalid-attribute-value"},
+
+{"description": "invalid duplicated class attribute value separated by CR",
+"input": "<span class='a\ra'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated class attribute value separated by space",
+"input": "<span class='a a'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated class attribute value separated by tab",
+"input": "<span class='a\ta'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated class attribute value separated by LF",
+"input": "<span class='a\na'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated class attribute value separated by LT",
+"input": "<span class='a\u000Ba'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated class attribute value separated by FF",
+"input": "<span class='a\u000Ca'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated class attribute value separated by CR",
+"input": "<span class='a\ra'>",
+"fail-unless": "duplicate-value-in-token-list"}
+
+]}
+
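
The duplicated-class cases above turn on how the class attribute is tokenized: the value is split on the separator characters these fixtures exercise (space, tab, LF, U+000B, FF, CR), and an error of the duplicate-value-in-token-list kind is expected whenever the same token occurs twice. A minimal sketch of that check, assuming exactly the six separators used in the fixtures (taken from the test data, not asserted from the spec), might be:

```python
# Illustrative duplicate-token check mirroring the classattribute.test cases;
# the separator list comes from those fixtures, not from the HTML spec.
import re

# space, tab, LF, U+000B, FF, CR -- the separators the fixtures above exercise
_SEP_RUN = re.compile(r"[ \t\n\x0b\x0c\r]+")

def has_duplicate_class_token(value):
    """Return True when a class attribute value repeats a token."""
    tokens = [t for t in _SEP_RUN.split(value) if t]
    return len(tokens) != len(set(tokens))

# has_duplicate_class_token("a a")  -> True   (the "fail-unless" cases)
# has_duplicate_class_token(" a b") -> False  (the "fail-if" cases)
```
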
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/contenteditableattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/contenteditableattribute.test
new file mode 100755
index 000000000..b6ae1d27e
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/contenteditableattribute.test
@@ -0,0 +1,59 @@
+{"tests": [
+
+{"description": "valid contenteditable attribute value 'true'",
+"input": "<span contenteditable=true>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid contenteditable attribute value 'TRUE'",
+"input": "<span contenteditable=TRUE>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid contenteditable attribute value 'TrUe'",
+"input": "<span contenteditable=TrUe>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid contenteditable attribute value 'false'",
+"input": "<span contenteditable=false>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid contenteditable attribute value 'FALSE'",
+"input": "<span contenteditable=FALSE>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid contenteditable attribute value 'FalSe'",
+"input": "<span contenteditable=FalSe>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid contenteditable attribute value ''",
+"input": "<span contenteditable=''>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid contenteditable attribute value (not specified)",
+"input": "<span contenteditable>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "invalid contenteditable attribute value 'foo'",
+"input": "<span contenteditable=foo>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid contenteditable attribute value '0'",
+"input": "<span contenteditable=0>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid contenteditable attribute value '1'",
+"input": "<span contenteditable=1>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid contenteditable attribute value 'yes'",
+"input": "<span contenteditable=yes>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid contenteditable attribute value 'no'",
+"input": "<span contenteditable=no>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid contenteditable attribute value 'inherit'",
+"input": "<span contenteditable=inherit>",
+"fail-unless": "invalid-enumerated-value"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/contextmenuattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/contextmenuattribute.test
new file mode 100755
index 000000000..59e4397c9
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/contextmenuattribute.test
@@ -0,0 +1,115 @@
+{"tests": [
+
+{"description": "contextmenu points to valid ID earlier",
+"input": "<menu id=a><span contextmenu=a>",
+"fail-if": "id-does-not-exist"},
+
+{"description": "contextmenu points to valid ID later",
+"input": "<span contextmenu=a><menu id=a>",
+"fail-if": "id-does-not-exist"},
+
+{"description": "contextmenu points to non-existent ID",
+"input": "<span contextmenu=a>",
+"fail-unless": "id-does-not-exist"},
+
+{"description": "contextmenu points to ID on non-menu element",
+"input": "<span id=a><span contextmenu=a>",
+"fail-unless": "contextmenu-must-point-to-menu"},
+
+{"description": "uppercase contextmenu points to ID on non-menu element",
+"input": "<span id=a><span CONTEXTMENU=a>",
+"fail-unless": "contextmenu-must-point-to-menu"},
+
+{"description": "valid ID 'a'",
+"input": "<span contextmenu=a>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid ID '1'",
+"input": "<span contextmenu=1>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "wacky but valid ID",
+"input": "<span contextmenu='<html><head><title>a</title></head><body><p>b</p></body></html>'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "invalid blank ID",
+"input": "<span id>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "invalid blank ID with quotes",
+"input": "<span contextmenu=''>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "invalid ID because of leading space",
+"input": "<span contextmenu=' a'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing space",
+"input": "<span contextmenu='a '>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of space in value",
+"input": "<span contextmenu='a b'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading tab",
+"input": "<span contextmenu='\ta'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing tab",
+"input": "<span contextmenu='a\t'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of tab in value",
+"input": "<span contextmenu='a\tb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading LF",
+"input": "<span contextmenu='\na'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing LF",
+"input": "<span contextmenu='a\n'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of LF in value",
+"input": "<span contextmenu='a\nb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading LT",
+"input": "<span contextmenu='\u000Ba'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing LT",
+"input": "<span contextmenu='a\u000B'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of LT in value",
+"input": "<span contextmenu='a\u000Bb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading FF",
+"input": "<span contextmenu='\u000Ca'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing FF",
+"input": "<span contextmenu='a\u000C'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of FF in value",
+"input": "<span contextmenu='a\u000Cb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading CR",
+"input": "<span contextmenu='\ra'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing CR",
+"input": "<span contextmenu='a\r'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of CR in value",
+"input": "<span contextmenu='a\rb'>",
+"fail-unless": "space-in-id"}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/dirattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/dirattribute.test
new file mode 100755
index 000000000..71883dc1c
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/dirattribute.test
@@ -0,0 +1,59 @@
+{"tests": [
+
+{"description": "valid dir attribute value 'ltr'",
+"input": "<span dir=ltr>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid dir attribute value 'LTR'",
+"input": "<span dir=LTR>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid dir attribute value 'LtR'",
+"input": "<span dir=LtR>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid dir attribute value 'rtl'",
+"input": "<span dir=rtl>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid dir attribute value 'RTL'",
+"input": "<span dir=RTL>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid dir attribute value 'RtL'",
+"input": "<span dir=RtL>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "invalid dir attribute value due to leading space",
+"input": "<span dir=' ltr'>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "dir attribute value can not be blank",
+"input": "<span dir>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "dir attribute value can not be blank (with quotes)",
+"input": "<span dir=''>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "invalid dir attribute value 'left'",
+"input": "<span dir=left>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid dir attribute value 'right'",
+"input": "<span dir=right>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid dir attribute value 'lefttoright'",
+"input": "<span dir=lefttoright>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid dir attribute value 'righttoleft'",
+"input": "<span dir=righttoleft>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid dir attribute value 'inherit'",
+"input": "<span dir=inherit>",
+"fail-unless": "invalid-enumerated-value"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/draggableattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/draggableattribute.test
new file mode 100755
index 000000000..49e692d98
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/draggableattribute.test
@@ -0,0 +1,63 @@
+{"tests": [
+
+{"description": "valid draggable attribute value 'true'",
+"input": "<span draggable=true>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid draggable attribute value 'TRUE'",
+"input": "<span draggable=TRUE>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid draggable attribute value 'TrUe'",
+"input": "<span draggable=TrUe>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid draggable attribute value 'false'",
+"input": "<span draggable=false>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid draggable attribute value 'FALSE'",
+"input": "<span draggable=FALSE>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid draggable attribute value 'FalSe'",
+"input": "<span draggable=FalSe>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "invalid draggable attribute value ''",
+"input": "<span draggable=''>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "valid draggable attribute value (not specified)",
+"input": "<span draggable>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "invalid draggable attribute value 'foo'",
+"input": "<span draggable=foo>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid draggable attribute value '0'",
+"input": "<span draggable=0>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid draggable attribute value '1'",
+"input": "<span draggable=1>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid draggable attribute value 'yes'",
+"input": "<span draggable=yes>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid draggable attribute value 'no'",
+"input": "<span draggable=no>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid draggable attribute value 'auto'",
+"input": "<span draggable=auto>",
+"fail-unless": "invalid-enumerated-value"},
+
+{"description": "invalid draggable attribute value 'inherit'",
+"input": "<span draggable=inherit>",
+"fail-unless": "invalid-enumerated-value"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/html-xmlns-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/html-xmlns-attribute.test
new file mode 100755
index 000000000..2ebd19eba
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/html-xmlns-attribute.test
@@ -0,0 +1,23 @@
+{"tests": [
+
+{"description": "valid html xmlns attribute",
+"input": "<html xmlns=http://www.w3.org/1999/xhtml>",
+"fail-if": "invalid-root-namespace"},
+
+{"description": "invalid html xmlns attribute due to leading space",
+"input": "<html xmlns=' http://www.w3.org/1999/xhtml'>",
+"fail-unless": "invalid-root-namespace"},
+
+{"description": "invalid html xmlns attribute due to trailing space",
+"input": "<html xmlns='http://www.w3.org/1999/xhtml '>",
+"fail-unless": "invalid-root-namespace"},
+
+{"description": "invalid html xmlns attribute due to uppercase",
+"input": "<html xmlns=HTTP://WWW.W3.ORG/1999/XHTML>",
+"fail-unless": "invalid-root-namespace"},
+
+{"description": "invalid xmlns attribute on non-html element",
+"input": "<body xmlns=http://www.w3.org/1999/xhtml>",
+"fail-unless": "unknown-attribute"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/idattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/idattribute.test
new file mode 100755
index 000000000..4058d5219
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/idattribute.test
@@ -0,0 +1,115 @@
+{"tests": [
+
+{"description": "valid ID 'a'",
+"input": "<span id=a>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid ID '1'",
+"input": "<span id=1>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "wacky but valid ID",
+"input": "<span id='<html><head><title>a</title></head><body><p>b</p></body></html>'>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "invalid blank ID",
+"input": "<span id>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "invalid blank ID with quotes",
+"input": "<span id=''>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "invalid ID because of leading space",
+"input": "<span id=' a'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing space",
+"input": "<span id='a '>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of space in value",
+"input": "<span id='a b'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading tab",
+"input": "<span id='\ta'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing tab",
+"input": "<span id='a\t'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of tab in value",
+"input": "<span id='a\tb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading LF",
+"input": "<span id='\na'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing LF",
+"input": "<span id='a\n'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of LF in value",
+"input": "<span id='a\nb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading LT",
+"input": "<span id='\u000Ba'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing LT",
+"input": "<span id='a\u000B'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of LT in value",
+"input": "<span id='a\u000Bb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading FF",
+"input": "<span id='\u000Ca'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing FF",
+"input": "<span id='a\u000C'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of FF in value",
+"input": "<span id='a\u000Cb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of leading CR",
+"input": "<span id='\ra'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of trailing CR",
+"input": "<span id='a\r'>",
+"fail-unless": "space-in-id"},
+
+{"description": "invalid ID because of CR in value",
+"input": "<span id='a\rb'>",
+"fail-unless": "space-in-id"},
+
+{"description": "duplicate ID values",
+"input": "<span id=a><span id=a>",
+"fail-unless": "duplicate-id"},
+
+{"description": "duplicate ID values with spaces (weird but true)",
+"input": "<span id='a '><span id='a '>",
+"fail-unless": "duplicate-id"},
+
+{"description": "not duplicate ID values because spaces don't match",
+"input": "<span id=a><span id='a '>",
+"fail-if": "duplicate-id"},
+
+{"description": "not duplicate ID values because spaces don't match",
+"input": "<span id=' a'><span id='a '>",
+"fail-if": "duplicate-id"},
+
+{"description": "not duplicate ID values because case doesn't match",
+"input": "<span id=a><span id=A>",
+"fail-if": "duplicate-id"}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/inputattributes.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/inputattributes.test
new file mode 100755
index 000000000..acdc50e7b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/inputattributes.test
@@ -0,0 +1,2795 @@
+{"tests": [
+
+{"description": "'size' attribute deprecated on <input type='text'>",
+"input": "<input type=text size>",
+"fail-unless": "deprecated-attribute"},
+
+{"description": "'size' attribute deprecated on <input type='password'>",
+"input": "<input type=password size>",
+"fail-unless": "deprecated-attribute"},
+
+{"description": "allowed 'accesskey' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'checked' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox checked>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='checkbox'>",
+"input": "<input type=checkbox action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='text'>",
+"input": "<input type=text accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='text'>",
+"input": "<input type=text accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='text'>",
+"input": "<input type=text replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='text'>",
+"input": "<input type=text disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='text'>",
+"input": "<input type=text alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'size' attribute on <input type='text'>",
+"input": "<input type=text size>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='text'>",
+"input": "<input type=text checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='text'>",
+"input": "<input type=text min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'pattern' attribute on <input type='text'>",
+"input": "<input type=text pattern>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='text'>",
+"input": "<input type=text readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='text'>",
+"input": "<input type=text template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='text'>",
+"input": "<input type=text autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='text'>",
+"input": "<input type=text target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='text'>",
+"input": "<input type=text method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='text'>",
+"input": "<input type=text form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='text'>",
+"input": "<input type=text max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='text'>",
+"input": "<input type=text step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='text'>",
+"input": "<input type=text enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='text'>",
+"input": "<input type=text src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='text'>",
+"input": "<input type=text name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='text'>",
+"input": "<input type=text required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='text'>",
+"input": "<input type=text list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='text'>",
+"input": "<input type=text value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='text'>",
+"input": "<input type=text autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'inputmode' attribute on <input type='text'>",
+"input": "<input type=text inputmode>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'maxlength' attribute on <input type='text'>",
+"input": "<input type=text maxlength>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='text'>",
+"input": "<input type=text action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='text'>",
+"input": "<input type=text tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='image'>",
+"input": "<input type=image accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='image'>",
+"input": "<input type=image accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'replace' attribute on <input type='image'>",
+"input": "<input type=image replace>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='image'>",
+"input": "<input type=image disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'alt' attribute on <input type='image'>",
+"input": "<input type=image alt>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='image'>",
+"input": "<input type=image size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='image'>",
+"input": "<input type=image checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='image'>",
+"input": "<input type=image min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='image'>",
+"input": "<input type=image pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='image'>",
+"input": "<input type=image readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='image'>",
+"input": "<input type=image template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='image'>",
+"input": "<input type=image autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'target' attribute on <input type='image'>",
+"input": "<input type=image target>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'method' attribute on <input type='image'>",
+"input": "<input type=image method>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='image'>",
+"input": "<input type=image form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='image'>",
+"input": "<input type=image max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='image'>",
+"input": "<input type=image step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'enctype' attribute on <input type='image'>",
+"input": "<input type=image enctype>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'src' attribute on <input type='image'>",
+"input": "<input type=image src>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='image'>",
+"input": "<input type=image name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='image'>",
+"input": "<input type=image required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='image'>",
+"input": "<input type=image list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'value' attribute not allowed on <input type='image'>",
+"input": "<input type=image value>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='image'>",
+"input": "<input type=image autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='image'>",
+"input": "<input type=image inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='image'>",
+"input": "<input type=image maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'action' attribute on <input type='image'>",
+"input": "<input type=image action>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='image'>",
+"input": "<input type=image tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='number'>",
+"input": "<input type=number accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='number'>",
+"input": "<input type=number accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='number'>",
+"input": "<input type=number replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='number'>",
+"input": "<input type=number disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='number'>",
+"input": "<input type=number alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='number'>",
+"input": "<input type=number size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='number'>",
+"input": "<input type=number checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='number'>",
+"input": "<input type=number min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='number'>",
+"input": "<input type=number pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='number'>",
+"input": "<input type=number readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='number'>",
+"input": "<input type=number template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='number'>",
+"input": "<input type=number autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='number'>",
+"input": "<input type=number target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='number'>",
+"input": "<input type=number method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='number'>",
+"input": "<input type=number form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='number'>",
+"input": "<input type=number max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'step' attribute on <input type='number'>",
+"input": "<input type=number step>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='number'>",
+"input": "<input type=number enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='number'>",
+"input": "<input type=number src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='number'>",
+"input": "<input type=number name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='number'>",
+"input": "<input type=number required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='number'>",
+"input": "<input type=number list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='number'>",
+"input": "<input type=number value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='number'>",
+"input": "<input type=number autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='number'>",
+"input": "<input type=number inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='number'>",
+"input": "<input type=number maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='number'>",
+"input": "<input type=number action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='number'>",
+"input": "<input type=number tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='month'>",
+"input": "<input type=month accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='month'>",
+"input": "<input type=month accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='month'>",
+"input": "<input type=month replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='month'>",
+"input": "<input type=month disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='month'>",
+"input": "<input type=month alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='month'>",
+"input": "<input type=month size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='month'>",
+"input": "<input type=month checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='month'>",
+"input": "<input type=month min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='month'>",
+"input": "<input type=month pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='month'>",
+"input": "<input type=month readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='month'>",
+"input": "<input type=month template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='month'>",
+"input": "<input type=month autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='month'>",
+"input": "<input type=month target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='month'>",
+"input": "<input type=month method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='month'>",
+"input": "<input type=month form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='month'>",
+"input": "<input type=month max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'step' attribute on <input type='month'>",
+"input": "<input type=month step>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='month'>",
+"input": "<input type=month enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='month'>",
+"input": "<input type=month src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='month'>",
+"input": "<input type=month name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='month'>",
+"input": "<input type=month required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='month'>",
+"input": "<input type=month list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='month'>",
+"input": "<input type=month value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='month'>",
+"input": "<input type=month autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='month'>",
+"input": "<input type=month inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='month'>",
+"input": "<input type=month maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='month'>",
+"input": "<input type=month action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='month'>",
+"input": "<input type=month tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='radio'>",
+"input": "<input type=radio accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='radio'>",
+"input": "<input type=radio disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'checked' attribute on <input type='radio'>",
+"input": "<input type=radio checked>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='radio'>",
+"input": "<input type=radio autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='radio'>",
+"input": "<input type=radio form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='radio'>",
+"input": "<input type=radio name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='radio'>",
+"input": "<input type=radio required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='radio'>",
+"input": "<input type=radio value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='radio'>",
+"input": "<input type=radio action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='radio'>",
+"input": "<input type=radio tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='file'>",
+"input": "<input type=file accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accept' attribute on <input type='file'>",
+"input": "<input type=file accept>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='file'>",
+"input": "<input type=file replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='file'>",
+"input": "<input type=file disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='file'>",
+"input": "<input type=file alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='file'>",
+"input": "<input type=file size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='file'>",
+"input": "<input type=file checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='file'>",
+"input": "<input type=file min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='file'>",
+"input": "<input type=file pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='file'>",
+"input": "<input type=file readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='file'>",
+"input": "<input type=file template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='file'>",
+"input": "<input type=file autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='file'>",
+"input": "<input type=file target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='file'>",
+"input": "<input type=file method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='file'>",
+"input": "<input type=file form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='file'>",
+"input": "<input type=file max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='file'>",
+"input": "<input type=file step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='file'>",
+"input": "<input type=file enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='file'>",
+"input": "<input type=file src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='file'>",
+"input": "<input type=file name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='file'>",
+"input": "<input type=file required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='file'>",
+"input": "<input type=file list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'value' attribute not allowed on <input type='file'>",
+"input": "<input type=file value>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='file'>",
+"input": "<input type=file autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='file'>",
+"input": "<input type=file inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='file'>",
+"input": "<input type=file maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='file'>",
+"input": "<input type=file action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='file'>",
+"input": "<input type=file tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='password'>",
+"input": "<input type=password accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='password'>",
+"input": "<input type=password accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='password'>",
+"input": "<input type=password replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='password'>",
+"input": "<input type=password disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='password'>",
+"input": "<input type=password alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'size' attribute on <input type='password'>",
+"input": "<input type=password size>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='password'>",
+"input": "<input type=password checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='password'>",
+"input": "<input type=password min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'pattern' attribute on <input type='password'>",
+"input": "<input type=password pattern>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='password'>",
+"input": "<input type=password readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='password'>",
+"input": "<input type=password template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='password'>",
+"input": "<input type=password autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='password'>",
+"input": "<input type=password target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='password'>",
+"input": "<input type=password method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='password'>",
+"input": "<input type=password form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='password'>",
+"input": "<input type=password max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='password'>",
+"input": "<input type=password step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='password'>",
+"input": "<input type=password enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='password'>",
+"input": "<input type=password src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='password'>",
+"input": "<input type=password name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='password'>",
+"input": "<input type=password required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='password'>",
+"input": "<input type=password list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='password'>",
+"input": "<input type=password value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='password'>",
+"input": "<input type=password autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'inputmode' attribute on <input type='password'>",
+"input": "<input type=password inputmode>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'maxlength' attribute on <input type='password'>",
+"input": "<input type=password maxlength>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='password'>",
+"input": "<input type=password action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='password'>",
+"input": "<input type=password tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='move-up'>",
+"input": "<input type=move-up accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='move-up'>",
+"input": "<input type=move-up disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='move-up'>",
+"input": "<input type=move-up autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='move-up'>",
+"input": "<input type=move-up form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='move-up'>",
+"input": "<input type=move-up name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='move-up'>",
+"input": "<input type=move-up value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='move-up'>",
+"input": "<input type=move-up action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='move-up'>",
+"input": "<input type=move-up tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='submit'>",
+"input": "<input type=submit accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'replace' attribute on <input type='submit'>",
+"input": "<input type=submit replace>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='submit'>",
+"input": "<input type=submit disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='submit'>",
+"input": "<input type=submit autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'target' attribute on <input type='submit'>",
+"input": "<input type=submit target>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'method' attribute on <input type='submit'>",
+"input": "<input type=submit method>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='submit'>",
+"input": "<input type=submit form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'enctype' attribute on <input type='submit'>",
+"input": "<input type=submit enctype>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='submit'>",
+"input": "<input type=submit name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='submit'>",
+"input": "<input type=submit value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='submit'>",
+"input": "<input type=submit maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'action' attribute on <input type='submit'>",
+"input": "<input type=submit action>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='submit'>",
+"input": "<input type=submit tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='add'>",
+"input": "<input type=add accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='add'>",
+"input": "<input type=add accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='add'>",
+"input": "<input type=add replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='add'>",
+"input": "<input type=add disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='add'>",
+"input": "<input type=add alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='add'>",
+"input": "<input type=add size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='add'>",
+"input": "<input type=add checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='add'>",
+"input": "<input type=add min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='add'>",
+"input": "<input type=add pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='add'>",
+"input": "<input type=add readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'template' attribute on <input type='add'>",
+"input": "<input type=add template>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='add'>",
+"input": "<input type=add autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='add'>",
+"input": "<input type=add target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='add'>",
+"input": "<input type=add method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='add'>",
+"input": "<input type=add form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='add'>",
+"input": "<input type=add max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='add'>",
+"input": "<input type=add step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='add'>",
+"input": "<input type=add enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='add'>",
+"input": "<input type=add src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='add'>",
+"input": "<input type=add name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='add'>",
+"input": "<input type=add required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='add'>",
+"input": "<input type=add list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='add'>",
+"input": "<input type=add value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='add'>",
+"input": "<input type=add autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='add'>",
+"input": "<input type=add inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='add'>",
+"input": "<input type=add maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='add'>",
+"input": "<input type=add action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='add'>",
+"input": "<input type=add tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accesskey' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden accesskey>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='hidden'>",
+"input": "<input type=hidden disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autofocus' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden autofocus>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='hidden'>",
+"input": "<input type=hidden form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='hidden'>",
+"input": "<input type=hidden name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='hidden'>",
+"input": "<input type=hidden value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'tabindex' attribute not allowed on <input type='hidden'>",
+"input": "<input type=hidden tabindex>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='email'>",
+"input": "<input type=email accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='email'>",
+"input": "<input type=email accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='email'>",
+"input": "<input type=email replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='email'>",
+"input": "<input type=email disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='email'>",
+"input": "<input type=email alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='email'>",
+"input": "<input type=email size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='email'>",
+"input": "<input type=email checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='email'>",
+"input": "<input type=email min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'pattern' attribute on <input type='email'>",
+"input": "<input type=email pattern>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='email'>",
+"input": "<input type=email readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='email'>",
+"input": "<input type=email template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='email'>",
+"input": "<input type=email autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='email'>",
+"input": "<input type=email target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='email'>",
+"input": "<input type=email method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='email'>",
+"input": "<input type=email form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='email'>",
+"input": "<input type=email max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='email'>",
+"input": "<input type=email step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='email'>",
+"input": "<input type=email enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='email'>",
+"input": "<input type=email src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='email'>",
+"input": "<input type=email name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='email'>",
+"input": "<input type=email required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='email'>",
+"input": "<input type=email list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='email'>",
+"input": "<input type=email value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='email'>",
+"input": "<input type=email autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'inputmode' attribute on <input type='email'>",
+"input": "<input type=email inputmode>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'maxlength' attribute on <input type='email'>",
+"input": "<input type=email maxlength>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='email'>",
+"input": "<input type=email action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='email'>",
+"input": "<input type=email tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='week'>",
+"input": "<input type=week accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='week'>",
+"input": "<input type=week accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='week'>",
+"input": "<input type=week replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='week'>",
+"input": "<input type=week disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='week'>",
+"input": "<input type=week alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='week'>",
+"input": "<input type=week size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='week'>",
+"input": "<input type=week checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='week'>",
+"input": "<input type=week min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='week'>",
+"input": "<input type=week pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='week'>",
+"input": "<input type=week readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='week'>",
+"input": "<input type=week template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='week'>",
+"input": "<input type=week autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='week'>",
+"input": "<input type=week target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='week'>",
+"input": "<input type=week method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='week'>",
+"input": "<input type=week form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='week'>",
+"input": "<input type=week max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'step' attribute on <input type='week'>",
+"input": "<input type=week step>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='week'>",
+"input": "<input type=week enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='week'>",
+"input": "<input type=week src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='week'>",
+"input": "<input type=week name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='week'>",
+"input": "<input type=week required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='week'>",
+"input": "<input type=week list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='week'>",
+"input": "<input type=week value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='week'>",
+"input": "<input type=week autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='week'>",
+"input": "<input type=week inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='week'>",
+"input": "<input type=week maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='week'>",
+"input": "<input type=week action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='week'>",
+"input": "<input type=week tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='date'>",
+"input": "<input type=date accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='date'>",
+"input": "<input type=date accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='date'>",
+"input": "<input type=date replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='date'>",
+"input": "<input type=date disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='date'>",
+"input": "<input type=date alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='date'>",
+"input": "<input type=date size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='date'>",
+"input": "<input type=date checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='date'>",
+"input": "<input type=date min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='date'>",
+"input": "<input type=date pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='date'>",
+"input": "<input type=date readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='date'>",
+"input": "<input type=date template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='date'>",
+"input": "<input type=date autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='date'>",
+"input": "<input type=date target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='date'>",
+"input": "<input type=date method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='date'>",
+"input": "<input type=date form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='date'>",
+"input": "<input type=date max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'step' attribute on <input type='date'>",
+"input": "<input type=date step>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='date'>",
+"input": "<input type=date enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='date'>",
+"input": "<input type=date src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='date'>",
+"input": "<input type=date name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='date'>",
+"input": "<input type=date required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='date'>",
+"input": "<input type=date list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='date'>",
+"input": "<input type=date value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='date'>",
+"input": "<input type=date autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='date'>",
+"input": "<input type=date inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='date'>",
+"input": "<input type=date maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='date'>",
+"input": "<input type=date action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='date'>",
+"input": "<input type=date tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='datetime'>",
+"input": "<input type=datetime accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='datetime'>",
+"input": "<input type=datetime disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='datetime'>",
+"input": "<input type=datetime min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='datetime'>",
+"input": "<input type=datetime readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='datetime'>",
+"input": "<input type=datetime autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='datetime'>",
+"input": "<input type=datetime form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='datetime'>",
+"input": "<input type=datetime max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'step' attribute on <input type='datetime'>",
+"input": "<input type=datetime step>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='datetime'>",
+"input": "<input type=datetime name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='datetime'>",
+"input": "<input type=datetime required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='datetime'>",
+"input": "<input type=datetime list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='datetime'>",
+"input": "<input type=datetime value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='datetime'>",
+"input": "<input type=datetime autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='datetime'>",
+"input": "<input type=datetime action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='datetime'>",
+"input": "<input type=datetime tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='move-down'>",
+"input": "<input type=move-down accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='move-down'>",
+"input": "<input type=move-down disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='move-down'>",
+"input": "<input type=move-down autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='move-down'>",
+"input": "<input type=move-down form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='move-down'>",
+"input": "<input type=move-down name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='move-down'>",
+"input": "<input type=move-down value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='move-down'>",
+"input": "<input type=move-down action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='move-down'>",
+"input": "<input type=move-down tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'step' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local step>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='datetime-local'>",
+"input": "<input type=datetime-local action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='reset'>",
+"input": "<input type=reset accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='reset'>",
+"input": "<input type=reset disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='reset'>",
+"input": "<input type=reset autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='reset'>",
+"input": "<input type=reset form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='reset'>",
+"input": "<input type=reset name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='reset'>",
+"input": "<input type=reset value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='reset'>",
+"input": "<input type=reset action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='reset'>",
+"input": "<input type=reset tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='url'>",
+"input": "<input type=url accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='url'>",
+"input": "<input type=url accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='url'>",
+"input": "<input type=url replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='url'>",
+"input": "<input type=url disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='url'>",
+"input": "<input type=url alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='url'>",
+"input": "<input type=url size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='url'>",
+"input": "<input type=url checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='url'>",
+"input": "<input type=url min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'pattern' attribute on <input type='url'>",
+"input": "<input type=url pattern>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='url'>",
+"input": "<input type=url readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='url'>",
+"input": "<input type=url template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='url'>",
+"input": "<input type=url autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='url'>",
+"input": "<input type=url target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='url'>",
+"input": "<input type=url method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='url'>",
+"input": "<input type=url form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='url'>",
+"input": "<input type=url max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='url'>",
+"input": "<input type=url step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='url'>",
+"input": "<input type=url enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='url'>",
+"input": "<input type=url src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='url'>",
+"input": "<input type=url name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='url'>",
+"input": "<input type=url required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='url'>",
+"input": "<input type=url list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='url'>",
+"input": "<input type=url value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='url'>",
+"input": "<input type=url autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'inputmode' attribute on <input type='url'>",
+"input": "<input type=url inputmode>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'maxlength' attribute on <input type='url'>",
+"input": "<input type=url maxlength>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='url'>",
+"input": "<input type=url action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='url'>",
+"input": "<input type=url tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='button'>",
+"input": "<input type=button accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='button'>",
+"input": "<input type=button accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='button'>",
+"input": "<input type=button replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='button'>",
+"input": "<input type=button disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='button'>",
+"input": "<input type=button alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='button'>",
+"input": "<input type=button size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='button'>",
+"input": "<input type=button checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='button'>",
+"input": "<input type=button min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='button'>",
+"input": "<input type=button pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='button'>",
+"input": "<input type=button readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='button'>",
+"input": "<input type=button template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='button'>",
+"input": "<input type=button autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='button'>",
+"input": "<input type=button target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='button'>",
+"input": "<input type=button method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='button'>",
+"input": "<input type=button form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='button'>",
+"input": "<input type=button max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='button'>",
+"input": "<input type=button step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='button'>",
+"input": "<input type=button enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='button'>",
+"input": "<input type=button src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='button'>",
+"input": "<input type=button name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='button'>",
+"input": "<input type=button required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='button'>",
+"input": "<input type=button list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='button'>",
+"input": "<input type=button value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='button'>",
+"input": "<input type=button autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='button'>",
+"input": "<input type=button inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='button'>",
+"input": "<input type=button maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='button'>",
+"input": "<input type=button action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='button'>",
+"input": "<input type=button tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='remove'>",
+"input": "<input type=remove accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='remove'>",
+"input": "<input type=remove disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'min' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove min>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'readonly' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove readonly>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='remove'>",
+"input": "<input type=remove autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='remove'>",
+"input": "<input type=remove form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'max' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove max>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'step' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove step>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='remove'>",
+"input": "<input type=remove name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'required' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove required>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'list' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove list>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='remove'>",
+"input": "<input type=remove value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'autocomplete' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove autocomplete>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='remove'>",
+"input": "<input type=remove action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='remove'>",
+"input": "<input type=remove tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='range'>",
+"input": "<input type=range accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='range'>",
+"input": "<input type=range accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='range'>",
+"input": "<input type=range replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='range'>",
+"input": "<input type=range disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='range'>",
+"input": "<input type=range alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='range'>",
+"input": "<input type=range size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='range'>",
+"input": "<input type=range checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='range'>",
+"input": "<input type=range min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='range'>",
+"input": "<input type=range pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='range'>",
+"input": "<input type=range readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='range'>",
+"input": "<input type=range template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='range'>",
+"input": "<input type=range autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='range'>",
+"input": "<input type=range target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='range'>",
+"input": "<input type=range method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='range'>",
+"input": "<input type=range form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='range'>",
+"input": "<input type=range max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'step' attribute on <input type='range'>",
+"input": "<input type=range step>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='range'>",
+"input": "<input type=range enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='range'>",
+"input": "<input type=range src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='range'>",
+"input": "<input type=range name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='range'>",
+"input": "<input type=range required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='range'>",
+"input": "<input type=range list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='range'>",
+"input": "<input type=range value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='range'>",
+"input": "<input type=range autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='range'>",
+"input": "<input type=range inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='range'>",
+"input": "<input type=range maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='range'>",
+"input": "<input type=range action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='range'>",
+"input": "<input type=range tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'accesskey' attribute on <input type='time'>",
+"input": "<input type=time accesskey>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'accept' attribute not allowed on <input type='time'>",
+"input": "<input type=time accept>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'replace' attribute not allowed on <input type='time'>",
+"input": "<input type=time replace>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'disabled' attribute on <input type='time'>",
+"input": "<input type=time disabled>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'alt' attribute not allowed on <input type='time'>",
+"input": "<input type=time alt>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'size' attribute not allowed on <input type='time'>",
+"input": "<input type=time size>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'checked' attribute not allowed on <input type='time'>",
+"input": "<input type=time checked>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'min' attribute on <input type='time'>",
+"input": "<input type=time min>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'pattern' attribute not allowed on <input type='time'>",
+"input": "<input type=time pattern>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'readonly' attribute on <input type='time'>",
+"input": "<input type=time readonly>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'template' attribute not allowed on <input type='time'>",
+"input": "<input type=time template>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autofocus' attribute on <input type='time'>",
+"input": "<input type=time autofocus>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'target' attribute not allowed on <input type='time'>",
+"input": "<input type=time target>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'method' attribute not allowed on <input type='time'>",
+"input": "<input type=time method>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'form' attribute on <input type='time'>",
+"input": "<input type=time form>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'max' attribute on <input type='time'>",
+"input": "<input type=time max>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'step' attribute on <input type='time'>",
+"input": "<input type=time step>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'enctype' attribute not allowed on <input type='time'>",
+"input": "<input type=time enctype>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'src' attribute not allowed on <input type='time'>",
+"input": "<input type=time src>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'name' attribute on <input type='time'>",
+"input": "<input type=time name>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'required' attribute on <input type='time'>",
+"input": "<input type=time required>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'list' attribute on <input type='time'>",
+"input": "<input type=time list>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'value' attribute on <input type='time'>",
+"input": "<input type=time value>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'autocomplete' attribute on <input type='time'>",
+"input": "<input type=time autocomplete>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'inputmode' attribute not allowed on <input type='time'>",
+"input": "<input type=time inputmode>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'maxlength' attribute not allowed on <input type='time'>",
+"input": "<input type=time maxlength>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "'action' attribute not allowed on <input type='time'>",
+"input": "<input type=time action>",
+"fail-unless": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "allowed 'tabindex' attribute on <input type='time'>",
+"input": "<input type=time tabindex>",
+"fail-if": "attribute-not-allowed-on-this-input-type"},
+
+{"description": "unknown 'foo' attribute on <input type='checkbox'>",
+"input": "<input type=checkbox foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='text'>",
+"input": "<input type=text foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='image'>",
+"input": "<input type=image foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='number'>",
+"input": "<input type=number foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='month'>",
+"input": "<input type=month foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='radio'>",
+"input": "<input type=radio foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='file'>",
+"input": "<input type=file foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='password'>",
+"input": "<input type=password foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='move-up'>",
+"input": "<input type=move-up foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='submit'>",
+"input": "<input type=submit foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='add'>",
+"input": "<input type=add foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='hidden'>",
+"input": "<input type=hidden foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='email'>",
+"input": "<input type=email foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='week'>",
+"input": "<input type=week foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='date'>",
+"input": "<input type=date foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='datetime'>",
+"input": "<input type=datetime foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='move-down'>",
+"input": "<input type=move-down foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='datetime-local'>",
+"input": "<input type=datetime-local foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='reset'>",
+"input": "<input type=reset foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='url'>",
+"input": "<input type=url foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='button'>",
+"input": "<input type=button foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='remove'>",
+"input": "<input type=remove foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='range'>",
+"input": "<input type=range foo>",
+"fail-unless": "unknown-attribute"},
+
+{"description": "unknown 'foo' attribute on <input type='time'>",
+"input": "<input type=time foo>",
+"fail-unless": "unknown-attribute"}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/irrelevantattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/irrelevantattribute.test
new file mode 100755
index 000000000..fa4061300
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/irrelevantattribute.test
@@ -0,0 +1,63 @@
+{"tests": [
+
+{"description": "valid irrelevant attribute value 'irrelevant'",
+"input": "<span irrelevant=irrelevant>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "valid irrelevant attribute value ''",
+"input": "<span irrelevant=''>",
+"fail-if": "invalid-attribute-value"},
+
+{"description": "invalid irrelevant attribute value due to uppercase",
+"input": "<span irrelevant=IRRELEVANT>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value due to mixed case",
+"input": "<span irrelevant=IrReLeVaNt>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value due to leading space",
+"input": "<span irrelevant=' irrelevant'>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value due to trailing space",
+"input": "<span irrelevant='irrelevant '>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value 'foo'",
+"input": "<span irrelevant=foo>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value '0'",
+"input": "<span irrelevant=0>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value '1'",
+"input": "<span irrelevant=1>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value 'yes'",
+"input": "<span irrelevant=yes>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value 'no'",
+"input": "<span irrelevant=no>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value 'true'",
+"input": "<span irrelevant=true>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value 'false'",
+"input": "<span irrelevant=false>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value 'auto'",
+"input": "<span irrelevant=auto>",
+"fail-unless": "invalid-boolean-value"},
+
+{"description": "invalid irrelevant attribute value 'inherit'",
+"input": "<span irrelevant=inherit>",
+"fail-unless": "invalid-boolean-value"}
+
+]}
\ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/langattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/langattribute.test
new file mode 100755
index 000000000..6c831a179
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/langattribute.test
@@ -0,0 +1,5579 @@
+{"tests": [
+
+{"description": "valid lang attribute 'roh'",
+"input": "<span lang=roh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gv'",
+"input": "<span lang=gv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gu'",
+"input": "<span lang=gu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'scn'",
+"input": "<span lang=scn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rom'",
+"input": "<span lang=rom>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ron'",
+"input": "<span lang=ron>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'alg'",
+"input": "<span lang=alg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oss'",
+"input": "<span lang=oss>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ale'",
+"input": "<span lang=ale>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'alb'",
+"input": "<span lang=alb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sco'",
+"input": "<span lang=sco>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'scc'",
+"input": "<span lang=scc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mni'",
+"input": "<span lang=mni>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gd'",
+"input": "<span lang=gd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'per'",
+"input": "<span lang=per>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ga'",
+"input": "<span lang=ga>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nwc'",
+"input": "<span lang=nwc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'osa'",
+"input": "<span lang=osa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gn'",
+"input": "<span lang=gn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'alt'",
+"input": "<span lang=alt>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gl'",
+"input": "<span lang=gl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'scr'",
+"input": "<span lang=scr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mwr'",
+"input": "<span lang=mwr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ty'",
+"input": "<span lang=ty>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tw'",
+"input": "<span lang=tw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tt'",
+"input": "<span lang=tt>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tr'",
+"input": "<span lang=tr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ts'",
+"input": "<span lang=ts>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tn'",
+"input": "<span lang=tn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'to'",
+"input": "<span lang=to>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tl'",
+"input": "<span lang=tl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tk'",
+"input": "<span lang=tk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'th'",
+"input": "<span lang=th>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ti'",
+"input": "<span lang=ti>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ven'",
+"input": "<span lang=ven>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tg'",
+"input": "<span lang=tg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'te'",
+"input": "<span lang=te>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uga'",
+"input": "<span lang=uga>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ta'",
+"input": "<span lang=ta>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fas'",
+"input": "<span lang=fas>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fat'",
+"input": "<span lang=fat>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fan'",
+"input": "<span lang=fan>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fao'",
+"input": "<span lang=fao>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'got'",
+"input": "<span lang=got>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sme'",
+"input": "<span lang=sme>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'din'",
+"input": "<span lang=din>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hye'",
+"input": "<span lang=hye>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'guj'",
+"input": "<span lang=guj>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cmc'",
+"input": "<span lang=cmc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'srd'",
+"input": "<span lang=srd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mdr'",
+"input": "<span lang=mdr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ml'",
+"input": "<span lang=ml>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'div'",
+"input": "<span lang=div>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zh'",
+"input": "<span lang=zh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tem'",
+"input": "<span lang=tem>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'xho'",
+"input": "<span lang=xho>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mno'",
+"input": "<span lang=mno>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'roa'",
+"input": "<span lang=roa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'za'",
+"input": "<span lang=za>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'deu'",
+"input": "<span lang=deu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mk'",
+"input": "<span lang=mk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nbl'",
+"input": "<span lang=nbl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zu'",
+"input": "<span lang=zu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ter'",
+"input": "<span lang=ter>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tet'",
+"input": "<span lang=tet>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mnc'",
+"input": "<span lang=mnc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sun'",
+"input": "<span lang=sun>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'abk'",
+"input": "<span lang=abk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'suk'",
+"input": "<span lang=suk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kur'",
+"input": "<span lang=kur>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kum'",
+"input": "<span lang=kum>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'slo'",
+"input": "<span lang=slo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sus'",
+"input": "<span lang=sus>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'new'",
+"input": "<span lang=new>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kua'",
+"input": "<span lang=kua>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sux'",
+"input": "<span lang=sux>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'den'",
+"input": "<span lang=den>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mr'",
+"input": "<span lang=mr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mul'",
+"input": "<span lang=mul>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lez'",
+"input": "<span lang=lez>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gla'",
+"input": "<span lang=gla>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bos'",
+"input": "<span lang=bos>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gle'",
+"input": "<span lang=gle>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eka'",
+"input": "<span lang=eka>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'glg'",
+"input": "<span lang=glg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'akk'",
+"input": "<span lang=akk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dra'",
+"input": "<span lang=dra>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aka'",
+"input": "<span lang=aka>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bod'",
+"input": "<span lang=bod>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'glv'",
+"input": "<span lang=glv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jrb'",
+"input": "<span lang=jrb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vie'",
+"input": "<span lang=vie>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ipk'",
+"input": "<span lang=ipk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uzb'",
+"input": "<span lang=uzb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sgn'",
+"input": "<span lang=sgn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sga'",
+"input": "<span lang=sga>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bre'",
+"input": "<span lang=bre>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'apa'",
+"input": "<span lang=apa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bra'",
+"input": "<span lang=bra>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aym'",
+"input": "<span lang=aym>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cha'",
+"input": "<span lang=cha>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chb'",
+"input": "<span lang=chb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'che'",
+"input": "<span lang=che>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chg'",
+"input": "<span lang=chg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chi'",
+"input": "<span lang=chi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chk'",
+"input": "<span lang=chk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aus'",
+"input": "<span lang=aus>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chn'",
+"input": "<span lang=chn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cho'",
+"input": "<span lang=cho>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chp'",
+"input": "<span lang=chp>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chr'",
+"input": "<span lang=chr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chu'",
+"input": "<span lang=chu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chv'",
+"input": "<span lang=chv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chy'",
+"input": "<span lang=chy>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'msa'",
+"input": "<span lang=msa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iii'",
+"input": "<span lang=iii>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vot'",
+"input": "<span lang=vot>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mg'",
+"input": "<span lang=mg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ndo'",
+"input": "<span lang=ndo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ibo'",
+"input": "<span lang=ibo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'car'",
+"input": "<span lang=car>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mo'",
+"input": "<span lang=mo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mn'",
+"input": "<span lang=mn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mi'",
+"input": "<span lang=mi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mh'",
+"input": "<span lang=mh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cau'",
+"input": "<span lang=cau>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cat'",
+"input": "<span lang=cat>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bur'",
+"input": "<span lang=bur>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mt'",
+"input": "<span lang=mt>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cai'",
+"input": "<span lang=cai>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'del'",
+"input": "<span lang=del>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ms'",
+"input": "<span lang=ms>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'byn'",
+"input": "<span lang=byn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mwl'",
+"input": "<span lang=mwl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'my'",
+"input": "<span lang=my>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cad'",
+"input": "<span lang=cad>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tat'",
+"input": "<span lang=tat>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nde'",
+"input": "<span lang=nde>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tam'",
+"input": "<span lang=tam>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'spa'",
+"input": "<span lang=spa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tah'",
+"input": "<span lang=tah>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tai'",
+"input": "<span lang=tai>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cze'",
+"input": "<span lang=cze>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'afh'",
+"input": "<span lang=afh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eng'",
+"input": "<span lang=eng>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'afa'",
+"input": "<span lang=afa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'id'",
+"input": "<span lang=id>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nyn'",
+"input": "<span lang=nyn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nyo'",
+"input": "<span lang=nyo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gez'",
+"input": "<span lang=gez>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nya'",
+"input": "<span lang=nya>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sio'",
+"input": "<span lang=sio>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sin'",
+"input": "<span lang=sin>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'afr'",
+"input": "<span lang=afr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'map'",
+"input": "<span lang=map>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fr'",
+"input": "<span lang=fr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lao'",
+"input": "<span lang=lao>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lah'",
+"input": "<span lang=lah>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nym'",
+"input": "<span lang=nym>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sna'",
+"input": "<span lang=sna>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lad'",
+"input": "<span lang=lad>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fy'",
+"input": "<span lang=fy>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'snk'",
+"input": "<span lang=snk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fa'",
+"input": "<span lang=fa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mac'",
+"input": "<span lang=mac>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mad'",
+"input": "<span lang=mad>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ff'",
+"input": "<span lang=ff>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lat'",
+"input": "<span lang=lat>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fi'",
+"input": "<span lang=fi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fj'",
+"input": "<span lang=fj>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mal'",
+"input": "<span lang=mal>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mao'",
+"input": "<span lang=mao>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fo'",
+"input": "<span lang=fo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mak'",
+"input": "<span lang=mak>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'egy'",
+"input": "<span lang=egy>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'znd'",
+"input": "<span lang=znd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ss'",
+"input": "<span lang=ss>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sr'",
+"input": "<span lang=sr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sq'",
+"input": "<span lang=sq>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sit'",
+"input": "<span lang=sit>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sw'",
+"input": "<span lang=sw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sv'",
+"input": "<span lang=sv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'su'",
+"input": "<span lang=su>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'st'",
+"input": "<span lang=st>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sk'",
+"input": "<span lang=sk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'si'",
+"input": "<span lang=si>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sh'",
+"input": "<span lang=sh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'so'",
+"input": "<span lang=so>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sn'",
+"input": "<span lang=sn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sm'",
+"input": "<span lang=sm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sl'",
+"input": "<span lang=sl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sc'",
+"input": "<span lang=sc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sa'",
+"input": "<span lang=sa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sg'",
+"input": "<span lang=sg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'se'",
+"input": "<span lang=se>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sd'",
+"input": "<span lang=sd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zen'",
+"input": "<span lang=zen>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kbd'",
+"input": "<span lang=kbd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'enm'",
+"input": "<span lang=enm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iro'",
+"input": "<span lang=iro>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vai'",
+"input": "<span lang=vai>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'csb'",
+"input": "<span lang=csb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tsn'",
+"input": "<span lang=tsn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lg'",
+"input": "<span lang=lg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lb'",
+"input": "<span lang=lb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'la'",
+"input": "<span lang=la>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ln'",
+"input": "<span lang=ln>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lo'",
+"input": "<span lang=lo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'li'",
+"input": "<span lang=li>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lv'",
+"input": "<span lang=lv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lt'",
+"input": "<span lang=lt>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lu'",
+"input": "<span lang=lu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hin'",
+"input": "<span lang=hin>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fij'",
+"input": "<span lang=fij>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fin'",
+"input": "<span lang=fin>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eus'",
+"input": "<span lang=eus>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yi'",
+"input": "<span lang=yi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'non'",
+"input": "<span lang=non>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ceb'",
+"input": "<span lang=ceb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yo'",
+"input": "<span lang=yo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dan'",
+"input": "<span lang=dan>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cel'",
+"input": "<span lang=cel>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bat'",
+"input": "<span lang=bat>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nob'",
+"input": "<span lang=nob>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dak'",
+"input": "<span lang=dak>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ces'",
+"input": "<span lang=ces>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dar'",
+"input": "<span lang=dar>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'son'",
+"input": "<span lang=son>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'day'",
+"input": "<span lang=day>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nor'",
+"input": "<span lang=nor>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gba'",
+"input": "<span lang=gba>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ssa'",
+"input": "<span lang=ssa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hil'",
+"input": "<span lang=hil>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kpe'",
+"input": "<span lang=kpe>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'man'",
+"input": "<span lang=man>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wel'",
+"input": "<span lang=wel>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'el'",
+"input": "<span lang=el>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eo'",
+"input": "<span lang=eo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'en'",
+"input": "<span lang=en>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lam'",
+"input": "<span lang=lam>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ee'",
+"input": "<span lang=ee>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tpi'",
+"input": "<span lang=tpi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mdf'",
+"input": "<span lang=mdf>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mas'",
+"input": "<span lang=mas>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mar'",
+"input": "<span lang=mar>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eu'",
+"input": "<span lang=eu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'et'",
+"input": "<span lang=et>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'es'",
+"input": "<span lang=es>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ru'",
+"input": "<span lang=ru>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rw'",
+"input": "<span lang=rw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'goh'",
+"input": "<span lang=goh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sms'",
+"input": "<span lang=sms>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'smo'",
+"input": "<span lang=smo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'smn'",
+"input": "<span lang=smn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'smj'",
+"input": "<span lang=smj>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'smi'",
+"input": "<span lang=smi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nic'",
+"input": "<span lang=nic>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rm'",
+"input": "<span lang=rm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rn'",
+"input": "<span lang=rn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ro'",
+"input": "<span lang=ro>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dsb'",
+"input": "<span lang=dsb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sma'",
+"input": "<span lang=sma>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gor'",
+"input": "<span lang=gor>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ast'",
+"input": "<span lang=ast>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'orm'",
+"input": "<span lang=orm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'que'",
+"input": "<span lang=que>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ori'",
+"input": "<span lang=ori>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'crh'",
+"input": "<span lang=crh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'asm'",
+"input": "<span lang=asm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pus'",
+"input": "<span lang=pus>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kik'",
+"input": "<span lang=kik>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ltz'",
+"input": "<span lang=ltz>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ath'",
+"input": "<span lang=ath>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wln'",
+"input": "<span lang=wln>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'isl'",
+"input": "<span lang=isl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'xh'",
+"input": "<span lang=xh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mag'",
+"input": "<span lang=mag>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mai'",
+"input": "<span lang=mai>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'xx'",
+"input": "<span lang=xx>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mah'",
+"input": "<span lang=mah>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tel'",
+"input": "<span lang=tel>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lav'",
+"input": "<span lang=lav>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zap'",
+"input": "<span lang=zap>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yid'",
+"input": "<span lang=yid>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kok'",
+"input": "<span lang=kok>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kom'",
+"input": "<span lang=kom>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kon'",
+"input": "<span lang=kon>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ukr'",
+"input": "<span lang=ukr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ton'",
+"input": "<span lang=ton>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kos'",
+"input": "<span lang=kos>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kor'",
+"input": "<span lang=kor>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tog'",
+"input": "<span lang=tog>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hun'",
+"input": "<span lang=hun>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hup'",
+"input": "<span lang=hup>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cym'",
+"input": "<span lang=cym>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'udm'",
+"input": "<span lang=udm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bej'",
+"input": "<span lang=bej>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ben'",
+"input": "<span lang=ben>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bel'",
+"input": "<span lang=bel>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bem'",
+"input": "<span lang=bem>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tsi'",
+"input": "<span lang=tsi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aar'",
+"input": "<span lang=aar>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ber'",
+"input": "<span lang=ber>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nzi'",
+"input": "<span lang=nzi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sai'",
+"input": "<span lang=sai>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ang'",
+"input": "<span lang=ang>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pra'",
+"input": "<span lang=pra>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'san'",
+"input": "<span lang=san>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bho'",
+"input": "<span lang=bho>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sal'",
+"input": "<span lang=sal>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pro'",
+"input": "<span lang=pro>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'raj'",
+"input": "<span lang=raj>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sad'",
+"input": "<span lang=sad>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'anp'",
+"input": "<span lang=anp>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rap'",
+"input": "<span lang=rap>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sas'",
+"input": "<span lang=sas>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iba'",
+"input": "<span lang=iba>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'myn'",
+"input": "<span lang=myn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'min'",
+"input": "<span lang=min>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lim'",
+"input": "<span lang=lim>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lin'",
+"input": "<span lang=lin>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nah'",
+"input": "<span lang=nah>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lit'",
+"input": "<span lang=lit>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'efi'",
+"input": "<span lang=efi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'srn'",
+"input": "<span lang=srn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arn'",
+"input": "<span lang=arn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ypk'",
+"input": "<span lang=ypk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mis'",
+"input": "<span lang=mis>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kac'",
+"input": "<span lang=kac>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kab'",
+"input": "<span lang=kab>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kaa'",
+"input": "<span lang=kaa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kan'",
+"input": "<span lang=kan>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kam'",
+"input": "<span lang=kam>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kal'",
+"input": "<span lang=kal>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kas'",
+"input": "<span lang=kas>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kar'",
+"input": "<span lang=kar>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kaw'",
+"input": "<span lang=kaw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kau'",
+"input": "<span lang=kau>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kat'",
+"input": "<span lang=kat>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kaz'",
+"input": "<span lang=kaz>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tyv'",
+"input": "<span lang=tyv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'awa'",
+"input": "<span lang=awa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'urd'",
+"input": "<span lang=urd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ka'",
+"input": "<span lang=ka>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'doi'",
+"input": "<span lang=doi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kg'",
+"input": "<span lang=kg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kk'",
+"input": "<span lang=kk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kj'",
+"input": "<span lang=kj>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ki'",
+"input": "<span lang=ki>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ko'",
+"input": "<span lang=ko>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kn'",
+"input": "<span lang=kn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'km'",
+"input": "<span lang=km>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kl'",
+"input": "<span lang=kl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ks'",
+"input": "<span lang=ks>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kr'",
+"input": "<span lang=kr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kw'",
+"input": "<span lang=kw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kv'",
+"input": "<span lang=kv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ku'",
+"input": "<span lang=ku>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ky'",
+"input": "<span lang=ky>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ota'",
+"input": "<span lang=ota>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kut'",
+"input": "<span lang=kut>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tkl'",
+"input": "<span lang=tkl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nld'",
+"input": "<span lang=nld>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oji'",
+"input": "<span lang=oji>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oci'",
+"input": "<span lang=oci>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ha'",
+"input": "<span lang=ha>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wol'",
+"input": "<span lang=wol>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jav'",
+"input": "<span lang=jav>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hrv'",
+"input": "<span lang=hrv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ger'",
+"input": "<span lang=ger>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mga'",
+"input": "<span lang=mga>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hit'",
+"input": "<span lang=hit>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dyu'",
+"input": "<span lang=dyu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ssw'",
+"input": "<span lang=ssw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'de'",
+"input": "<span lang=de>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'da'",
+"input": "<span lang=da>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dz'",
+"input": "<span lang=dz>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lui'",
+"input": "<span lang=lui>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dv'",
+"input": "<span lang=dv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ita'",
+"input": "<span lang=ita>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'geo'",
+"input": "<span lang=geo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'him'",
+"input": "<span lang=him>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gem'",
+"input": "<span lang=gem>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'crp'",
+"input": "<span lang=crp>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'qu'",
+"input": "<span lang=qu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bas'",
+"input": "<span lang=bas>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'baq'",
+"input": "<span lang=baq>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bad'",
+"input": "<span lang=bad>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nep'",
+"input": "<span lang=nep>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cre'",
+"input": "<span lang=cre>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ban'",
+"input": "<span lang=ban>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'x'",
+"input": "<span lang=x>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bal'",
+"input": "<span lang=bal>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bam'",
+"input": "<span lang=bam>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bak'",
+"input": "<span lang=bak>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'shn'",
+"input": "<span lang=shn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bai'",
+"input": "<span lang=bai>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arp'",
+"input": "<span lang=arp>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'art'",
+"input": "<span lang=art>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arw'",
+"input": "<span lang=arw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'qtz'",
+"input": "<span lang=qtz>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ara'",
+"input": "<span lang=ara>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arc'",
+"input": "<span lang=arc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arg'",
+"input": "<span lang=arg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sem'",
+"input": "<span lang=sem>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sel'",
+"input": "<span lang=sel>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nub'",
+"input": "<span lang=nub>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arm'",
+"input": "<span lang=arm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'btk'",
+"input": "<span lang=btk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lus'",
+"input": "<span lang=lus>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iku'",
+"input": "<span lang=iku>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mus'",
+"input": "<span lang=mus>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lua'",
+"input": "<span lang=lua>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lub'",
+"input": "<span lang=lub>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lug'",
+"input": "<span lang=lug>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ira'",
+"input": "<span lang=ira>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mun'",
+"input": "<span lang=mun>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tur'",
+"input": "<span lang=tur>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lun'",
+"input": "<span lang=lun>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'luo'",
+"input": "<span lang=luo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'men'",
+"input": "<span lang=men>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wa'",
+"input": "<span lang=wa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tso'",
+"input": "<span lang=tso>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wo'",
+"input": "<span lang=wo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jv'",
+"input": "<span lang=jv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tut'",
+"input": "<span lang=tut>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ind'",
+"input": "<span lang=ind>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tuk'",
+"input": "<span lang=tuk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tum'",
+"input": "<span lang=tum>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ja'",
+"input": "<span lang=ja>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cop'",
+"input": "<span lang=cop>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cos'",
+"input": "<span lang=cos>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cor'",
+"input": "<span lang=cor>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ilo'",
+"input": "<span lang=ilo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fiu'",
+"input": "<span lang=fiu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gwi'",
+"input": "<span lang=gwi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'und'",
+"input": "<span lang=und>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gws'",
+"input": "<span lang=gws>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tli'",
+"input": "<span lang=tli>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tlh'",
+"input": "<span lang=tlh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nno'",
+"input": "<span lang=nno>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ch'",
+"input": "<span lang=ch>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'co'",
+"input": "<span lang=co>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ca'",
+"input": "<span lang=ca>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'por'",
+"input": "<span lang=por>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ce'",
+"input": "<span lang=ce>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pon'",
+"input": "<span lang=pon>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cy'",
+"input": "<span lang=cy>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sah'",
+"input": "<span lang=sah>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cs'",
+"input": "<span lang=cs>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cr'",
+"input": "<span lang=cr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ady'",
+"input": "<span lang=ady>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cv'",
+"input": "<span lang=cv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cu'",
+"input": "<span lang=cu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ps'",
+"input": "<span lang=ps>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fra'",
+"input": "<span lang=fra>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dum'",
+"input": "<span lang=dum>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pt'",
+"input": "<span lang=pt>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'swa'",
+"input": "<span lang=swa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dua'",
+"input": "<span lang=dua>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fro'",
+"input": "<span lang=fro>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yap'",
+"input": "<span lang=yap>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'frm'",
+"input": "<span lang=frm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tiv'",
+"input": "<span lang=tiv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'frs'",
+"input": "<span lang=frs>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'frr'",
+"input": "<span lang=frr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yao'",
+"input": "<span lang=yao>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pa'",
+"input": "<span lang=pa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'xal'",
+"input": "<span lang=xal>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fry'",
+"input": "<span lang=fry>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pi'",
+"input": "<span lang=pi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dut'",
+"input": "<span lang=dut>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pl'",
+"input": "<span lang=pl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gay'",
+"input": "<span lang=gay>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oto'",
+"input": "<span lang=oto>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sag'",
+"input": "<span lang=sag>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hmn'",
+"input": "<span lang=hmn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hmo'",
+"input": "<span lang=hmo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'an'",
+"input": "<span lang=an>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gaa'",
+"input": "<span lang=gaa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fur'",
+"input": "<span lang=fur>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mlg'",
+"input": "<span lang=mlg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'slv'",
+"input": "<span lang=slv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ain'",
+"input": "<span lang=ain>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fil'",
+"input": "<span lang=fil>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mlt'",
+"input": "<span lang=mlt>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'slk'",
+"input": "<span lang=slk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rar'",
+"input": "<span lang=rar>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ful'",
+"input": "<span lang=ful>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sla'",
+"input": "<span lang=sla>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 've'",
+"input": "<span lang=ve>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jpn'",
+"input": "<span lang=jpn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vol'",
+"input": "<span lang=vol>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vi'",
+"input": "<span lang=vi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'is'",
+"input": "<span lang=is>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kho'",
+"input": "<span lang=kho>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iu'",
+"input": "<span lang=iu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'it'",
+"input": "<span lang=it>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vo'",
+"input": "<span lang=vo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ii'",
+"input": "<span lang=ii>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ay'",
+"input": "<span lang=ay>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ik'",
+"input": "<span lang=ik>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'io'",
+"input": "<span lang=io>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kha'",
+"input": "<span lang=kha>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ia'",
+"input": "<span lang=ia>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ave'",
+"input": "<span lang=ave>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jpr'",
+"input": "<span lang=jpr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ie'",
+"input": "<span lang=ie>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ava'",
+"input": "<span lang=ava>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ig'",
+"input": "<span lang=ig>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pap'",
+"input": "<span lang=pap>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ewo'",
+"input": "<span lang=ewo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pau'",
+"input": "<span lang=pau>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ewe'",
+"input": "<span lang=ewe>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'paa'",
+"input": "<span lang=paa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'qaa'",
+"input": "<span lang=qaa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pag'",
+"input": "<span lang=pag>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sat'",
+"input": "<span lang=sat>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pal'",
+"input": "<span lang=pal>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pam'",
+"input": "<span lang=pam>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pan'",
+"input": "<span lang=pan>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'phi'",
+"input": "<span lang=phi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nog'",
+"input": "<span lang=nog>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'phn'",
+"input": "<span lang=phn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kir'",
+"input": "<span lang=kir>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nia'",
+"input": "<span lang=nia>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dgr'",
+"input": "<span lang=dgr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'syr'",
+"input": "<span lang=syr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kin'",
+"input": "<span lang=kin>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tup'",
+"input": "<span lang=tup>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'niu'",
+"input": "<span lang=niu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gsw'",
+"input": "<span lang=gsw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'epo'",
+"input": "<span lang=epo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jbo'",
+"input": "<span lang=jbo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mic'",
+"input": "<span lang=mic>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tha'",
+"input": "<span lang=tha>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sam'",
+"input": "<span lang=sam>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hai'",
+"input": "<span lang=hai>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gmh'",
+"input": "<span lang=gmh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cus'",
+"input": "<span lang=cus>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ell'",
+"input": "<span lang=ell>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wen'",
+"input": "<span lang=wen>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bnt'",
+"input": "<span lang=bnt>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fre'",
+"input": "<span lang=fre>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'elx'",
+"input": "<span lang=elx>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ada'",
+"input": "<span lang=ada>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nav'",
+"input": "<span lang=nav>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hat'",
+"input": "<span lang=hat>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hau'",
+"input": "<span lang=hau>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'haw'",
+"input": "<span lang=haw>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bin'",
+"input": "<span lang=bin>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'amh'",
+"input": "<span lang=amh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bik'",
+"input": "<span lang=bik>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bih'",
+"input": "<span lang=bih>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mos'",
+"input": "<span lang=mos>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'moh'",
+"input": "<span lang=moh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mon'",
+"input": "<span lang=mon>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mol'",
+"input": "<span lang=mol>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bis'",
+"input": "<span lang=bis>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bla'",
+"input": "<span lang=bla>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pol'",
+"input": "<span lang=pol>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tib'",
+"input": "<span lang=tib>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tvl'",
+"input": "<span lang=tvl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tgk'",
+"input": "<span lang=tgk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ijo'",
+"input": "<span lang=ijo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'est'",
+"input": "<span lang=est>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kmb'",
+"input": "<span lang=kmb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ice'",
+"input": "<span lang=ice>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'peo'",
+"input": "<span lang=peo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tgl'",
+"input": "<span lang=tgl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'umb'",
+"input": "<span lang=umb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tmh'",
+"input": "<span lang=tmh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fon'",
+"input": "<span lang=fon>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hsb'",
+"input": "<span lang=hsb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'be'",
+"input": "<span lang=be>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bg'",
+"input": "<span lang=bg>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'run'",
+"input": "<span lang=run>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ba'",
+"input": "<span lang=ba>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rum'",
+"input": "<span lang=rum>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bm'",
+"input": "<span lang=bm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bn'",
+"input": "<span lang=bn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bo'",
+"input": "<span lang=bo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bh'",
+"input": "<span lang=bh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bi'",
+"input": "<span lang=bi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'br'",
+"input": "<span lang=br>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bs'",
+"input": "<span lang=bs>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rus'",
+"input": "<span lang=rus>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rup'",
+"input": "<span lang=rup>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pli'",
+"input": "<span lang=pli>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'om'",
+"input": "<span lang=om>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oj'",
+"input": "<span lang=oj>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ace'",
+"input": "<span lang=ace>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ach'",
+"input": "<span lang=ach>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oc'",
+"input": "<span lang=oc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dzo'",
+"input": "<span lang=dzo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kru'",
+"input": "<span lang=kru>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'srr'",
+"input": "<span lang=srr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ido'",
+"input": "<span lang=ido>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'srp'",
+"input": "<span lang=srp>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kro'",
+"input": "<span lang=kro>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'krl'",
+"input": "<span lang=krl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'krc'",
+"input": "<span lang=krc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nds'",
+"input": "<span lang=nds>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'os'",
+"input": "<span lang=os>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'or'",
+"input": "<span lang=or>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zul'",
+"input": "<span lang=zul>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'twi'",
+"input": "<span lang=twi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sog'",
+"input": "<span lang=sog>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nso'",
+"input": "<span lang=nso>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'swe'",
+"input": "<span lang=swe>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'som'",
+"input": "<span lang=som>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chm'",
+"input": "<span lang=chm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'snd'",
+"input": "<span lang=snd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sot'",
+"input": "<span lang=sot>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mkd'",
+"input": "<span lang=mkd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wak'",
+"input": "<span lang=wak>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'her'",
+"input": "<span lang=her>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lol'",
+"input": "<span lang=lol>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mkh'",
+"input": "<span lang=mkh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'heb'",
+"input": "<span lang=heb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'loz'",
+"input": "<span lang=loz>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gil'",
+"input": "<span lang=gil>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'was'",
+"input": "<span lang=was>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'war'",
+"input": "<span lang=war>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hz'",
+"input": "<span lang=hz>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hy'",
+"input": "<span lang=hy>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sid'",
+"input": "<span lang=sid>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hr'",
+"input": "<span lang=hr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ht'",
+"input": "<span lang=ht>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hu'",
+"input": "<span lang=hu>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hi'",
+"input": "<span lang=hi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ho'",
+"input": "<span lang=ho>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bul'",
+"input": "<span lang=bul>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wal'",
+"input": "<span lang=wal>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bua'",
+"input": "<span lang=bua>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bug'",
+"input": "<span lang=bug>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'he'",
+"input": "<span lang=he>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uz'",
+"input": "<span lang=uz>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aze'",
+"input": "<span lang=aze>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ur'",
+"input": "<span lang=ur>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zha'",
+"input": "<span lang=zha>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uk'",
+"input": "<span lang=uk>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ug'",
+"input": "<span lang=ug>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zho'",
+"input": "<span lang=zho>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aa'",
+"input": "<span lang=aa>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ab'",
+"input": "<span lang=ab>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ae'",
+"input": "<span lang=ae>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uig'",
+"input": "<span lang=uig>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'af'",
+"input": "<span lang=af>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ak'",
+"input": "<span lang=ak>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'am'",
+"input": "<span lang=am>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'myv'",
+"input": "<span lang=myv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'khi'",
+"input": "<span lang=khi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'as'",
+"input": "<span lang=as>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ar'",
+"input": "<span lang=ar>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'inh'",
+"input": "<span lang=inh>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'khm'",
+"input": "<span lang=khm>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'av'",
+"input": "<span lang=av>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mya'",
+"input": "<span lang=mya>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ine'",
+"input": "<span lang=ine>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'az'",
+"input": "<span lang=az>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ina'",
+"input": "<span lang=ina>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'inc'",
+"input": "<span lang=inc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nl'",
+"input": "<span lang=nl>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nn'",
+"input": "<span lang=nn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'no'",
+"input": "<span lang=no>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'na'",
+"input": "<span lang=na>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nb'",
+"input": "<span lang=nb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nai'",
+"input": "<span lang=nai>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nd'",
+"input": "<span lang=nd>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ne'",
+"input": "<span lang=ne>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tir'",
+"input": "<span lang=tir>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ng'",
+"input": "<span lang=ng>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ny'",
+"input": "<span lang=ny>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nap'",
+"input": "<span lang=nap>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gre'",
+"input": "<span lang=gre>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'grb'",
+"input": "<span lang=grb>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'grc'",
+"input": "<span lang=grc>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nau'",
+"input": "<span lang=nau>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'grn'",
+"input": "<span lang=grn>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nr'",
+"input": "<span lang=nr>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tig'",
+"input": "<span lang=tig>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yor'",
+"input": "<span lang=yor>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nv'",
+"input": "<span lang=nv>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mri'",
+"input": "<span lang=mri>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'may'",
+"input": "<span lang=may>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zun'",
+"input": "<span lang=zun>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ile'",
+"input": "<span lang=ile>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sqi'",
+"input": "<span lang=sqi>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gon'",
+"input": "<span lang=gon>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cpe'",
+"input": "<span lang=cpe>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cpf'",
+"input": "<span lang=cpf>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cpp'",
+"input": "<span lang=cpp>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'roh-foo'",
+"input": "<span lang=roh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gv-foo'",
+"input": "<span lang=gv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gu-foo'",
+"input": "<span lang=gu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'scn-foo'",
+"input": "<span lang=scn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rom-foo'",
+"input": "<span lang=rom-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ron-foo'",
+"input": "<span lang=ron-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'alg-foo'",
+"input": "<span lang=alg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oss-foo'",
+"input": "<span lang=oss-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ale-foo'",
+"input": "<span lang=ale-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'alb-foo'",
+"input": "<span lang=alb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sco-foo'",
+"input": "<span lang=sco-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'scc-foo'",
+"input": "<span lang=scc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mni-foo'",
+"input": "<span lang=mni-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gd-foo'",
+"input": "<span lang=gd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'per-foo'",
+"input": "<span lang=per-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ga-foo'",
+"input": "<span lang=ga-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nwc-foo'",
+"input": "<span lang=nwc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'osa-foo'",
+"input": "<span lang=osa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gn-foo'",
+"input": "<span lang=gn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'alt-foo'",
+"input": "<span lang=alt-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gl-foo'",
+"input": "<span lang=gl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'scr-foo'",
+"input": "<span lang=scr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mwr-foo'",
+"input": "<span lang=mwr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ty-foo'",
+"input": "<span lang=ty-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tw-foo'",
+"input": "<span lang=tw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tt-foo'",
+"input": "<span lang=tt-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tr-foo'",
+"input": "<span lang=tr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ts-foo'",
+"input": "<span lang=ts-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tn-foo'",
+"input": "<span lang=tn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'to-foo'",
+"input": "<span lang=to-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tl-foo'",
+"input": "<span lang=tl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tk-foo'",
+"input": "<span lang=tk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'th-foo'",
+"input": "<span lang=th-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ti-foo'",
+"input": "<span lang=ti-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ven-foo'",
+"input": "<span lang=ven-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tg-foo'",
+"input": "<span lang=tg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'te-foo'",
+"input": "<span lang=te-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uga-foo'",
+"input": "<span lang=uga-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ta-foo'",
+"input": "<span lang=ta-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fas-foo'",
+"input": "<span lang=fas-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fat-foo'",
+"input": "<span lang=fat-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fan-foo'",
+"input": "<span lang=fan-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fao-foo'",
+"input": "<span lang=fao-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'got-foo'",
+"input": "<span lang=got-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sme-foo'",
+"input": "<span lang=sme-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'din-foo'",
+"input": "<span lang=din-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hye-foo'",
+"input": "<span lang=hye-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'guj-foo'",
+"input": "<span lang=guj-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cmc-foo'",
+"input": "<span lang=cmc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'srd-foo'",
+"input": "<span lang=srd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mdr-foo'",
+"input": "<span lang=mdr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ml-foo'",
+"input": "<span lang=ml-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'div-foo'",
+"input": "<span lang=div-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zh-foo'",
+"input": "<span lang=zh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tem-foo'",
+"input": "<span lang=tem-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'xho-foo'",
+"input": "<span lang=xho-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mno-foo'",
+"input": "<span lang=mno-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'roa-foo'",
+"input": "<span lang=roa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'za-foo'",
+"input": "<span lang=za-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'deu-foo'",
+"input": "<span lang=deu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mk-foo'",
+"input": "<span lang=mk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nbl-foo'",
+"input": "<span lang=nbl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zu-foo'",
+"input": "<span lang=zu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ter-foo'",
+"input": "<span lang=ter-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tet-foo'",
+"input": "<span lang=tet-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mnc-foo'",
+"input": "<span lang=mnc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sun-foo'",
+"input": "<span lang=sun-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'abk-foo'",
+"input": "<span lang=abk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'suk-foo'",
+"input": "<span lang=suk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kur-foo'",
+"input": "<span lang=kur-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kum-foo'",
+"input": "<span lang=kum-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'slo-foo'",
+"input": "<span lang=slo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sus-foo'",
+"input": "<span lang=sus-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'new-foo'",
+"input": "<span lang=new-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kua-foo'",
+"input": "<span lang=kua-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sux-foo'",
+"input": "<span lang=sux-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'den-foo'",
+"input": "<span lang=den-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mr-foo'",
+"input": "<span lang=mr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mul-foo'",
+"input": "<span lang=mul-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lez-foo'",
+"input": "<span lang=lez-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gla-foo'",
+"input": "<span lang=gla-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bos-foo'",
+"input": "<span lang=bos-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gle-foo'",
+"input": "<span lang=gle-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eka-foo'",
+"input": "<span lang=eka-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'glg-foo'",
+"input": "<span lang=glg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'akk-foo'",
+"input": "<span lang=akk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dra-foo'",
+"input": "<span lang=dra-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aka-foo'",
+"input": "<span lang=aka-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bod-foo'",
+"input": "<span lang=bod-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'glv-foo'",
+"input": "<span lang=glv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jrb-foo'",
+"input": "<span lang=jrb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vie-foo'",
+"input": "<span lang=vie-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ipk-foo'",
+"input": "<span lang=ipk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uzb-foo'",
+"input": "<span lang=uzb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sgn-foo'",
+"input": "<span lang=sgn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sga-foo'",
+"input": "<span lang=sga-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bre-foo'",
+"input": "<span lang=bre-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'apa-foo'",
+"input": "<span lang=apa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bra-foo'",
+"input": "<span lang=bra-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aym-foo'",
+"input": "<span lang=aym-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cha-foo'",
+"input": "<span lang=cha-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chb-foo'",
+"input": "<span lang=chb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'che-foo'",
+"input": "<span lang=che-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chg-foo'",
+"input": "<span lang=chg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chi-foo'",
+"input": "<span lang=chi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chk-foo'",
+"input": "<span lang=chk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aus-foo'",
+"input": "<span lang=aus-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chn-foo'",
+"input": "<span lang=chn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cho-foo'",
+"input": "<span lang=cho-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chp-foo'",
+"input": "<span lang=chp-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chr-foo'",
+"input": "<span lang=chr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chu-foo'",
+"input": "<span lang=chu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chv-foo'",
+"input": "<span lang=chv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chy-foo'",
+"input": "<span lang=chy-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'msa-foo'",
+"input": "<span lang=msa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iii-foo'",
+"input": "<span lang=iii-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vot-foo'",
+"input": "<span lang=vot-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mg-foo'",
+"input": "<span lang=mg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ndo-foo'",
+"input": "<span lang=ndo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ibo-foo'",
+"input": "<span lang=ibo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'car-foo'",
+"input": "<span lang=car-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mo-foo'",
+"input": "<span lang=mo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mn-foo'",
+"input": "<span lang=mn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mi-foo'",
+"input": "<span lang=mi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mh-foo'",
+"input": "<span lang=mh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cau-foo'",
+"input": "<span lang=cau-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cat-foo'",
+"input": "<span lang=cat-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bur-foo'",
+"input": "<span lang=bur-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mt-foo'",
+"input": "<span lang=mt-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cai-foo'",
+"input": "<span lang=cai-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'del-foo'",
+"input": "<span lang=del-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ms-foo'",
+"input": "<span lang=ms-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'byn-foo'",
+"input": "<span lang=byn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mwl-foo'",
+"input": "<span lang=mwl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'my-foo'",
+"input": "<span lang=my-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cad-foo'",
+"input": "<span lang=cad-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tat-foo'",
+"input": "<span lang=tat-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nde-foo'",
+"input": "<span lang=nde-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tam-foo'",
+"input": "<span lang=tam-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'spa-foo'",
+"input": "<span lang=spa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tah-foo'",
+"input": "<span lang=tah-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tai-foo'",
+"input": "<span lang=tai-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cze-foo'",
+"input": "<span lang=cze-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'afh-foo'",
+"input": "<span lang=afh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eng-foo'",
+"input": "<span lang=eng-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'afa-foo'",
+"input": "<span lang=afa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'id-foo'",
+"input": "<span lang=id-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nyn-foo'",
+"input": "<span lang=nyn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nyo-foo'",
+"input": "<span lang=nyo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gez-foo'",
+"input": "<span lang=gez-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nya-foo'",
+"input": "<span lang=nya-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sio-foo'",
+"input": "<span lang=sio-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sin-foo'",
+"input": "<span lang=sin-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'afr-foo'",
+"input": "<span lang=afr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'map-foo'",
+"input": "<span lang=map-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fr-foo'",
+"input": "<span lang=fr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lao-foo'",
+"input": "<span lang=lao-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lah-foo'",
+"input": "<span lang=lah-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nym-foo'",
+"input": "<span lang=nym-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sna-foo'",
+"input": "<span lang=sna-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lad-foo'",
+"input": "<span lang=lad-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fy-foo'",
+"input": "<span lang=fy-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'snk-foo'",
+"input": "<span lang=snk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fa-foo'",
+"input": "<span lang=fa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mac-foo'",
+"input": "<span lang=mac-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mad-foo'",
+"input": "<span lang=mad-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ff-foo'",
+"input": "<span lang=ff-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lat-foo'",
+"input": "<span lang=lat-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fi-foo'",
+"input": "<span lang=fi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fj-foo'",
+"input": "<span lang=fj-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mal-foo'",
+"input": "<span lang=mal-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mao-foo'",
+"input": "<span lang=mao-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fo-foo'",
+"input": "<span lang=fo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mak-foo'",
+"input": "<span lang=mak-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'egy-foo'",
+"input": "<span lang=egy-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'znd-foo'",
+"input": "<span lang=znd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ss-foo'",
+"input": "<span lang=ss-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sr-foo'",
+"input": "<span lang=sr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sq-foo'",
+"input": "<span lang=sq-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sit-foo'",
+"input": "<span lang=sit-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sw-foo'",
+"input": "<span lang=sw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sv-foo'",
+"input": "<span lang=sv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'su-foo'",
+"input": "<span lang=su-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'st-foo'",
+"input": "<span lang=st-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sk-foo'",
+"input": "<span lang=sk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'si-foo'",
+"input": "<span lang=si-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sh-foo'",
+"input": "<span lang=sh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'so-foo'",
+"input": "<span lang=so-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sn-foo'",
+"input": "<span lang=sn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sm-foo'",
+"input": "<span lang=sm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sl-foo'",
+"input": "<span lang=sl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sc-foo'",
+"input": "<span lang=sc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sa-foo'",
+"input": "<span lang=sa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sg-foo'",
+"input": "<span lang=sg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'se-foo'",
+"input": "<span lang=se-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sd-foo'",
+"input": "<span lang=sd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zen-foo'",
+"input": "<span lang=zen-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kbd-foo'",
+"input": "<span lang=kbd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'enm-foo'",
+"input": "<span lang=enm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iro-foo'",
+"input": "<span lang=iro-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vai-foo'",
+"input": "<span lang=vai-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'csb-foo'",
+"input": "<span lang=csb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tsn-foo'",
+"input": "<span lang=tsn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lg-foo'",
+"input": "<span lang=lg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lb-foo'",
+"input": "<span lang=lb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'la-foo'",
+"input": "<span lang=la-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ln-foo'",
+"input": "<span lang=ln-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lo-foo'",
+"input": "<span lang=lo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'li-foo'",
+"input": "<span lang=li-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lv-foo'",
+"input": "<span lang=lv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lt-foo'",
+"input": "<span lang=lt-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lu-foo'",
+"input": "<span lang=lu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hin-foo'",
+"input": "<span lang=hin-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fij-foo'",
+"input": "<span lang=fij-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fin-foo'",
+"input": "<span lang=fin-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eus-foo'",
+"input": "<span lang=eus-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yi-foo'",
+"input": "<span lang=yi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'non-foo'",
+"input": "<span lang=non-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ceb-foo'",
+"input": "<span lang=ceb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yo-foo'",
+"input": "<span lang=yo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dan-foo'",
+"input": "<span lang=dan-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cel-foo'",
+"input": "<span lang=cel-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bat-foo'",
+"input": "<span lang=bat-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nob-foo'",
+"input": "<span lang=nob-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dak-foo'",
+"input": "<span lang=dak-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ces-foo'",
+"input": "<span lang=ces-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dar-foo'",
+"input": "<span lang=dar-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'son-foo'",
+"input": "<span lang=son-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'day-foo'",
+"input": "<span lang=day-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nor-foo'",
+"input": "<span lang=nor-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gba-foo'",
+"input": "<span lang=gba-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ssa-foo'",
+"input": "<span lang=ssa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hil-foo'",
+"input": "<span lang=hil-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kpe-foo'",
+"input": "<span lang=kpe-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'man-foo'",
+"input": "<span lang=man-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wel-foo'",
+"input": "<span lang=wel-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'el-foo'",
+"input": "<span lang=el-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eo-foo'",
+"input": "<span lang=eo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'en-foo'",
+"input": "<span lang=en-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lam-foo'",
+"input": "<span lang=lam-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ee-foo'",
+"input": "<span lang=ee-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tpi-foo'",
+"input": "<span lang=tpi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mdf-foo'",
+"input": "<span lang=mdf-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mas-foo'",
+"input": "<span lang=mas-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mar-foo'",
+"input": "<span lang=mar-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'eu-foo'",
+"input": "<span lang=eu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'et-foo'",
+"input": "<span lang=et-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'es-foo'",
+"input": "<span lang=es-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ru-foo'",
+"input": "<span lang=ru-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rw-foo'",
+"input": "<span lang=rw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'goh-foo'",
+"input": "<span lang=goh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sms-foo'",
+"input": "<span lang=sms-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'smo-foo'",
+"input": "<span lang=smo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'smn-foo'",
+"input": "<span lang=smn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'smj-foo'",
+"input": "<span lang=smj-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'smi-foo'",
+"input": "<span lang=smi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nic-foo'",
+"input": "<span lang=nic-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rm-foo'",
+"input": "<span lang=rm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rn-foo'",
+"input": "<span lang=rn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ro-foo'",
+"input": "<span lang=ro-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dsb-foo'",
+"input": "<span lang=dsb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sma-foo'",
+"input": "<span lang=sma-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gor-foo'",
+"input": "<span lang=gor-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ast-foo'",
+"input": "<span lang=ast-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'orm-foo'",
+"input": "<span lang=orm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'que-foo'",
+"input": "<span lang=que-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ori-foo'",
+"input": "<span lang=ori-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'crh-foo'",
+"input": "<span lang=crh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'asm-foo'",
+"input": "<span lang=asm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pus-foo'",
+"input": "<span lang=pus-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kik-foo'",
+"input": "<span lang=kik-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ltz-foo'",
+"input": "<span lang=ltz-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ath-foo'",
+"input": "<span lang=ath-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wln-foo'",
+"input": "<span lang=wln-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'isl-foo'",
+"input": "<span lang=isl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'xh-foo'",
+"input": "<span lang=xh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mag-foo'",
+"input": "<span lang=mag-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mai-foo'",
+"input": "<span lang=mai-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'xx-foo'",
+"input": "<span lang=xx-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mah-foo'",
+"input": "<span lang=mah-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tel-foo'",
+"input": "<span lang=tel-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lav-foo'",
+"input": "<span lang=lav-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zap-foo'",
+"input": "<span lang=zap-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yid-foo'",
+"input": "<span lang=yid-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kok-foo'",
+"input": "<span lang=kok-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kom-foo'",
+"input": "<span lang=kom-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kon-foo'",
+"input": "<span lang=kon-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ukr-foo'",
+"input": "<span lang=ukr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ton-foo'",
+"input": "<span lang=ton-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kos-foo'",
+"input": "<span lang=kos-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kor-foo'",
+"input": "<span lang=kor-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tog-foo'",
+"input": "<span lang=tog-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hun-foo'",
+"input": "<span lang=hun-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hup-foo'",
+"input": "<span lang=hup-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cym-foo'",
+"input": "<span lang=cym-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'udm-foo'",
+"input": "<span lang=udm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bej-foo'",
+"input": "<span lang=bej-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ben-foo'",
+"input": "<span lang=ben-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bel-foo'",
+"input": "<span lang=bel-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bem-foo'",
+"input": "<span lang=bem-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tsi-foo'",
+"input": "<span lang=tsi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aar-foo'",
+"input": "<span lang=aar-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ber-foo'",
+"input": "<span lang=ber-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nzi-foo'",
+"input": "<span lang=nzi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sai-foo'",
+"input": "<span lang=sai-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ang-foo'",
+"input": "<span lang=ang-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pra-foo'",
+"input": "<span lang=pra-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'san-foo'",
+"input": "<span lang=san-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bho-foo'",
+"input": "<span lang=bho-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sal-foo'",
+"input": "<span lang=sal-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pro-foo'",
+"input": "<span lang=pro-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'raj-foo'",
+"input": "<span lang=raj-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sad-foo'",
+"input": "<span lang=sad-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'anp-foo'",
+"input": "<span lang=anp-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rap-foo'",
+"input": "<span lang=rap-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sas-foo'",
+"input": "<span lang=sas-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iba-foo'",
+"input": "<span lang=iba-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'myn-foo'",
+"input": "<span lang=myn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'min-foo'",
+"input": "<span lang=min-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lim-foo'",
+"input": "<span lang=lim-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lin-foo'",
+"input": "<span lang=lin-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nah-foo'",
+"input": "<span lang=nah-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lit-foo'",
+"input": "<span lang=lit-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'efi-foo'",
+"input": "<span lang=efi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'srn-foo'",
+"input": "<span lang=srn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arn-foo'",
+"input": "<span lang=arn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ypk-foo'",
+"input": "<span lang=ypk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mis-foo'",
+"input": "<span lang=mis-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kac-foo'",
+"input": "<span lang=kac-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kab-foo'",
+"input": "<span lang=kab-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kaa-foo'",
+"input": "<span lang=kaa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kan-foo'",
+"input": "<span lang=kan-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kam-foo'",
+"input": "<span lang=kam-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kal-foo'",
+"input": "<span lang=kal-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kas-foo'",
+"input": "<span lang=kas-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kar-foo'",
+"input": "<span lang=kar-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kaw-foo'",
+"input": "<span lang=kaw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kau-foo'",
+"input": "<span lang=kau-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kat-foo'",
+"input": "<span lang=kat-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kaz-foo'",
+"input": "<span lang=kaz-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tyv-foo'",
+"input": "<span lang=tyv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'awa-foo'",
+"input": "<span lang=awa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'urd-foo'",
+"input": "<span lang=urd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ka-foo'",
+"input": "<span lang=ka-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'doi-foo'",
+"input": "<span lang=doi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kg-foo'",
+"input": "<span lang=kg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kk-foo'",
+"input": "<span lang=kk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kj-foo'",
+"input": "<span lang=kj-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ki-foo'",
+"input": "<span lang=ki-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ko-foo'",
+"input": "<span lang=ko-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kn-foo'",
+"input": "<span lang=kn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'km-foo'",
+"input": "<span lang=km-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kl-foo'",
+"input": "<span lang=kl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ks-foo'",
+"input": "<span lang=ks-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kr-foo'",
+"input": "<span lang=kr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kw-foo'",
+"input": "<span lang=kw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kv-foo'",
+"input": "<span lang=kv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ku-foo'",
+"input": "<span lang=ku-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ky-foo'",
+"input": "<span lang=ky-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ota-foo'",
+"input": "<span lang=ota-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kut-foo'",
+"input": "<span lang=kut-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tkl-foo'",
+"input": "<span lang=tkl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nld-foo'",
+"input": "<span lang=nld-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oji-foo'",
+"input": "<span lang=oji-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oci-foo'",
+"input": "<span lang=oci-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ha-foo'",
+"input": "<span lang=ha-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wol-foo'",
+"input": "<span lang=wol-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jav-foo'",
+"input": "<span lang=jav-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hrv-foo'",
+"input": "<span lang=hrv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ger-foo'",
+"input": "<span lang=ger-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mga-foo'",
+"input": "<span lang=mga-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hit-foo'",
+"input": "<span lang=hit-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dyu-foo'",
+"input": "<span lang=dyu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ssw-foo'",
+"input": "<span lang=ssw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'de-foo'",
+"input": "<span lang=de-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'da-foo'",
+"input": "<span lang=da-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dz-foo'",
+"input": "<span lang=dz-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lui-foo'",
+"input": "<span lang=lui-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dv-foo'",
+"input": "<span lang=dv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ita-foo'",
+"input": "<span lang=ita-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'geo-foo'",
+"input": "<span lang=geo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'him-foo'",
+"input": "<span lang=him-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gem-foo'",
+"input": "<span lang=gem-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'crp-foo'",
+"input": "<span lang=crp-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'qu-foo'",
+"input": "<span lang=qu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bas-foo'",
+"input": "<span lang=bas-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'baq-foo'",
+"input": "<span lang=baq-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bad-foo'",
+"input": "<span lang=bad-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nep-foo'",
+"input": "<span lang=nep-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cre-foo'",
+"input": "<span lang=cre-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ban-foo'",
+"input": "<span lang=ban-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'x-foo'",
+"input": "<span lang=x-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bal-foo'",
+"input": "<span lang=bal-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bam-foo'",
+"input": "<span lang=bam-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bak-foo'",
+"input": "<span lang=bak-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'shn-foo'",
+"input": "<span lang=shn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bai-foo'",
+"input": "<span lang=bai-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arp-foo'",
+"input": "<span lang=arp-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'art-foo'",
+"input": "<span lang=art-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arw-foo'",
+"input": "<span lang=arw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'qtz-foo'",
+"input": "<span lang=qtz-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ara-foo'",
+"input": "<span lang=ara-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arc-foo'",
+"input": "<span lang=arc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arg-foo'",
+"input": "<span lang=arg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sem-foo'",
+"input": "<span lang=sem-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sel-foo'",
+"input": "<span lang=sel-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nub-foo'",
+"input": "<span lang=nub-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'arm-foo'",
+"input": "<span lang=arm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'btk-foo'",
+"input": "<span lang=btk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lus-foo'",
+"input": "<span lang=lus-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iku-foo'",
+"input": "<span lang=iku-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mus-foo'",
+"input": "<span lang=mus-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lua-foo'",
+"input": "<span lang=lua-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lub-foo'",
+"input": "<span lang=lub-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lug-foo'",
+"input": "<span lang=lug-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ira-foo'",
+"input": "<span lang=ira-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mun-foo'",
+"input": "<span lang=mun-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tur-foo'",
+"input": "<span lang=tur-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lun-foo'",
+"input": "<span lang=lun-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'luo-foo'",
+"input": "<span lang=luo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'men-foo'",
+"input": "<span lang=men-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wa-foo'",
+"input": "<span lang=wa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tso-foo'",
+"input": "<span lang=tso-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wo-foo'",
+"input": "<span lang=wo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jv-foo'",
+"input": "<span lang=jv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tut-foo'",
+"input": "<span lang=tut-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ind-foo'",
+"input": "<span lang=ind-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tuk-foo'",
+"input": "<span lang=tuk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tum-foo'",
+"input": "<span lang=tum-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ja-foo'",
+"input": "<span lang=ja-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cop-foo'",
+"input": "<span lang=cop-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cos-foo'",
+"input": "<span lang=cos-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cor-foo'",
+"input": "<span lang=cor-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ilo-foo'",
+"input": "<span lang=ilo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fiu-foo'",
+"input": "<span lang=fiu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gwi-foo'",
+"input": "<span lang=gwi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'und-foo'",
+"input": "<span lang=und-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gws-foo'",
+"input": "<span lang=gws-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tli-foo'",
+"input": "<span lang=tli-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tlh-foo'",
+"input": "<span lang=tlh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nno-foo'",
+"input": "<span lang=nno-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ch-foo'",
+"input": "<span lang=ch-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'co-foo'",
+"input": "<span lang=co-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ca-foo'",
+"input": "<span lang=ca-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'por-foo'",
+"input": "<span lang=por-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ce-foo'",
+"input": "<span lang=ce-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pon-foo'",
+"input": "<span lang=pon-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cy-foo'",
+"input": "<span lang=cy-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sah-foo'",
+"input": "<span lang=sah-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cs-foo'",
+"input": "<span lang=cs-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cr-foo'",
+"input": "<span lang=cr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ady-foo'",
+"input": "<span lang=ady-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cv-foo'",
+"input": "<span lang=cv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cu-foo'",
+"input": "<span lang=cu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ps-foo'",
+"input": "<span lang=ps-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fra-foo'",
+"input": "<span lang=fra-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dum-foo'",
+"input": "<span lang=dum-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pt-foo'",
+"input": "<span lang=pt-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'swa-foo'",
+"input": "<span lang=swa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dua-foo'",
+"input": "<span lang=dua-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fro-foo'",
+"input": "<span lang=fro-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yap-foo'",
+"input": "<span lang=yap-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'frm-foo'",
+"input": "<span lang=frm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tiv-foo'",
+"input": "<span lang=tiv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'frs-foo'",
+"input": "<span lang=frs-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'frr-foo'",
+"input": "<span lang=frr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yao-foo'",
+"input": "<span lang=yao-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pa-foo'",
+"input": "<span lang=pa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'xal-foo'",
+"input": "<span lang=xal-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fry-foo'",
+"input": "<span lang=fry-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pi-foo'",
+"input": "<span lang=pi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dut-foo'",
+"input": "<span lang=dut-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pl-foo'",
+"input": "<span lang=pl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gay-foo'",
+"input": "<span lang=gay-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oto-foo'",
+"input": "<span lang=oto-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sag-foo'",
+"input": "<span lang=sag-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hmn-foo'",
+"input": "<span lang=hmn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hmo-foo'",
+"input": "<span lang=hmo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'an-foo'",
+"input": "<span lang=an-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gaa-foo'",
+"input": "<span lang=gaa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fur-foo'",
+"input": "<span lang=fur-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mlg-foo'",
+"input": "<span lang=mlg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'slv-foo'",
+"input": "<span lang=slv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ain-foo'",
+"input": "<span lang=ain-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fil-foo'",
+"input": "<span lang=fil-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mlt-foo'",
+"input": "<span lang=mlt-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'slk-foo'",
+"input": "<span lang=slk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rar-foo'",
+"input": "<span lang=rar-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ful-foo'",
+"input": "<span lang=ful-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sla-foo'",
+"input": "<span lang=sla-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 've-foo'",
+"input": "<span lang=ve-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jpn-foo'",
+"input": "<span lang=jpn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vol-foo'",
+"input": "<span lang=vol-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vi-foo'",
+"input": "<span lang=vi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'is-foo'",
+"input": "<span lang=is-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kho-foo'",
+"input": "<span lang=kho-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'iu-foo'",
+"input": "<span lang=iu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'it-foo'",
+"input": "<span lang=it-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'vo-foo'",
+"input": "<span lang=vo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ii-foo'",
+"input": "<span lang=ii-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ay-foo'",
+"input": "<span lang=ay-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ik-foo'",
+"input": "<span lang=ik-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'io-foo'",
+"input": "<span lang=io-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kha-foo'",
+"input": "<span lang=kha-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ia-foo'",
+"input": "<span lang=ia-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ave-foo'",
+"input": "<span lang=ave-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jpr-foo'",
+"input": "<span lang=jpr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ie-foo'",
+"input": "<span lang=ie-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ava-foo'",
+"input": "<span lang=ava-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ig-foo'",
+"input": "<span lang=ig-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pap-foo'",
+"input": "<span lang=pap-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ewo-foo'",
+"input": "<span lang=ewo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pau-foo'",
+"input": "<span lang=pau-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ewe-foo'",
+"input": "<span lang=ewe-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'paa-foo'",
+"input": "<span lang=paa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'qaa-foo'",
+"input": "<span lang=qaa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pag-foo'",
+"input": "<span lang=pag-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sat-foo'",
+"input": "<span lang=sat-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pal-foo'",
+"input": "<span lang=pal-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pam-foo'",
+"input": "<span lang=pam-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pan-foo'",
+"input": "<span lang=pan-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'phi-foo'",
+"input": "<span lang=phi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nog-foo'",
+"input": "<span lang=nog-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'phn-foo'",
+"input": "<span lang=phn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kir-foo'",
+"input": "<span lang=kir-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nia-foo'",
+"input": "<span lang=nia-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dgr-foo'",
+"input": "<span lang=dgr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'syr-foo'",
+"input": "<span lang=syr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kin-foo'",
+"input": "<span lang=kin-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tup-foo'",
+"input": "<span lang=tup-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'niu-foo'",
+"input": "<span lang=niu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gsw-foo'",
+"input": "<span lang=gsw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'epo-foo'",
+"input": "<span lang=epo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'jbo-foo'",
+"input": "<span lang=jbo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mic-foo'",
+"input": "<span lang=mic-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tha-foo'",
+"input": "<span lang=tha-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sam-foo'",
+"input": "<span lang=sam-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hai-foo'",
+"input": "<span lang=hai-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gmh-foo'",
+"input": "<span lang=gmh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cus-foo'",
+"input": "<span lang=cus-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ell-foo'",
+"input": "<span lang=ell-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wen-foo'",
+"input": "<span lang=wen-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bnt-foo'",
+"input": "<span lang=bnt-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fre-foo'",
+"input": "<span lang=fre-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'elx-foo'",
+"input": "<span lang=elx-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ada-foo'",
+"input": "<span lang=ada-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nav-foo'",
+"input": "<span lang=nav-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hat-foo'",
+"input": "<span lang=hat-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hau-foo'",
+"input": "<span lang=hau-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'haw-foo'",
+"input": "<span lang=haw-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bin-foo'",
+"input": "<span lang=bin-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'amh-foo'",
+"input": "<span lang=amh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bik-foo'",
+"input": "<span lang=bik-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bih-foo'",
+"input": "<span lang=bih-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mos-foo'",
+"input": "<span lang=mos-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'moh-foo'",
+"input": "<span lang=moh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mon-foo'",
+"input": "<span lang=mon-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mol-foo'",
+"input": "<span lang=mol-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bis-foo'",
+"input": "<span lang=bis-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bla-foo'",
+"input": "<span lang=bla-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pol-foo'",
+"input": "<span lang=pol-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tib-foo'",
+"input": "<span lang=tib-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tvl-foo'",
+"input": "<span lang=tvl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tgk-foo'",
+"input": "<span lang=tgk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ijo-foo'",
+"input": "<span lang=ijo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'est-foo'",
+"input": "<span lang=est-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kmb-foo'",
+"input": "<span lang=kmb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ice-foo'",
+"input": "<span lang=ice-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'peo-foo'",
+"input": "<span lang=peo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tgl-foo'",
+"input": "<span lang=tgl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'umb-foo'",
+"input": "<span lang=umb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tmh-foo'",
+"input": "<span lang=tmh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'fon-foo'",
+"input": "<span lang=fon-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hsb-foo'",
+"input": "<span lang=hsb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'be-foo'",
+"input": "<span lang=be-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bg-foo'",
+"input": "<span lang=bg-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'run-foo'",
+"input": "<span lang=run-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ba-foo'",
+"input": "<span lang=ba-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rum-foo'",
+"input": "<span lang=rum-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bm-foo'",
+"input": "<span lang=bm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bn-foo'",
+"input": "<span lang=bn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bo-foo'",
+"input": "<span lang=bo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bh-foo'",
+"input": "<span lang=bh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bi-foo'",
+"input": "<span lang=bi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'br-foo'",
+"input": "<span lang=br-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bs-foo'",
+"input": "<span lang=bs-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rus-foo'",
+"input": "<span lang=rus-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'rup-foo'",
+"input": "<span lang=rup-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'pli-foo'",
+"input": "<span lang=pli-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'om-foo'",
+"input": "<span lang=om-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oj-foo'",
+"input": "<span lang=oj-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ace-foo'",
+"input": "<span lang=ace-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ach-foo'",
+"input": "<span lang=ach-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'oc-foo'",
+"input": "<span lang=oc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'dzo-foo'",
+"input": "<span lang=dzo-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kru-foo'",
+"input": "<span lang=kru-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'srr-foo'",
+"input": "<span lang=srr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ido-foo'",
+"input": "<span lang=ido-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'srp-foo'",
+"input": "<span lang=srp-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'kro-foo'",
+"input": "<span lang=kro-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'krl-foo'",
+"input": "<span lang=krl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'krc-foo'",
+"input": "<span lang=krc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nds-foo'",
+"input": "<span lang=nds-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'os-foo'",
+"input": "<span lang=os-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'or-foo'",
+"input": "<span lang=or-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zul-foo'",
+"input": "<span lang=zul-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'twi-foo'",
+"input": "<span lang=twi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sog-foo'",
+"input": "<span lang=sog-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nso-foo'",
+"input": "<span lang=nso-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'swe-foo'",
+"input": "<span lang=swe-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'som-foo'",
+"input": "<span lang=som-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'chm-foo'",
+"input": "<span lang=chm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'snd-foo'",
+"input": "<span lang=snd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sot-foo'",
+"input": "<span lang=sot-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mkd-foo'",
+"input": "<span lang=mkd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wak-foo'",
+"input": "<span lang=wak-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'her-foo'",
+"input": "<span lang=her-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'lol-foo'",
+"input": "<span lang=lol-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mkh-foo'",
+"input": "<span lang=mkh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'heb-foo'",
+"input": "<span lang=heb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'loz-foo'",
+"input": "<span lang=loz-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gil-foo'",
+"input": "<span lang=gil-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'was-foo'",
+"input": "<span lang=was-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'war-foo'",
+"input": "<span lang=war-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hz-foo'",
+"input": "<span lang=hz-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hy-foo'",
+"input": "<span lang=hy-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sid-foo'",
+"input": "<span lang=sid-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hr-foo'",
+"input": "<span lang=hr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ht-foo'",
+"input": "<span lang=ht-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hu-foo'",
+"input": "<span lang=hu-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'hi-foo'",
+"input": "<span lang=hi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ho-foo'",
+"input": "<span lang=ho-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bul-foo'",
+"input": "<span lang=bul-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'wal-foo'",
+"input": "<span lang=wal-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bua-foo'",
+"input": "<span lang=bua-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'bug-foo'",
+"input": "<span lang=bug-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'he-foo'",
+"input": "<span lang=he-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uz-foo'",
+"input": "<span lang=uz-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aze-foo'",
+"input": "<span lang=aze-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ur-foo'",
+"input": "<span lang=ur-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zha-foo'",
+"input": "<span lang=zha-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uk-foo'",
+"input": "<span lang=uk-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ug-foo'",
+"input": "<span lang=ug-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zho-foo'",
+"input": "<span lang=zho-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'aa-foo'",
+"input": "<span lang=aa-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ab-foo'",
+"input": "<span lang=ab-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ae-foo'",
+"input": "<span lang=ae-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'uig-foo'",
+"input": "<span lang=uig-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'af-foo'",
+"input": "<span lang=af-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ak-foo'",
+"input": "<span lang=ak-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'am-foo'",
+"input": "<span lang=am-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'myv-foo'",
+"input": "<span lang=myv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'khi-foo'",
+"input": "<span lang=khi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'as-foo'",
+"input": "<span lang=as-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ar-foo'",
+"input": "<span lang=ar-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'inh-foo'",
+"input": "<span lang=inh-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'khm-foo'",
+"input": "<span lang=khm-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'av-foo'",
+"input": "<span lang=av-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mya-foo'",
+"input": "<span lang=mya-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ine-foo'",
+"input": "<span lang=ine-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'az-foo'",
+"input": "<span lang=az-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ina-foo'",
+"input": "<span lang=ina-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'inc-foo'",
+"input": "<span lang=inc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nl-foo'",
+"input": "<span lang=nl-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nn-foo'",
+"input": "<span lang=nn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'no-foo'",
+"input": "<span lang=no-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'na-foo'",
+"input": "<span lang=na-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nb-foo'",
+"input": "<span lang=nb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nai-foo'",
+"input": "<span lang=nai-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nd-foo'",
+"input": "<span lang=nd-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ne-foo'",
+"input": "<span lang=ne-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tir-foo'",
+"input": "<span lang=tir-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ng-foo'",
+"input": "<span lang=ng-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ny-foo'",
+"input": "<span lang=ny-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nap-foo'",
+"input": "<span lang=nap-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gre-foo'",
+"input": "<span lang=gre-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'grb-foo'",
+"input": "<span lang=grb-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'grc-foo'",
+"input": "<span lang=grc-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nau-foo'",
+"input": "<span lang=nau-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'grn-foo'",
+"input": "<span lang=grn-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nr-foo'",
+"input": "<span lang=nr-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'tig-foo'",
+"input": "<span lang=tig-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'yor-foo'",
+"input": "<span lang=yor-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'nv-foo'",
+"input": "<span lang=nv-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'mri-foo'",
+"input": "<span lang=mri-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'may-foo'",
+"input": "<span lang=may-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'zun-foo'",
+"input": "<span lang=zun-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'ile-foo'",
+"input": "<span lang=ile-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'sqi-foo'",
+"input": "<span lang=sqi-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'gon-foo'",
+"input": "<span lang=gon-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cpe-foo'",
+"input": "<span lang=cpe-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cpf-foo'",
+"input": "<span lang=cpf-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'cpp-foo'",
+"input": "<span lang=cpp-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "invalid lang attribute 'foo'",
+"input": "<span lang=foo>",
+"fail-unless": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'EN'",
+"input": "<span lang=EN>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "valid lang attribute 'EN-foo'",
+"input": "<span lang=EN-foo>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "lang attribute can be blank",
+"input": "<span lang>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "lang attribute can be blank (with quotes)",
+"input": "<span lang=''>",
+"fail-if": "invalid-lang-code"},
+
+{"description": "lang attribute can not be just space characters",
+"input": "<span lang=' '>",
+"fail-unless": "invalid-lang-code"},
+
+{"description": "lang attribute can not contain leading spaces",
+"input": "<span lang=' en'>",
+"fail-unless": "invalid-lang-code"},
+
+{"description": "lang attribute can not contain trailing spaces",
+"input": "<span lang='en '>",
+"fail-unless": "invalid-lang-code"}
+
+]} \ No newline at end of file
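Each of these validator .test files shares the same JSON shape: a top-level "tests" array whose entries carry a "description", an "input" markup fragment, and either a "fail-if" code (the named error must not be reported for the input) or a "fail-unless" code (the error must be reported). A minimal sketch of a runner for that shape follows; the check_conformance() helper and the file path are hypothetical placeholders for illustration, not html5lib API.

```python
# Minimal sketch of a runner for the .test files above.
# Assumptions: check_conformance() and the "lang-attribute.test" path are
# hypothetical stand-ins, not part of html5lib's public API.
import json


def check_conformance(html):
    """Placeholder validator: return the set of error codes raised for `html`.

    A real harness would plug an HTML conformance checker in here.
    """
    return set()


def run_suite(path):
    with open(path, encoding="utf-8") as f:
        suite = json.load(f)  # {"tests": [{"description": ..., "input": ..., ...}]}
    for case in suite["tests"]:
        errors = check_conformance(case["input"])
        if "fail-if" in case:
            # The test passes only if this error code was NOT reported.
            ok = case["fail-if"] not in errors
        else:
            # "fail-unless": the test passes only if this error code WAS reported.
            ok = case["fail-unless"] in errors
        print("PASS" if ok else "FAIL", "-", case["description"])


if __name__ == "__main__":
    run_suite("lang-attribute.test")  # hypothetical local copy of the file above
```

The same loader works unchanged for the li-value, link-href, link-hreflang, link-rel, ol-start, and starttags files added below, since they only differ in which error codes appear in "fail-if"/"fail-unless".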
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/li-value-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/li-value-attribute.test
new file mode 100755
index 000000000..9ef9dcc56
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/li-value-attribute.test
@@ -0,0 +1,7 @@
+{"tests": [
+
+{"description": "invalid li value attribute value due to leading junk",
+"input": "<li value=a1>",
+"fail-unless": "invalid-integer-value"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-href-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-href-attribute.test
new file mode 100755
index 000000000..950c3786f
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-href-attribute.test
@@ -0,0 +1,7 @@
+{"tests": [
+
+{"description": "link href contains invalid URI due to space in domain",
+"input": "<link href='http://www.example. com/'",
+"fail-unless": "invalid-uri-char"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-hreflang-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-hreflang-attribute.test
new file mode 100755
index 000000000..de39d4da0
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-hreflang-attribute.test
@@ -0,0 +1,7 @@
+{"tests": [
+
+{"description": "invalid lang attribute 'foo'",
+"input": "<link hreflang=foo>",
+"fail-unless": "invalid-lang-code"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-rel-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-rel-attribute.test
new file mode 100755
index 000000000..a436751da
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/link-rel-attribute.test
@@ -0,0 +1,271 @@
+{"tests": [
+
+{"description": "invalid link rel value 'foo'",
+"input": "<link rel=foo>",
+"fail-unless": "invalid-rel"},
+
+{"description": "valid link rel value 'alternate stylesheet'",
+"input": "<link rel='alternate stylesheet'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading space",
+"input": "<link rel=' stylesheet'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with trailing space",
+"input": "<link rel='stylesheet '>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading and trailing space",
+"input": "<link rel=' stylesheet '>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading tab",
+"input": "<link rel='\tstylesheet'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with trailing tab",
+"input": "<link rel='stylesheet\t'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading and trailing tab",
+"input": "<link rel='\tstylesheet\t'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading LF",
+"input": "<link rel='\nstylesheet'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with trailing LF",
+"input": "<link rel='stylesheet\n'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading and trailing LF",
+"input": "<link rel='\nstylesheet\n'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading LT",
+"input": "<link rel='\u000Bstylesheet'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with trailing LT",
+"input": "<link rel='stylesheet\u000B'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading and trailing LT",
+"input": "<link rel='\u000Bstylesheet\u000B'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading FF",
+"input": "<link rel='\u000Cstylesheet'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with trailing FF",
+"input": "<link rel='stylesheet\u000C'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading and trailing FF",
+"input": "<link rel='\u000Cstylesheet\u000C'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading CR",
+"input": "<link rel='\rstylesheet'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with trailing CR",
+"input": "<link rel='stylesheet\r'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid single link rel value with leading and trailing CR",
+"input": "<link rel='\rstylesheet\r'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid double link rel value separated by space",
+"input": "<link rel='stylesheet alternate'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid double link rel value separated by tab",
+"input": "<link rel='stylesheet\talternate'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid double link rel value separated by LF",
+"input": "<link rel='stylesheet\nalternate'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid double link rel value separated by LT",
+"input": "<link rel='stylesheet\u000Balternate'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid double link rel value separated by FF",
+"input": "<link rel='stylesheet\u000Calternate'>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid double link rel value separated by CR",
+"input": "<link rel='stylesheet\ralternate'>",
+"fail-if": "invalid-rel"},
+
+{"description": "invalid duplicated link rel value separated by space",
+"input": "<link rel='stylesheet stylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by tab",
+"input": "<link rel='stylesheet\tstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by LF",
+"input": "<link rel='stylesheet\nstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by LT",
+"input": "<link rel='stylesheet\u000Bstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by FF",
+"input": "<link rel='stylesheet\u000Cstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by CR",
+"input": "<link rel='stylesheet\rstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by space",
+"input": "<link rel='stylesheet stylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by tab",
+"input": "<link rel='stylesheet\tstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by LF",
+"input": "<link rel='stylesheet\nstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by LT",
+"input": "<link rel='stylesheet\u000Bstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by FF",
+"input": "<link rel='stylesheet\u000Cstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "invalid duplicated link rel value separated by CR",
+"input": "<link rel='stylesheet\rstylesheet'>",
+"fail-unless": "duplicate-value-in-token-list"},
+
+{"description": "valid link rel value 'feed'",
+"input": "<link rel=feed>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'help'",
+"input": "<link rel=help>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'tag'",
+"input": "<link rel=tag>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'sidebar'",
+"input": "<link rel=sidebar>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'archive'",
+"input": "<link rel=archive>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'contents'",
+"input": "<link rel=contents>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'previous'",
+"input": "<link rel=previous>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'index'",
+"input": "<link rel=index>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'end'",
+"input": "<link rel=end>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'copyright'",
+"input": "<link rel=copyright>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'author'",
+"input": "<link rel=author>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'top'",
+"input": "<link rel=top>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'alternate'",
+"input": "<link rel=alternate>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'next'",
+"input": "<link rel=next>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'start'",
+"input": "<link rel=start>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'stylesheet'",
+"input": "<link rel=stylesheet>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'archives'",
+"input": "<link rel=archives>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'toc'",
+"input": "<link rel=toc>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'prev'",
+"input": "<link rel=prev>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'begin'",
+"input": "<link rel=begin>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'icon'",
+"input": "<link rel=icon>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'search'",
+"input": "<link rel=search>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'last'",
+"input": "<link rel=last>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'license'",
+"input": "<link rel=license>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'pingback'",
+"input": "<link rel=pingback>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'up'",
+"input": "<link rel=up>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'contact'",
+"input": "<link rel=contact>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'prefetch'",
+"input": "<link rel=prefetch>",
+"fail-if": "invalid-rel"},
+
+{"description": "valid link rel value 'first'",
+"input": "<link rel=first>",
+"fail-if": "invalid-rel"}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/ol-start-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/ol-start-attribute.test
new file mode 100755
index 000000000..5e18cbef4
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/ol-start-attribute.test
@@ -0,0 +1,7 @@
+{"tests": [
+
+{"description": "invalid ol start attribute value due to leading junk",
+"input": "<ol start=a1>",
+"fail-unless": "invalid-integer-value"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/starttags.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/starttags.test
new file mode 100755
index 000000000..0aa5b4d00
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/starttags.test
@@ -0,0 +1,375 @@
+{"tests": [
+
+{"description": "unknown start tag <foo>",
+"input": "<foo>",
+"fail-unless": "unknown-start-tag"},
+
+{"description": "allowed start tag <code>",
+"input": "<code>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <kbd>",
+"input": "<kbd>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <aside>",
+"input": "<aside>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <datagrid>",
+"input": "<datagrid>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <font>",
+"input": "<font>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <noscript>",
+"input": "<noscript>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <style>",
+"input": "<style>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <img>",
+"input": "<img>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <title>",
+"input": "<title>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <menu>",
+"input": "<menu>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <tr>",
+"input": "<tr>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <param>",
+"input": "<param>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <li>",
+"input": "<li>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <source>",
+"input": "<source>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <tfoot>",
+"input": "<tfoot>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <th>",
+"input": "<th>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <td>",
+"input": "<td>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <dl>",
+"input": "<dl>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <blockquote>",
+"input": "<blockquote>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <dd>",
+"input": "<dd>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <abbr>",
+"input": "<abbr>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <dt>",
+"input": "<dt>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <noembed>",
+"input": "<noembed>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <p>",
+"input": "<p>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <small>",
+"input": "<small>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <meter>",
+"input": "<meter>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <em>",
+"input": "<em>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <meta>",
+"input": "<meta>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <video>",
+"input": "<video>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <div>",
+"input": "<div>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <canvas>",
+"input": "<canvas>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <sub>",
+"input": "<sub>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <section>",
+"input": "<section>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <sup>",
+"input": "<sup>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <progress>",
+"input": "<progress>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <body>",
+"input": "<body>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <base>",
+"input": "<base>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <br>",
+"input": "<br>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <address>",
+"input": "<address>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <article>",
+"input": "<article>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <strong>",
+"input": "<strong>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <legend>",
+"input": "<legend>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <event-source>",
+"input": "<event-source>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <ol>",
+"input": "<ol>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <script>",
+"input": "<script>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <caption>",
+"input": "<caption>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <dialog>",
+"input": "<dialog>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <col>",
+"input": "<col>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <h2>",
+"input": "<h2>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <h3>",
+"input": "<h3>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <h1>",
+"input": "<h1>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <h6>",
+"input": "<h6>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <h4>",
+"input": "<h4>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <h5>",
+"input": "<h5>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <header>",
+"input": "<header>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <table>",
+"input": "<table>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <span>",
+"input": "<span>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <area>",
+"input": "<area>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <dfn>",
+"input": "<dfn>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <var>",
+"input": "<var>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <cite>",
+"input": "<cite>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <thead>",
+"input": "<thead>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <head>",
+"input": "<head>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <hr>",
+"input": "<hr>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <link>",
+"input": "<link>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <datatemplate>",
+"input": "<datatemplate>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <b>",
+"input": "<b>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <colgroup>",
+"input": "<colgroup>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <ul>",
+"input": "<ul>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <del>",
+"input": "<del>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <iframe>",
+"input": "<iframe>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <pre>",
+"input": "<pre>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <figure>",
+"input": "<figure>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <ins>",
+"input": "<ins>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <tbody>",
+"input": "<tbody>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <html>",
+"input": "<html>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <nav>",
+"input": "<nav>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <details>",
+"input": "<details>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <samp>",
+"input": "<samp>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <map>",
+"input": "<map>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <nest>",
+"input": "<nest>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <object>",
+"input": "<object>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <a>",
+"input": "<a>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <footer>",
+"input": "<footer>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <i>",
+"input": "<i>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <m>",
+"input": "<m>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <rule>",
+"input": "<rule>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <q>",
+"input": "<q>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <command>",
+"input": "<command>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <time>",
+"input": "<time>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <audio>",
+"input": "<audio>",
+"fail-if": "unknown-start-tag"},
+
+{"description": "allowed start tag <bdo>",
+"input": "<bdo>",
+"fail-if": "unknown-start-tag"}
+
+]}
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/style-scoped-attribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/style-scoped-attribute.test
new file mode 100755
index 000000000..f47795a21
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/style-scoped-attribute.test
@@ -0,0 +1,7 @@
+{"tests": [
+
+{"description": "invalid style scoped attribute value 'inherit'",
+"input": "<style scoped=inherit>",
+"fail-unless": "invalid-boolean-value"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/tabindexattribute.test b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/tabindexattribute.test
new file mode 100755
index 000000000..02066957b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/testdata/validator/tabindexattribute.test
@@ -0,0 +1,79 @@
+{"tests": [
+
+{"description": "valid tabindex attribute value '-1'",
+"input": "<span tabindex=-1>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value '0'",
+"input": "<span tabindex=0>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value '1'",
+"input": "<span tabindex=1>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value '32768'",
+"input": "<span tabindex=32768>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value '-32768'",
+"input": "<span tabindex=-32768>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value with leading spaces",
+"input": "<span tabindex=' -32768'>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value with trailing spaces",
+"input": "<span tabindex='-32768 '>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value with trailing junk",
+"input": "<span tabindex='32768a'>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value with trailing junk and whitespace",
+"input": "<span tabindex='32768a '>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value with trailing whitespace and junk",
+"input": "<span tabindex='32768 a'>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value with leading spaces",
+"input": "<span tabindex=' 32768'>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value with leading spaces (with sign)",
+"input": "<span tabindex=' -32768'>",
+"fail-if": "invalid-integer-value"},
+
+{"description": "invalid tabindex attribute value (blank)",
+"input": "<span tabindex>",
+"fail-unless": "attribute-value-can-not-be-blank"},
+
+{"description": "invalid tabindex attribute value due to leading junk",
+"input": "<span tabindex=a1>",
+"fail-unless": "invalid-integer-value"},
+
+{"description": "invalid tabindex attribute value due to two hyphens",
+"input": "<span tabindex=--1>",
+"fail-unless": "invalid-integer-value"},
+
+{"description": "invalid tabindex attribute value due to non-numeric",
+"input": "<span tabindex=foo>",
+"fail-unless": "invalid-integer-value"},
+
+{"description": "invalid tabindex attribute value due to positive sign",
+"input": "<span tabindex=+1>",
+"fail-unless": "invalid-integer-value"},
+
+{"description": "invalid tabindex attribute value due to decimal point",
+"input": "<span tabindex=.1>",
+"fail-unless": "invalid-integer-value"},
+
+{"description": "valid tabindex attribute value with trailing decimal point",
+"input": "<span tabindex=1.0>",
+"fail-if": "invalid-integer-value"}
+
+]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/tokenizertotree.py b/testing/web-platform/tests/tools/html5lib/html5lib/tests/tokenizertotree.py
new file mode 100644
index 000000000..b841c76ce
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/tokenizertotree.py
@@ -0,0 +1,68 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import sys
+import os
+import json
+import re
+
+import html5lib
+from . import support
+from . import test_tokenizer
+
+p = html5lib.HTMLParser()
+
+unnamespaceExpected = re.compile(r"^(\|\s*)<html ([^>]+)>", re.M).sub
+
+
+def main(out_path):
+ if not os.path.exists(out_path):
+ sys.stderr.write("Path %s does not exist" % out_path)
+ sys.exit(1)
+
+ for filename in support.get_data_files('tokenizer', '*.test'):
+ run_file(filename, out_path)
+
+
+def run_file(filename, out_path):
+ try:
+ tests_data = json.load(open(filename, "r"))
+ except ValueError:
+ sys.stderr.write("Failed to load %s\n" % filename)
+ return
+ name = os.path.splitext(os.path.split(filename)[1])[0]
+ output_file = open(os.path.join(out_path, "tokenizer_%s.dat" % name), "w")
+
+ if 'tests' in tests_data:
+ for test_data in tests_data['tests']:
+ if 'initialStates' not in test_data:
+ test_data["initialStates"] = ["Data state"]
+
+ for initial_state in test_data["initialStates"]:
+ if initial_state != "Data state":
+ # don't support this yet
+ continue
+ test = make_test(test_data)
+ output_file.write(test)
+
+ output_file.close()
+
+
+def make_test(test_data):
+ if 'doubleEscaped' in test_data:
+ test_data = test_tokenizer.unescape_test(test_data)
+
+ rv = []
+ rv.append("#data")
+ rv.append(test_data["input"].encode("utf8"))
+ rv.append("#errors")
+ tree = p.parse(test_data["input"])
+ output = p.tree.testSerializer(tree)
+ output = "\n".join(("| " + line[3:]) if line.startswith("| ") else line
+ for line in output.split("\n"))
+ output = unnamespaceExpected(r"\1<\2>", output)
+ rv.append(output.encode("utf8"))
+ rv.append("")
+ return "\n".join(rv)
+
+if __name__ == "__main__":
+ main(sys.argv[1])
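tokenizertotree.py converts the tokenizer .test files into tree-construction-style .dat files: for every test whose initial state is the Data state it parses the test input with the html5lib parser, serializes the resulting tree with testSerializer, and post-processes that output into the expected-tree format. main() takes a single argument, an output directory that must already exist, and writes one tokenizer_<name>.dat file per input file. Below is a minimal usage sketch, assuming the html5lib package from this tree is importable; note that make_test() mixes UTF-8 byte strings into its text output, so the script appears to target Python 2.

import os
from html5lib.tests import tokenizertotree  # assumes this checkout is on sys.path

out_dir = "/tmp/tokenizer-dat"              # hypothetical output directory
if not os.path.exists(out_dir):             # main() exits if the path is missing
    os.makedirs(out_dir)
tokenizertotree.main(out_dir)               # one tokenizer_<name>.dat per .test file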
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/us-ascii.html b/testing/web-platform/tests/tools/html5lib/html5lib/tests/us-ascii.html
new file mode 100644
index 000000000..728cb6baf
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/us-ascii.html
@@ -0,0 +1,3 @@
+<!doctype html>
+<title>Test</title>
+<p>Hello World! \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tests/utf-8-bom.html b/testing/web-platform/tests/tools/html5lib/html5lib/tests/utf-8-bom.html
new file mode 100644
index 000000000..6ac5efced
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tests/utf-8-bom.html
@@ -0,0 +1,3 @@
+<!doctype html>
+<title>Test</title>
+<p>Hello World! © \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/tokenizer.py b/testing/web-platform/tests/tools/html5lib/html5lib/tokenizer.py
new file mode 100644
index 000000000..797745787
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/tokenizer.py
@@ -0,0 +1,1731 @@
+from __future__ import absolute_import, division, unicode_literals
+
+try:
+ chr = unichr # flake8: noqa
+except NameError:
+ pass
+
+from collections import deque
+
+from .constants import spaceCharacters
+from .constants import entities
+from .constants import asciiLetters, asciiUpper2Lower
+from .constants import digits, hexDigits, EOF
+from .constants import tokenTypes, tagTokenTypes
+from .constants import replacementCharacters
+
+from .inputstream import HTMLInputStream
+
+from .trie import Trie
+
+entitiesTrie = Trie(entities)
+
+
+class HTMLTokenizer(object):
+ """ This class takes care of tokenizing HTML.
+
+ * self.currentToken
+ Holds the token that is currently being processed.
+
+ * self.state
+ Holds a reference to the method to be invoked... XXX
+
+ * self.stream
+      Points to the HTMLInputStream object.
+ """
+
+ def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
+ lowercaseElementName=True, lowercaseAttrName=True, parser=None):
+
+ self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
+ self.parser = parser
+
+ # Perform case conversions?
+ self.lowercaseElementName = lowercaseElementName
+ self.lowercaseAttrName = lowercaseAttrName
+
+ # Setup the initial tokenizer state
+ self.escapeFlag = False
+ self.lastFourChars = []
+ self.state = self.dataState
+ self.escape = False
+
+ # The current token being created
+ self.currentToken = None
+ super(HTMLTokenizer, self).__init__()
+
+ def __iter__(self):
+ """ This is where the magic happens.
+
+        We do our usual processing through the states, and when we have a token
+        to return we yield it, which pauses processing until the next token
+        is requested.
+ """
+ self.tokenQueue = deque([])
+ # Start processing. When EOF is reached self.state will return False
+ # instead of True and the loop will terminate.
+ while self.state():
+ while self.stream.errors:
+ yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
+ while self.tokenQueue:
+ yield self.tokenQueue.popleft()
+
+ def consumeNumberEntity(self, isHex):
+ """This function returns either U+FFFD or the character based on the
+ decimal or hexadecimal representation. It also discards ";" if present.
+        If it is not present, a "numeric-entity-without-semicolon" ParseError is appended to self.tokenQueue.
+ """
+
+ allowed = digits
+ radix = 10
+ if isHex:
+ allowed = hexDigits
+ radix = 16
+
+ charStack = []
+
+ # Consume all the characters that are in range while making sure we
+ # don't hit an EOF.
+ c = self.stream.char()
+ while c in allowed and c is not EOF:
+ charStack.append(c)
+ c = self.stream.char()
+
+ # Convert the set of characters consumed to an int.
+ charAsInt = int("".join(charStack), radix)
+
+ # Certain characters get replaced with others
+ if charAsInt in replacementCharacters:
+ char = replacementCharacters[charAsInt]
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "illegal-codepoint-for-numeric-entity",
+ "datavars": {"charAsInt": charAsInt}})
+ elif ((0xD800 <= charAsInt <= 0xDFFF) or
+ (charAsInt > 0x10FFFF)):
+ char = "\uFFFD"
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "illegal-codepoint-for-numeric-entity",
+ "datavars": {"charAsInt": charAsInt}})
+ else:
+ # Should speed up this check somehow (e.g. move the set to a constant)
+ if ((0x0001 <= charAsInt <= 0x0008) or
+ (0x000E <= charAsInt <= 0x001F) or
+ (0x007F <= charAsInt <= 0x009F) or
+ (0xFDD0 <= charAsInt <= 0xFDEF) or
+ charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
+ 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
+ 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
+ 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
+ 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
+ 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
+ 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
+ 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
+ 0xFFFFF, 0x10FFFE, 0x10FFFF])):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data":
+ "illegal-codepoint-for-numeric-entity",
+ "datavars": {"charAsInt": charAsInt}})
+ try:
+                # Try/except needed as UCS-2 Python builds' unichr only works
+ # within the BMP.
+ char = chr(charAsInt)
+ except ValueError:
+ v = charAsInt - 0x10000
+ char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
+
+        # Discard the ";" if present. Otherwise, unget the character back onto
+        # the stream and append a "numeric-entity-without-semicolon" ParseError.
+ if c != ";":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "numeric-entity-without-semicolon"})
+ self.stream.unget(c)
+
+ return char
+
+ def consumeEntity(self, allowedChar=None, fromAttribute=False):
+ # Initialise to the default output for when no entity is matched
+ output = "&"
+
+ charStack = [self.stream.char()]
+ if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
+ or (allowedChar is not None and allowedChar == charStack[0])):
+ self.stream.unget(charStack[0])
+
+ elif charStack[0] == "#":
+ # Read the next character to see if it's hex or decimal
+ hex = False
+ charStack.append(self.stream.char())
+ if charStack[-1] in ("x", "X"):
+ hex = True
+ charStack.append(self.stream.char())
+
+ # charStack[-1] should be the first digit
+ if (hex and charStack[-1] in hexDigits) \
+ or (not hex and charStack[-1] in digits):
+ # At least one digit found, so consume the whole number
+ self.stream.unget(charStack[-1])
+ output = self.consumeNumberEntity(hex)
+ else:
+ # No digits found
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "expected-numeric-entity"})
+ self.stream.unget(charStack.pop())
+ output = "&" + "".join(charStack)
+
+ else:
+            # At this point in the process we might have a named entity. Entities
+ # are stored in the global variable "entities".
+ #
+            # Consume characters and compare them to a substring of the
+            # entity names in the list until the substring no longer matches.
+ while (charStack[-1] is not EOF):
+ if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
+ break
+ charStack.append(self.stream.char())
+
+ # At this point we have a string that starts with some characters
+ # that may match an entity
+ # Try to find the longest entity the string will match to take care
+ # of &noti for instance.
+ try:
+ entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
+ entityLength = len(entityName)
+ except KeyError:
+ entityName = None
+
+ if entityName is not None:
+ if entityName[-1] != ";":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "named-entity-without-semicolon"})
+ if (entityName[-1] != ";" and fromAttribute and
+ (charStack[entityLength] in asciiLetters or
+ charStack[entityLength] in digits or
+ charStack[entityLength] == "=")):
+ self.stream.unget(charStack.pop())
+ output = "&" + "".join(charStack)
+ else:
+ output = entities[entityName]
+ self.stream.unget(charStack.pop())
+ output += "".join(charStack[entityLength:])
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-named-entity"})
+ self.stream.unget(charStack.pop())
+ output = "&" + "".join(charStack)
+
+ if fromAttribute:
+ self.currentToken["data"][-1][1] += output
+ else:
+ if output in spaceCharacters:
+ tokenType = "SpaceCharacters"
+ else:
+ tokenType = "Characters"
+ self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
+
+ def processEntityInAttribute(self, allowedChar):
+ """This method replaces the need for "entityInAttributeValueState".
+ """
+ self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
+
+ def emitCurrentToken(self):
+ """This method is a generic handler for emitting the tags. It also sets
+ the state to "data" because that's what's needed after a token has been
+ emitted.
+ """
+ token = self.currentToken
+ # Add token to the queue to be yielded
+ if (token["type"] in tagTokenTypes):
+ if self.lowercaseElementName:
+ token["name"] = token["name"].translate(asciiUpper2Lower)
+ if token["type"] == tokenTypes["EndTag"]:
+ if token["data"]:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "attributes-in-end-tag"})
+ if token["selfClosing"]:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "self-closing-flag-on-end-tag"})
+ self.tokenQueue.append(token)
+ self.state = self.dataState
+
+ # Below are the various tokenizer states worked out.
+ def dataState(self):
+ data = self.stream.char()
+ if data == "&":
+ self.state = self.entityDataState
+ elif data == "<":
+ self.state = self.tagOpenState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\u0000"})
+ elif data is EOF:
+ # Tokenization ends.
+ return False
+ elif data in spaceCharacters:
+ # Directly after emitting a token you switch back to the "data
+ # state". At that point spaceCharacters are important so they are
+ # emitted separately.
+ self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
+ data + self.stream.charsUntil(spaceCharacters, True)})
+ # No need to update lastFourChars here, since the first space will
+ # have already been appended to lastFourChars and will have broken
+ # any <!-- or --> sequences
+ else:
+ chars = self.stream.charsUntil(("&", "<", "\u0000"))
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+ data + chars})
+ return True
+
+ def entityDataState(self):
+ self.consumeEntity()
+ self.state = self.dataState
+ return True
+
+ def rcdataState(self):
+ data = self.stream.char()
+ if data == "&":
+ self.state = self.characterReferenceInRcdata
+ elif data == "<":
+ self.state = self.rcdataLessThanSignState
+ elif data == EOF:
+ # Tokenization ends.
+ return False
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ elif data in spaceCharacters:
+ # Directly after emitting a token you switch back to the "data
+ # state". At that point spaceCharacters are important so they are
+ # emitted separately.
+ self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
+ data + self.stream.charsUntil(spaceCharacters, True)})
+ # No need to update lastFourChars here, since the first space will
+ # have already been appended to lastFourChars and will have broken
+ # any <!-- or --> sequences
+ else:
+ chars = self.stream.charsUntil(("&", "<", "\u0000"))
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+ data + chars})
+ return True
+
+ def characterReferenceInRcdata(self):
+ self.consumeEntity()
+ self.state = self.rcdataState
+ return True
+
+ def rawtextState(self):
+ data = self.stream.char()
+ if data == "<":
+ self.state = self.rawtextLessThanSignState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ elif data == EOF:
+ # Tokenization ends.
+ return False
+ else:
+ chars = self.stream.charsUntil(("<", "\u0000"))
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+ data + chars})
+ return True
+
+ def scriptDataState(self):
+ data = self.stream.char()
+ if data == "<":
+ self.state = self.scriptDataLessThanSignState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ elif data == EOF:
+ # Tokenization ends.
+ return False
+ else:
+ chars = self.stream.charsUntil(("<", "\u0000"))
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+ data + chars})
+ return True
+
+ def plaintextState(self):
+ data = self.stream.char()
+ if data == EOF:
+ # Tokenization ends.
+ return False
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+ data + self.stream.charsUntil("\u0000")})
+ return True
+
+ def tagOpenState(self):
+ data = self.stream.char()
+ if data == "!":
+ self.state = self.markupDeclarationOpenState
+ elif data == "/":
+ self.state = self.closeTagOpenState
+ elif data in asciiLetters:
+ self.currentToken = {"type": tokenTypes["StartTag"],
+ "name": data, "data": [],
+ "selfClosing": False,
+ "selfClosingAcknowledged": False}
+ self.state = self.tagNameState
+ elif data == ">":
+ # XXX In theory it could be something besides a tag name. But
+ # do we really care?
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-tag-name-but-got-right-bracket"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
+ self.state = self.dataState
+ elif data == "?":
+ # XXX In theory it could be something besides a tag name. But
+ # do we really care?
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-tag-name-but-got-question-mark"})
+ self.stream.unget(data)
+ self.state = self.bogusCommentState
+ else:
+ # XXX
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-tag-name"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+ self.stream.unget(data)
+ self.state = self.dataState
+ return True
+
+ def closeTagOpenState(self):
+ data = self.stream.char()
+ if data in asciiLetters:
+ self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
+ "data": [], "selfClosing": False}
+ self.state = self.tagNameState
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-closing-tag-but-got-right-bracket"})
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-closing-tag-but-got-eof"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+ self.state = self.dataState
+ else:
+ # XXX data can be _'_...
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-closing-tag-but-got-char",
+ "datavars": {"data": data}})
+ self.stream.unget(data)
+ self.state = self.bogusCommentState
+ return True
+
+ def tagNameState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.state = self.beforeAttributeNameState
+ elif data == ">":
+ self.emitCurrentToken()
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-tag-name"})
+ self.state = self.dataState
+ elif data == "/":
+ self.state = self.selfClosingStartTagState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["name"] += "\uFFFD"
+ else:
+ self.currentToken["name"] += data
+ # (Don't use charsUntil here, because tag names are
+ # very short and it's faster to not do anything fancy)
+ return True
+
+ def rcdataLessThanSignState(self):
+ data = self.stream.char()
+ if data == "/":
+ self.temporaryBuffer = ""
+ self.state = self.rcdataEndTagOpenState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+ self.stream.unget(data)
+ self.state = self.rcdataState
+ return True
+
+ def rcdataEndTagOpenState(self):
+ data = self.stream.char()
+ if data in asciiLetters:
+ self.temporaryBuffer += data
+ self.state = self.rcdataEndTagNameState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+ self.stream.unget(data)
+ self.state = self.rcdataState
+ return True
+
+ def rcdataEndTagNameState(self):
+ appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
+ data = self.stream.char()
+ if data in spaceCharacters and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.state = self.beforeAttributeNameState
+ elif data == "/" and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.state = self.selfClosingStartTagState
+ elif data == ">" and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.emitCurrentToken()
+ self.state = self.dataState
+ elif data in asciiLetters:
+ self.temporaryBuffer += data
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "</" + self.temporaryBuffer})
+ self.stream.unget(data)
+ self.state = self.rcdataState
+ return True
+
+ def rawtextLessThanSignState(self):
+ data = self.stream.char()
+ if data == "/":
+ self.temporaryBuffer = ""
+ self.state = self.rawtextEndTagOpenState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+ self.stream.unget(data)
+ self.state = self.rawtextState
+ return True
+
+ def rawtextEndTagOpenState(self):
+ data = self.stream.char()
+ if data in asciiLetters:
+ self.temporaryBuffer += data
+ self.state = self.rawtextEndTagNameState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+ self.stream.unget(data)
+ self.state = self.rawtextState
+ return True
+
+ def rawtextEndTagNameState(self):
+ appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
+ data = self.stream.char()
+ if data in spaceCharacters and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.state = self.beforeAttributeNameState
+ elif data == "/" and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.state = self.selfClosingStartTagState
+ elif data == ">" and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.emitCurrentToken()
+ self.state = self.dataState
+ elif data in asciiLetters:
+ self.temporaryBuffer += data
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "</" + self.temporaryBuffer})
+ self.stream.unget(data)
+ self.state = self.rawtextState
+ return True
+
+ def scriptDataLessThanSignState(self):
+ data = self.stream.char()
+ if data == "/":
+ self.temporaryBuffer = ""
+ self.state = self.scriptDataEndTagOpenState
+ elif data == "!":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
+ self.state = self.scriptDataEscapeStartState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+ self.stream.unget(data)
+ self.state = self.scriptDataState
+ return True
+
+ def scriptDataEndTagOpenState(self):
+ data = self.stream.char()
+ if data in asciiLetters:
+ self.temporaryBuffer += data
+ self.state = self.scriptDataEndTagNameState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+ self.stream.unget(data)
+ self.state = self.scriptDataState
+ return True
+
+ def scriptDataEndTagNameState(self):
+ appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
+ data = self.stream.char()
+ if data in spaceCharacters and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.state = self.beforeAttributeNameState
+ elif data == "/" and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.state = self.selfClosingStartTagState
+ elif data == ">" and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.emitCurrentToken()
+ self.state = self.dataState
+ elif data in asciiLetters:
+ self.temporaryBuffer += data
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "</" + self.temporaryBuffer})
+ self.stream.unget(data)
+ self.state = self.scriptDataState
+ return True
+
+ def scriptDataEscapeStartState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+ self.state = self.scriptDataEscapeStartDashState
+ else:
+ self.stream.unget(data)
+ self.state = self.scriptDataState
+ return True
+
+ def scriptDataEscapeStartDashState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+ self.state = self.scriptDataEscapedDashDashState
+ else:
+ self.stream.unget(data)
+ self.state = self.scriptDataState
+ return True
+
+ def scriptDataEscapedState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+ self.state = self.scriptDataEscapedDashState
+ elif data == "<":
+ self.state = self.scriptDataEscapedLessThanSignState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ elif data == EOF:
+ self.state = self.dataState
+ else:
+ chars = self.stream.charsUntil(("<", "-", "\u0000"))
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+ data + chars})
+ return True
+
+ def scriptDataEscapedDashState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+ self.state = self.scriptDataEscapedDashDashState
+ elif data == "<":
+ self.state = self.scriptDataEscapedLessThanSignState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ self.state = self.scriptDataEscapedState
+ elif data == EOF:
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ self.state = self.scriptDataEscapedState
+ return True
+
+ def scriptDataEscapedDashDashState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+ elif data == "<":
+ self.state = self.scriptDataEscapedLessThanSignState
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
+ self.state = self.scriptDataState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ self.state = self.scriptDataEscapedState
+ elif data == EOF:
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ self.state = self.scriptDataEscapedState
+ return True
+
+ def scriptDataEscapedLessThanSignState(self):
+ data = self.stream.char()
+ if data == "/":
+ self.temporaryBuffer = ""
+ self.state = self.scriptDataEscapedEndTagOpenState
+ elif data in asciiLetters:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
+ self.temporaryBuffer = data
+ self.state = self.scriptDataDoubleEscapeStartState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+ self.stream.unget(data)
+ self.state = self.scriptDataEscapedState
+ return True
+
+ def scriptDataEscapedEndTagOpenState(self):
+ data = self.stream.char()
+ if data in asciiLetters:
+ self.temporaryBuffer = data
+ self.state = self.scriptDataEscapedEndTagNameState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+ self.stream.unget(data)
+ self.state = self.scriptDataEscapedState
+ return True
+
+ def scriptDataEscapedEndTagNameState(self):
+ appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
+ data = self.stream.char()
+ if data in spaceCharacters and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.state = self.beforeAttributeNameState
+ elif data == "/" and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.state = self.selfClosingStartTagState
+ elif data == ">" and appropriate:
+ self.currentToken = {"type": tokenTypes["EndTag"],
+ "name": self.temporaryBuffer,
+ "data": [], "selfClosing": False}
+ self.emitCurrentToken()
+ self.state = self.dataState
+ elif data in asciiLetters:
+ self.temporaryBuffer += data
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "</" + self.temporaryBuffer})
+ self.stream.unget(data)
+ self.state = self.scriptDataEscapedState
+ return True
+
+ def scriptDataDoubleEscapeStartState(self):
+ data = self.stream.char()
+ if data in (spaceCharacters | frozenset(("/", ">"))):
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ if self.temporaryBuffer.lower() == "script":
+ self.state = self.scriptDataDoubleEscapedState
+ else:
+ self.state = self.scriptDataEscapedState
+ elif data in asciiLetters:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ self.temporaryBuffer += data
+ else:
+ self.stream.unget(data)
+ self.state = self.scriptDataEscapedState
+ return True
+
+ def scriptDataDoubleEscapedState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+ self.state = self.scriptDataDoubleEscapedDashState
+ elif data == "<":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+ self.state = self.scriptDataDoubleEscapedLessThanSignState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ elif data == EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-script-in-script"})
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ return True
+
+ def scriptDataDoubleEscapedDashState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+ self.state = self.scriptDataDoubleEscapedDashDashState
+ elif data == "<":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+ self.state = self.scriptDataDoubleEscapedLessThanSignState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ self.state = self.scriptDataDoubleEscapedState
+ elif data == EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-script-in-script"})
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ self.state = self.scriptDataDoubleEscapedState
+ return True
+
+ def scriptDataDoubleEscapedDashDashState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+ elif data == "<":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+ self.state = self.scriptDataDoubleEscapedLessThanSignState
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
+ self.state = self.scriptDataState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": "\uFFFD"})
+ self.state = self.scriptDataDoubleEscapedState
+ elif data == EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-script-in-script"})
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ self.state = self.scriptDataDoubleEscapedState
+ return True
+
+ def scriptDataDoubleEscapedLessThanSignState(self):
+ data = self.stream.char()
+ if data == "/":
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
+ self.temporaryBuffer = ""
+ self.state = self.scriptDataDoubleEscapeEndState
+ else:
+ self.stream.unget(data)
+ self.state = self.scriptDataDoubleEscapedState
+ return True
+
+ def scriptDataDoubleEscapeEndState(self):
+ data = self.stream.char()
+ if data in (spaceCharacters | frozenset(("/", ">"))):
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ if self.temporaryBuffer.lower() == "script":
+ self.state = self.scriptDataEscapedState
+ else:
+ self.state = self.scriptDataDoubleEscapedState
+ elif data in asciiLetters:
+ self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+ self.temporaryBuffer += data
+ else:
+ self.stream.unget(data)
+ self.state = self.scriptDataDoubleEscapedState
+ return True
+
+ def beforeAttributeNameState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.stream.charsUntil(spaceCharacters, True)
+ elif data in asciiLetters:
+ self.currentToken["data"].append([data, ""])
+ self.state = self.attributeNameState
+ elif data == ">":
+ self.emitCurrentToken()
+ elif data == "/":
+ self.state = self.selfClosingStartTagState
+ elif data in ("'", '"', "=", "<"):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "invalid-character-in-attribute-name"})
+ self.currentToken["data"].append([data, ""])
+ self.state = self.attributeNameState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"].append(["\uFFFD", ""])
+ self.state = self.attributeNameState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-attribute-name-but-got-eof"})
+ self.state = self.dataState
+ else:
+ self.currentToken["data"].append([data, ""])
+ self.state = self.attributeNameState
+ return True
+
+ def attributeNameState(self):
+ data = self.stream.char()
+ leavingThisState = True
+ emitToken = False
+ if data == "=":
+ self.state = self.beforeAttributeValueState
+ elif data in asciiLetters:
+ self.currentToken["data"][-1][0] += data +\
+ self.stream.charsUntil(asciiLetters, True)
+ leavingThisState = False
+ elif data == ">":
+ # XXX If we emit here the attributes are converted to a dict
+ # without being checked and when the code below runs we error
+ # because data is a dict not a list
+ emitToken = True
+ elif data in spaceCharacters:
+ self.state = self.afterAttributeNameState
+ elif data == "/":
+ self.state = self.selfClosingStartTagState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"][-1][0] += "\uFFFD"
+ leavingThisState = False
+ elif data in ("'", '"', "<"):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data":
+ "invalid-character-in-attribute-name"})
+ self.currentToken["data"][-1][0] += data
+ leavingThisState = False
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "eof-in-attribute-name"})
+ self.state = self.dataState
+ else:
+ self.currentToken["data"][-1][0] += data
+ leavingThisState = False
+
+ if leavingThisState:
+ # Attributes are not dropped at this stage. That happens when the
+ # start tag token is emitted so values can still be safely appended
+ # to attributes, but we do want to report the parse error in time.
+ if self.lowercaseAttrName:
+ self.currentToken["data"][-1][0] = (
+ self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
+ for name, value in self.currentToken["data"][:-1]:
+ if self.currentToken["data"][-1][0] == name:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "duplicate-attribute"})
+ break
+ # XXX Fix for above XXX
+ if emitToken:
+ self.emitCurrentToken()
+ return True
+
+ def afterAttributeNameState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.stream.charsUntil(spaceCharacters, True)
+ elif data == "=":
+ self.state = self.beforeAttributeValueState
+ elif data == ">":
+ self.emitCurrentToken()
+ elif data in asciiLetters:
+ self.currentToken["data"].append([data, ""])
+ self.state = self.attributeNameState
+ elif data == "/":
+ self.state = self.selfClosingStartTagState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"].append(["\uFFFD", ""])
+ self.state = self.attributeNameState
+ elif data in ("'", '"', "<"):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "invalid-character-after-attribute-name"})
+ self.currentToken["data"].append([data, ""])
+ self.state = self.attributeNameState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-end-of-tag-but-got-eof"})
+ self.state = self.dataState
+ else:
+ self.currentToken["data"].append([data, ""])
+ self.state = self.attributeNameState
+ return True
+
+ def beforeAttributeValueState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.stream.charsUntil(spaceCharacters, True)
+ elif data == "\"":
+ self.state = self.attributeValueDoubleQuotedState
+ elif data == "&":
+ self.state = self.attributeValueUnQuotedState
+ self.stream.unget(data)
+ elif data == "'":
+ self.state = self.attributeValueSingleQuotedState
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-attribute-value-but-got-right-bracket"})
+ self.emitCurrentToken()
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"][-1][1] += "\uFFFD"
+ self.state = self.attributeValueUnQuotedState
+ elif data in ("=", "<", "`"):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "equals-in-unquoted-attribute-value"})
+ self.currentToken["data"][-1][1] += data
+ self.state = self.attributeValueUnQuotedState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-attribute-value-but-got-eof"})
+ self.state = self.dataState
+ else:
+ self.currentToken["data"][-1][1] += data
+ self.state = self.attributeValueUnQuotedState
+ return True
+
+ def attributeValueDoubleQuotedState(self):
+ data = self.stream.char()
+ if data == "\"":
+ self.state = self.afterAttributeValueState
+ elif data == "&":
+ self.processEntityInAttribute('"')
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"][-1][1] += "\uFFFD"
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-attribute-value-double-quote"})
+ self.state = self.dataState
+ else:
+ self.currentToken["data"][-1][1] += data +\
+ self.stream.charsUntil(("\"", "&", "\u0000"))
+ return True
+
+ def attributeValueSingleQuotedState(self):
+ data = self.stream.char()
+ if data == "'":
+ self.state = self.afterAttributeValueState
+ elif data == "&":
+ self.processEntityInAttribute("'")
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"][-1][1] += "\uFFFD"
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-attribute-value-single-quote"})
+ self.state = self.dataState
+ else:
+ self.currentToken["data"][-1][1] += data +\
+ self.stream.charsUntil(("'", "&", "\u0000"))
+ return True
+
+ def attributeValueUnQuotedState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.state = self.beforeAttributeNameState
+ elif data == "&":
+ self.processEntityInAttribute(">")
+ elif data == ">":
+ self.emitCurrentToken()
+ elif data in ('"', "'", "=", "<", "`"):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-character-in-unquoted-attribute-value"})
+ self.currentToken["data"][-1][1] += data
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"][-1][1] += "\uFFFD"
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-attribute-value-no-quotes"})
+ self.state = self.dataState
+ else:
+ self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
+ frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
+ return True
+
+ def afterAttributeValueState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.state = self.beforeAttributeNameState
+ elif data == ">":
+ self.emitCurrentToken()
+ elif data == "/":
+ self.state = self.selfClosingStartTagState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-EOF-after-attribute-value"})
+ self.stream.unget(data)
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-character-after-attribute-value"})
+ self.stream.unget(data)
+ self.state = self.beforeAttributeNameState
+ return True
+
+ def selfClosingStartTagState(self):
+ data = self.stream.char()
+ if data == ">":
+ self.currentToken["selfClosing"] = True
+ self.emitCurrentToken()
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data":
+ "unexpected-EOF-after-solidus-in-tag"})
+ self.stream.unget(data)
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-character-after-solidus-in-tag"})
+ self.stream.unget(data)
+ self.state = self.beforeAttributeNameState
+ return True
+
+ def bogusCommentState(self):
+ # Make a new comment token and give it as value all the characters
+ # until the first > or EOF (charsUntil checks for EOF automatically)
+ # and emit it.
+ data = self.stream.charsUntil(">")
+ data = data.replace("\u0000", "\uFFFD")
+ self.tokenQueue.append(
+ {"type": tokenTypes["Comment"], "data": data})
+
+ # Eat the character directly after the bogus comment which is either a
+ # ">" or an EOF.
+ self.stream.char()
+ self.state = self.dataState
+ return True
+
+ def markupDeclarationOpenState(self):
+ charStack = [self.stream.char()]
+ if charStack[-1] == "-":
+ charStack.append(self.stream.char())
+ if charStack[-1] == "-":
+ self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
+ self.state = self.commentStartState
+ return True
+ elif charStack[-1] in ('d', 'D'):
+ matched = True
+ for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
+ ('y', 'Y'), ('p', 'P'), ('e', 'E')):
+ charStack.append(self.stream.char())
+ if charStack[-1] not in expected:
+ matched = False
+ break
+ if matched:
+ self.currentToken = {"type": tokenTypes["Doctype"],
+ "name": "",
+ "publicId": None, "systemId": None,
+ "correct": True}
+ self.state = self.doctypeState
+ return True
+ elif (charStack[-1] == "[" and
+ self.parser is not None and
+ self.parser.tree.openElements and
+ self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
+ matched = True
+ for expected in ["C", "D", "A", "T", "A", "["]:
+ charStack.append(self.stream.char())
+ if charStack[-1] != expected:
+ matched = False
+ break
+ if matched:
+ self.state = self.cdataSectionState
+ return True
+
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-dashes-or-doctype"})
+
+ while charStack:
+ self.stream.unget(charStack.pop())
+ self.state = self.bogusCommentState
+ return True
+
+ def commentStartState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.state = self.commentStartDashState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"] += "\uFFFD"
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "incorrect-comment"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-comment"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["data"] += data
+ self.state = self.commentState
+ return True
+
+ def commentStartDashState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.state = self.commentEndState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"] += "-\uFFFD"
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "incorrect-comment"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-comment"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["data"] += "-" + data
+ self.state = self.commentState
+ return True
+
+ def commentState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.state = self.commentEndDashState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"] += "\uFFFD"
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "eof-in-comment"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["data"] += data + \
+ self.stream.charsUntil(("-", "\u0000"))
+ return True
+
+ def commentEndDashState(self):
+ data = self.stream.char()
+ if data == "-":
+ self.state = self.commentEndState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"] += "-\uFFFD"
+ self.state = self.commentState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-comment-end-dash"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["data"] += "-" + data
+ self.state = self.commentState
+ return True
+
+ def commentEndState(self):
+ data = self.stream.char()
+ if data == ">":
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"] += "--\uFFFD"
+ self.state = self.commentState
+ elif data == "!":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-bang-after-double-dash-in-comment"})
+ self.state = self.commentEndBangState
+ elif data == "-":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-dash-after-double-dash-in-comment"})
+ self.currentToken["data"] += data
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-comment-double-dash"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ # XXX
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-comment"})
+ self.currentToken["data"] += "--" + data
+ self.state = self.commentState
+ return True
+
+ def commentEndBangState(self):
+ data = self.stream.char()
+ if data == ">":
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data == "-":
+ self.currentToken["data"] += "--!"
+ self.state = self.commentEndDashState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["data"] += "--!\uFFFD"
+ self.state = self.commentState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-comment-end-bang-state"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["data"] += "--!" + data
+ self.state = self.commentState
+ return True
+
+ def doctypeState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.state = self.beforeDoctypeNameState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-doctype-name-but-got-eof"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "need-space-after-doctype"})
+ self.stream.unget(data)
+ self.state = self.beforeDoctypeNameState
+ return True
+
+ def beforeDoctypeNameState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ pass
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-doctype-name-but-got-right-bracket"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["name"] = "\uFFFD"
+ self.state = self.doctypeNameState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-doctype-name-but-got-eof"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["name"] = data
+ self.state = self.doctypeNameState
+ return True
+
+ def doctypeNameState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
+ self.state = self.afterDoctypeNameState
+ elif data == ">":
+ self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["name"] += "\uFFFD"
+ self.state = self.doctypeNameState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype-name"})
+ self.currentToken["correct"] = False
+ self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["name"] += data
+ return True
+
+ def afterDoctypeNameState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ pass
+ elif data == ">":
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.currentToken["correct"] = False
+ self.stream.unget(data)
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ if data in ("p", "P"):
+ matched = True
+ for expected in (("u", "U"), ("b", "B"), ("l", "L"),
+ ("i", "I"), ("c", "C")):
+ data = self.stream.char()
+ if data not in expected:
+ matched = False
+ break
+ if matched:
+ self.state = self.afterDoctypePublicKeywordState
+ return True
+ elif data in ("s", "S"):
+ matched = True
+ for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
+ ("e", "E"), ("m", "M")):
+ data = self.stream.char()
+ if data not in expected:
+ matched = False
+ break
+ if matched:
+ self.state = self.afterDoctypeSystemKeywordState
+ return True
+
+ # All the characters read before the current 'data' will be
+ # [a-zA-Z], so they're garbage in the bogus doctype and can be
+ # discarded; only the latest character might be '>' or EOF
+ # and needs to be ungetted
+ self.stream.unget(data)
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "expected-space-or-right-bracket-in-doctype", "datavars":
+ {"data": data}})
+ self.currentToken["correct"] = False
+ self.state = self.bogusDoctypeState
+
+ return True
+
+ def afterDoctypePublicKeywordState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.state = self.beforeDoctypePublicIdentifierState
+ elif data in ("'", '"'):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.stream.unget(data)
+ self.state = self.beforeDoctypePublicIdentifierState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.stream.unget(data)
+ self.state = self.beforeDoctypePublicIdentifierState
+ return True
+
+ def beforeDoctypePublicIdentifierState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ pass
+ elif data == "\"":
+ self.currentToken["publicId"] = ""
+ self.state = self.doctypePublicIdentifierDoubleQuotedState
+ elif data == "'":
+ self.currentToken["publicId"] = ""
+ self.state = self.doctypePublicIdentifierSingleQuotedState
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-end-of-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.currentToken["correct"] = False
+ self.state = self.bogusDoctypeState
+ return True
+
+ def doctypePublicIdentifierDoubleQuotedState(self):
+ data = self.stream.char()
+ if data == "\"":
+ self.state = self.afterDoctypePublicIdentifierState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["publicId"] += "\uFFFD"
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-end-of-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["publicId"] += data
+ return True
+
+ def doctypePublicIdentifierSingleQuotedState(self):
+ data = self.stream.char()
+ if data == "'":
+ self.state = self.afterDoctypePublicIdentifierState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["publicId"] += "\uFFFD"
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-end-of-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["publicId"] += data
+ return True
+
+ def afterDoctypePublicIdentifierState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.state = self.betweenDoctypePublicAndSystemIdentifiersState
+ elif data == ">":
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data == '"':
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.currentToken["systemId"] = ""
+ self.state = self.doctypeSystemIdentifierDoubleQuotedState
+ elif data == "'":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.currentToken["systemId"] = ""
+ self.state = self.doctypeSystemIdentifierSingleQuotedState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.currentToken["correct"] = False
+ self.state = self.bogusDoctypeState
+ return True
+
+ def betweenDoctypePublicAndSystemIdentifiersState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ pass
+ elif data == ">":
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data == '"':
+ self.currentToken["systemId"] = ""
+ self.state = self.doctypeSystemIdentifierDoubleQuotedState
+ elif data == "'":
+ self.currentToken["systemId"] = ""
+ self.state = self.doctypeSystemIdentifierSingleQuotedState
+        elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.currentToken["correct"] = False
+ self.state = self.bogusDoctypeState
+ return True
+
+ def afterDoctypeSystemKeywordState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ self.state = self.beforeDoctypeSystemIdentifierState
+ elif data in ("'", '"'):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.stream.unget(data)
+ self.state = self.beforeDoctypeSystemIdentifierState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.stream.unget(data)
+ self.state = self.beforeDoctypeSystemIdentifierState
+ return True
+
+ def beforeDoctypeSystemIdentifierState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ pass
+ elif data == "\"":
+ self.currentToken["systemId"] = ""
+ self.state = self.doctypeSystemIdentifierDoubleQuotedState
+ elif data == "'":
+ self.currentToken["systemId"] = ""
+ self.state = self.doctypeSystemIdentifierSingleQuotedState
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.currentToken["correct"] = False
+ self.state = self.bogusDoctypeState
+ return True
+
+ def doctypeSystemIdentifierDoubleQuotedState(self):
+ data = self.stream.char()
+ if data == "\"":
+ self.state = self.afterDoctypeSystemIdentifierState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["systemId"] += "\uFFFD"
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-end-of-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["systemId"] += data
+ return True
+
+ def doctypeSystemIdentifierSingleQuotedState(self):
+ data = self.stream.char()
+ if data == "'":
+ self.state = self.afterDoctypeSystemIdentifierState
+ elif data == "\u0000":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ self.currentToken["systemId"] += "\uFFFD"
+ elif data == ">":
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-end-of-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.currentToken["systemId"] += data
+ return True
+
+ def afterDoctypeSystemIdentifierState(self):
+ data = self.stream.char()
+ if data in spaceCharacters:
+ pass
+ elif data == ">":
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "eof-in-doctype"})
+ self.currentToken["correct"] = False
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+ "unexpected-char-in-doctype"})
+ self.state = self.bogusDoctypeState
+ return True
+
+ def bogusDoctypeState(self):
+ data = self.stream.char()
+ if data == ">":
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ elif data is EOF:
+ # XXX EMIT
+ self.stream.unget(data)
+ self.tokenQueue.append(self.currentToken)
+ self.state = self.dataState
+ else:
+ pass
+ return True
+
+ def cdataSectionState(self):
+ data = []
+ while True:
+ data.append(self.stream.charsUntil("]"))
+ data.append(self.stream.charsUntil(">"))
+ char = self.stream.char()
+ if char == EOF:
+ break
+ else:
+ assert char == ">"
+ if data[-1][-2:] == "]]":
+ data[-1] = data[-1][:-2]
+ break
+ else:
+ data.append(char)
+
+ data = "".join(data)
+ # Deal with null here rather than in the parser
+ nullCount = data.count("\u0000")
+ if nullCount > 0:
+ for i in range(nullCount):
+ self.tokenQueue.append({"type": tokenTypes["ParseError"],
+ "data": "invalid-codepoint"})
+ data = data.replace("\u0000", "\uFFFD")
+ if data:
+ self.tokenQueue.append({"type": tokenTypes["Characters"],
+ "data": data})
+ self.state = self.dataState
+ return True
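
Every tokenizer state above follows the same contract: read from the input stream, push any Comment/Doctype/ParseError tokens onto self.tokenQueue, pick the next state by rebinding self.state to another bound method, and return True so the pump loop keeps running (returning False stops it at EOF). The following standalone sketch shows only that contract; ToyTokenizer and its two states are hypothetical names, not part of html5lib.

    # Illustrative sketch of the state-method pattern used by the tokenizer states above.
    class ToyTokenizer(object):
        def __init__(self, text):
            self.chars = iter(text)
            self.tokenQueue = []
            self.state = self.dataState  # states are bound methods, as in html5lib

        def char(self):
            return next(self.chars, None)  # None stands in for EOF

        def dataState(self):
            data = self.char()
            if data is None:
                return False                    # stop the pump loop at EOF
            elif data == "<":
                self.state = self.commentState  # pretend "<" always opens a comment
            else:
                self.tokenQueue.append({"type": "Characters", "data": data})
            return True

        def commentState(self):
            data = self.char()
            if data is None or data == ">":
                self.state = self.dataState
            else:
                self.tokenQueue.append({"type": "Comment", "data": data})
            return True

        def __iter__(self):
            # Pump the current state until it reports EOF, draining queued tokens.
            while self.state():
                while self.tokenQueue:
                    yield self.tokenQueue.pop(0)

    # list(ToyTokenizer("a<b>c")) ->
    #   [{'type': 'Characters', 'data': 'a'},
    #    {'type': 'Comment', 'data': 'b'},
    #    {'type': 'Characters', 'data': 'c'}]
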
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treeadapters/__init__.py b/testing/web-platform/tests/tools/html5lib/html5lib/treeadapters/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treeadapters/__init__.py
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treeadapters/sax.py b/testing/web-platform/tests/tools/html5lib/html5lib/treeadapters/sax.py
new file mode 100644
index 000000000..ad47df956
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treeadapters/sax.py
@@ -0,0 +1,44 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from xml.sax.xmlreader import AttributesNSImpl
+
+from ..constants import adjustForeignAttributes, unadjustForeignAttributes
+
+prefix_mapping = {}
+for prefix, localName, namespace in adjustForeignAttributes.values():
+ if prefix is not None:
+ prefix_mapping[prefix] = namespace
+
+
+def to_sax(walker, handler):
+ """Call SAX-like content handler based on treewalker walker"""
+ handler.startDocument()
+ for prefix, namespace in prefix_mapping.items():
+ handler.startPrefixMapping(prefix, namespace)
+
+ for token in walker:
+ type = token["type"]
+ if type == "Doctype":
+ continue
+ elif type in ("StartTag", "EmptyTag"):
+ attrs = AttributesNSImpl(token["data"],
+ unadjustForeignAttributes)
+ handler.startElementNS((token["namespace"], token["name"]),
+ token["name"],
+ attrs)
+ if type == "EmptyTag":
+ handler.endElementNS((token["namespace"], token["name"]),
+ token["name"])
+ elif type == "EndTag":
+ handler.endElementNS((token["namespace"], token["name"]),
+ token["name"])
+ elif type in ("Characters", "SpaceCharacters"):
+ handler.characters(token["data"])
+ elif type == "Comment":
+ pass
+ else:
+ assert False, "Unknown token type"
+
+ for prefix, namespace in prefix_mapping.items():
+ handler.endPrefixMapping(prefix)
+ handler.endDocument()
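
to_sax replays a treewalker's token stream as SAX events on any xml.sax content handler. A minimal usage sketch, assuming this vendored copy is importable as html5lib; the EventCollector handler is hypothetical and merely records the events it receives.

    import xml.sax.handler

    import html5lib
    from html5lib.treeadapters import sax


    class EventCollector(xml.sax.handler.ContentHandler):
        """Hypothetical handler that records the SAX events produced by to_sax."""

        def __init__(self):
            xml.sax.handler.ContentHandler.__init__(self)
            self.events = []

        def startElementNS(self, name, qname, attrs):
            self.events.append(("start", name))   # name is a (namespace, tag) pair

        def endElementNS(self, name, qname):
            self.events.append(("end", name))

        def characters(self, content):
            self.events.append(("text", content))


    tree = html5lib.parse("<p>hello</p>")         # etree-backed tree by default
    walker = html5lib.getTreeWalker("etree")
    handler = EventCollector()
    sax.to_sax(walker(tree), handler)
    # handler.events now holds start/end pairs for html, head, body and p,
    # plus ("text", "hello") between the p events.
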
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/__init__.py b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/__init__.py
new file mode 100644
index 000000000..6a6b2a4c4
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/__init__.py
@@ -0,0 +1,76 @@
+"""A collection of modules for building different kinds of tree from
+HTML documents.
+
+To create a treebuilder for a new type of tree, you need to
+implement several things:
+
+1) A set of classes for various types of elements: Document, Doctype,
+Comment, Element. These must implement the interface of
+treebuilders._base.Node (although comment nodes have a different
+signature for their constructor, see treebuilders.etree.Comment).
+Textual content may also be implemented as another node type, or not, as
+your tree implementation requires.
+
+2) A treebuilder object (called TreeBuilder by convention) that
+inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
+documentClass - the class to use for the bottommost node of a document
+elementClass - the class to use for HTML Elements
+commentClass - the class to use for comments
+doctypeClass - the class to use for doctypes
+It also has one required method:
+getDocument - Returns the root node of the complete document tree
+
+3) If you wish to run the unit tests, you must also create a
+testSerializer method on your treebuilder which accepts a node and
+returns a string containing the node and its children serialized according
+to the format used in the unit tests.
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+from ..utils import default_etree
+
+treeBuilderCache = {}
+
+
+def getTreeBuilder(treeType, implementation=None, **kwargs):
+ """Get a TreeBuilder class for various types of tree with built-in support
+
+ treeType - the name of the tree type required (case-insensitive). Supported
+ values are:
+
+ "dom" - A generic builder for DOM implementations, defaulting to
+                an xml.dom.minidom based implementation.
+ "etree" - A generic builder for tree implementations exposing an
+ ElementTree-like interface, defaulting to
+ xml.etree.cElementTree if available and
+ xml.etree.ElementTree if not.
+        "lxml" - An etree-based builder for lxml.etree, handling
+ limitations of lxml's implementation.
+
+ implementation - (Currently applies to the "etree" and "dom" tree types). A
+ module implementing the tree type e.g.
+ xml.etree.ElementTree or xml.etree.cElementTree."""
+
+ treeType = treeType.lower()
+ if treeType not in treeBuilderCache:
+ if treeType == "dom":
+ from . import dom
+ # Come up with a sane default (pref. from the stdlib)
+ if implementation is None:
+ from xml.dom import minidom
+ implementation = minidom
+ # NEVER cache here, caching is done in the dom submodule
+ return dom.getDomModule(implementation, **kwargs).TreeBuilder
+ elif treeType == "lxml":
+ from . import etree_lxml
+ treeBuilderCache[treeType] = etree_lxml.TreeBuilder
+ elif treeType == "etree":
+ from . import etree
+ if implementation is None:
+ implementation = default_etree
+ # NEVER cache here, caching is done in the etree submodule
+ return etree.getETreeModule(implementation, **kwargs).TreeBuilder
+ else:
+ raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
+ return treeBuilderCache.get(treeType)
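
getTreeBuilder is usually reached indirectly through the treebuilder argument of html5lib.parse, but the class it returns can also be handed to HTMLParser directly. A minimal sketch, assuming this vendored copy is importable as html5lib:

    import xml.etree.ElementTree as ElementTree

    import html5lib
    from html5lib.treebuilders import getTreeBuilder

    # Ask for the etree-backed builder explicitly, pinning the implementation.
    TreeBuilder = getTreeBuilder("etree", ElementTree)
    parser = html5lib.HTMLParser(tree=TreeBuilder)
    document = parser.parse("<title>demo</title>")

    # The short form does the same lookup internally (with the default etree).
    document = html5lib.parse("<title>demo</title>", treebuilder="etree")
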
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/_base.py b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/_base.py
new file mode 100644
index 000000000..8b97cc11a
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/_base.py
@@ -0,0 +1,377 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import text_type
+
+from ..constants import scopingElements, tableInsertModeElements, namespaces
+
+# The scope markers are inserted when entering object elements,
+# marquees, table cells, and table captions, and are used to prevent formatting
+# from "leaking" into tables, object elements, and marquees.
+Marker = None
+
+listElementsMap = {
+ None: (frozenset(scopingElements), False),
+ "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
+ "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
+ (namespaces["html"], "ul")])), False),
+ "table": (frozenset([(namespaces["html"], "html"),
+ (namespaces["html"], "table")]), False),
+ "select": (frozenset([(namespaces["html"], "optgroup"),
+ (namespaces["html"], "option")]), True)
+}
+
+
+class Node(object):
+ def __init__(self, name):
+ """Node representing an item in the tree.
+ name - The tag name associated with the node
+ parent - The parent of the current node (or None for the document node)
+ value - The value of the current node (applies to text nodes and
+            comments)
+ attributes - a dict holding name, value pairs for attributes of the node
+ childNodes - a list of child nodes of the current node. This must
+ include all elements but not necessarily other node types
+ _flags - A list of miscellaneous flags that can be set on the node
+ """
+ self.name = name
+ self.parent = None
+ self.value = None
+ self.attributes = {}
+ self.childNodes = []
+ self._flags = []
+
+ def __str__(self):
+ attributesStr = " ".join(["%s=\"%s\"" % (name, value)
+ for name, value in
+ self.attributes.items()])
+ if attributesStr:
+ return "<%s %s>" % (self.name, attributesStr)
+ else:
+ return "<%s>" % (self.name)
+
+ def __repr__(self):
+ return "<%s>" % (self.name)
+
+ def appendChild(self, node):
+ """Insert node as a child of the current node
+ """
+ raise NotImplementedError
+
+ def insertText(self, data, insertBefore=None):
+ """Insert data as text in the current node, positioned before the
+ start of node insertBefore or to the end of the node's text.
+ """
+ raise NotImplementedError
+
+ def insertBefore(self, node, refNode):
+ """Insert node as a child of the current node, before refNode in the
+ list of child nodes. Raises ValueError if refNode is not a child of
+ the current node"""
+ raise NotImplementedError
+
+ def removeChild(self, node):
+ """Remove node from the children of the current node
+ """
+ raise NotImplementedError
+
+ def reparentChildren(self, newParent):
+ """Move all the children of the current node to newParent.
+ This is needed so that trees that don't store text as nodes move the
+ text in the correct way
+ """
+ # XXX - should this method be made more general?
+ for child in self.childNodes:
+ newParent.appendChild(child)
+ self.childNodes = []
+
+ def cloneNode(self):
+ """Return a shallow copy of the current node i.e. a node with the same
+ name and attributes but with no parent or child nodes
+ """
+ raise NotImplementedError
+
+ def hasContent(self):
+ """Return true if the node has children or text, false otherwise
+ """
+ raise NotImplementedError
+
+
+class ActiveFormattingElements(list):
+ def append(self, node):
+ equalCount = 0
+ if node != Marker:
+ for element in self[::-1]:
+ if element == Marker:
+ break
+ if self.nodesEqual(element, node):
+ equalCount += 1
+ if equalCount == 3:
+ self.remove(element)
+ break
+ list.append(self, node)
+
+ def nodesEqual(self, node1, node2):
+ if not node1.nameTuple == node2.nameTuple:
+ return False
+
+ if not node1.attributes == node2.attributes:
+ return False
+
+ return True
+
+
+class TreeBuilder(object):
+ """Base treebuilder implementation
+ documentClass - the class to use for the bottommost node of a document
+ elementClass - the class to use for HTML Elements
+ commentClass - the class to use for comments
+ doctypeClass - the class to use for doctypes
+ """
+
+ # Document class
+ documentClass = None
+
+ # The class to use for creating a node
+ elementClass = None
+
+ # The class to use for creating comments
+ commentClass = None
+
+ # The class to use for creating doctypes
+ doctypeClass = None
+
+ # Fragment class
+ fragmentClass = None
+
+ def __init__(self, namespaceHTMLElements):
+ if namespaceHTMLElements:
+ self.defaultNamespace = "http://www.w3.org/1999/xhtml"
+ else:
+ self.defaultNamespace = None
+ self.reset()
+
+ def reset(self):
+ self.openElements = []
+ self.activeFormattingElements = ActiveFormattingElements()
+
+ # XXX - rename these to headElement, formElement
+ self.headPointer = None
+ self.formPointer = None
+
+ self.insertFromTable = False
+
+ self.document = self.documentClass()
+
+ def elementInScope(self, target, variant=None):
+
+        # If we pass a node in, we match that. If we pass a string,
+        # match any node with that name.
+ exactNode = hasattr(target, "nameTuple")
+
+ listElements, invert = listElementsMap[variant]
+
+ for node in reversed(self.openElements):
+ if (node.name == target and not exactNode or
+ node == target and exactNode):
+ return True
+ elif (invert ^ (node.nameTuple in listElements)):
+ return False
+
+ assert False # We should never reach this point
+
+ def reconstructActiveFormattingElements(self):
+ # Within this algorithm the order of steps described in the
+ # specification is not quite the same as the order of steps in the
+ # code. It should still do the same though.
+
+ # Step 1: stop the algorithm when there's nothing to do.
+ if not self.activeFormattingElements:
+ return
+
+ # Step 2 and step 3: we start with the last element. So i is -1.
+ i = len(self.activeFormattingElements) - 1
+ entry = self.activeFormattingElements[i]
+ if entry == Marker or entry in self.openElements:
+ return
+
+ # Step 6
+ while entry != Marker and entry not in self.openElements:
+ if i == 0:
+ # This will be reset to 0 below
+ i = -1
+ break
+ i -= 1
+ # Step 5: let entry be one earlier in the list.
+ entry = self.activeFormattingElements[i]
+
+ while True:
+ # Step 7
+ i += 1
+
+ # Step 8
+ entry = self.activeFormattingElements[i]
+ clone = entry.cloneNode() # Mainly to get a new copy of the attributes
+
+ # Step 9
+ element = self.insertElement({"type": "StartTag",
+ "name": clone.name,
+ "namespace": clone.namespace,
+ "data": clone.attributes})
+
+ # Step 10
+ self.activeFormattingElements[i] = element
+
+ # Step 11
+ if element == self.activeFormattingElements[-1]:
+ break
+
+ def clearActiveFormattingElements(self):
+ entry = self.activeFormattingElements.pop()
+ while self.activeFormattingElements and entry != Marker:
+ entry = self.activeFormattingElements.pop()
+
+ def elementInActiveFormattingElements(self, name):
+ """Check if an element exists between the end of the active
+ formatting elements and the last marker. If it does, return it, else
+ return false"""
+
+ for item in self.activeFormattingElements[::-1]:
+ # Check for Marker first because if it's a Marker it doesn't have a
+ # name attribute.
+ if item == Marker:
+ break
+ elif item.name == name:
+ return item
+ return False
+
+ def insertRoot(self, token):
+ element = self.createElement(token)
+ self.openElements.append(element)
+ self.document.appendChild(element)
+
+ def insertDoctype(self, token):
+ name = token["name"]
+ publicId = token["publicId"]
+ systemId = token["systemId"]
+
+ doctype = self.doctypeClass(name, publicId, systemId)
+ self.document.appendChild(doctype)
+
+ def insertComment(self, token, parent=None):
+ if parent is None:
+ parent = self.openElements[-1]
+ parent.appendChild(self.commentClass(token["data"]))
+
+ def createElement(self, token):
+ """Create an element but don't insert it anywhere"""
+ name = token["name"]
+ namespace = token.get("namespace", self.defaultNamespace)
+ element = self.elementClass(name, namespace)
+ element.attributes = token["data"]
+ return element
+
+ def _getInsertFromTable(self):
+ return self._insertFromTable
+
+ def _setInsertFromTable(self, value):
+ """Switch the function used to insert an element from the
+ normal one to the misnested table one and back again"""
+ self._insertFromTable = value
+ if value:
+ self.insertElement = self.insertElementTable
+ else:
+ self.insertElement = self.insertElementNormal
+
+ insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
+
+ def insertElementNormal(self, token):
+ name = token["name"]
+ assert isinstance(name, text_type), "Element %s not unicode" % name
+ namespace = token.get("namespace", self.defaultNamespace)
+ element = self.elementClass(name, namespace)
+ element.attributes = token["data"]
+ self.openElements[-1].appendChild(element)
+ self.openElements.append(element)
+ return element
+
+ def insertElementTable(self, token):
+ """Create an element and insert it into the tree"""
+ element = self.createElement(token)
+ if self.openElements[-1].name not in tableInsertModeElements:
+ return self.insertElementNormal(token)
+ else:
+ # We should be in the InTable mode. This means we want to do
+ # special magic element rearranging
+ parent, insertBefore = self.getTableMisnestedNodePosition()
+ if insertBefore is None:
+ parent.appendChild(element)
+ else:
+ parent.insertBefore(element, insertBefore)
+ self.openElements.append(element)
+ return element
+
+ def insertText(self, data, parent=None):
+ """Insert text data."""
+ if parent is None:
+ parent = self.openElements[-1]
+
+ if (not self.insertFromTable or (self.insertFromTable and
+ self.openElements[-1].name
+ not in tableInsertModeElements)):
+ parent.insertText(data)
+ else:
+ # We should be in the InTable mode. This means we want to do
+ # special magic element rearranging
+ parent, insertBefore = self.getTableMisnestedNodePosition()
+ parent.insertText(data, insertBefore)
+
+ def getTableMisnestedNodePosition(self):
+ """Get the foster parent element, and sibling to insert before
+ (or None) when inserting a misnested table node"""
+ # The foster parent element is the one which comes before the most
+ # recently opened table element
+ # XXX - this is really inelegant
+ lastTable = None
+ fosterParent = None
+ insertBefore = None
+ for elm in self.openElements[::-1]:
+ if elm.name == "table":
+ lastTable = elm
+ break
+ if lastTable:
+ # XXX - we should really check that this parent is actually a
+ # node here
+ if lastTable.parent:
+ fosterParent = lastTable.parent
+ insertBefore = lastTable
+ else:
+ fosterParent = self.openElements[
+ self.openElements.index(lastTable) - 1]
+ else:
+ fosterParent = self.openElements[0]
+ return fosterParent, insertBefore
+
+ def generateImpliedEndTags(self, exclude=None):
+ name = self.openElements[-1].name
+ # XXX td, th and tr are not actually needed
+ if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
+ and name != exclude):
+ self.openElements.pop()
+ # XXX This is not entirely what the specification says. We should
+ # investigate it more closely.
+ self.generateImpliedEndTags(exclude)
+
+ def getDocument(self):
+ "Return the final tree"
+ return self.document
+
+ def getFragment(self):
+ "Return the final fragment"
+ # assert self.innerHTML
+ fragment = self.fragmentClass()
+ self.openElements[0].reparentChildren(fragment)
+ return fragment
+
+ def testSerializer(self, node):
+ """Serialize the subtree of node in the format required by unit tests
+ node - the node from which to start serializing"""
+ raise NotImplementedError
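
One detail worth noting in _base.TreeBuilder above is the insertFromTable property: its setter rebinds self.insertElement to either insertElementNormal or insertElementTable, so the hot insertion path never re-checks the flag. A standalone sketch of that pattern follows; all names here are illustrative, not part of html5lib.

    class Inserter(object):
        """Toy class demonstrating a property whose setter swaps a bound method."""

        def __init__(self):
            self.insertFromTable = False        # runs the setter below

        def _getInsertFromTable(self):
            return self._insertFromTable

        def _setInsertFromTable(self, value):
            # Rebind the public entry point once instead of branching on every call.
            self._insertFromTable = value
            self.insert = self._insertTable if value else self._insertNormal

        insertFromTable = property(_getInsertFromTable, _setInsertFromTable)

        def _insertNormal(self, item):
            return "normal: %r" % (item,)

        def _insertTable(self, item):
            return "foster-parented: %r" % (item,)


    ins = Inserter()
    assert ins.insert("td") == "normal: 'td'"
    ins.insertFromTable = True
    assert ins.insert("td") == "foster-parented: 'td'"
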
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/dom.py b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/dom.py
new file mode 100644
index 000000000..234233b79
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/dom.py
@@ -0,0 +1,227 @@
+from __future__ import absolute_import, division, unicode_literals
+
+
+from xml.dom import minidom, Node
+import weakref
+
+from . import _base
+from .. import constants
+from ..constants import namespaces
+from ..utils import moduleFactoryFactory
+
+
+def getDomBuilder(DomImplementation):
+ Dom = DomImplementation
+
+ class AttrList(object):
+ def __init__(self, element):
+ self.element = element
+
+ def __iter__(self):
+ return list(self.element.attributes.items()).__iter__()
+
+ def __setitem__(self, name, value):
+ self.element.setAttribute(name, value)
+
+ def __len__(self):
+ return len(list(self.element.attributes.items()))
+
+ def items(self):
+ return [(item[0], item[1]) for item in
+ list(self.element.attributes.items())]
+
+ def keys(self):
+ return list(self.element.attributes.keys())
+
+ def __getitem__(self, name):
+ return self.element.getAttribute(name)
+
+ def __contains__(self, name):
+ if isinstance(name, tuple):
+ raise NotImplementedError
+ else:
+ return self.element.hasAttribute(name)
+
+ class NodeBuilder(_base.Node):
+ def __init__(self, element):
+ _base.Node.__init__(self, element.nodeName)
+ self.element = element
+
+ namespace = property(lambda self: hasattr(self.element, "namespaceURI")
+ and self.element.namespaceURI or None)
+
+ def appendChild(self, node):
+ node.parent = self
+ self.element.appendChild(node.element)
+
+ def insertText(self, data, insertBefore=None):
+ text = self.element.ownerDocument.createTextNode(data)
+ if insertBefore:
+ self.element.insertBefore(text, insertBefore.element)
+ else:
+ self.element.appendChild(text)
+
+ def insertBefore(self, node, refNode):
+ self.element.insertBefore(node.element, refNode.element)
+ node.parent = self
+
+ def removeChild(self, node):
+ if node.element.parentNode == self.element:
+ self.element.removeChild(node.element)
+ node.parent = None
+
+ def reparentChildren(self, newParent):
+ while self.element.hasChildNodes():
+ child = self.element.firstChild
+ self.element.removeChild(child)
+ newParent.element.appendChild(child)
+ self.childNodes = []
+
+ def getAttributes(self):
+ return AttrList(self.element)
+
+ def setAttributes(self, attributes):
+ if attributes:
+ for name, value in list(attributes.items()):
+ if isinstance(name, tuple):
+ if name[0] is not None:
+ qualifiedName = (name[0] + ":" + name[1])
+ else:
+ qualifiedName = name[1]
+ self.element.setAttributeNS(name[2], qualifiedName,
+ value)
+ else:
+ self.element.setAttribute(
+ name, value)
+ attributes = property(getAttributes, setAttributes)
+
+ def cloneNode(self):
+ return NodeBuilder(self.element.cloneNode(False))
+
+ def hasContent(self):
+ return self.element.hasChildNodes()
+
+ def getNameTuple(self):
+ if self.namespace is None:
+ return namespaces["html"], self.name
+ else:
+ return self.namespace, self.name
+
+ nameTuple = property(getNameTuple)
+
+ class TreeBuilder(_base.TreeBuilder):
+ def documentClass(self):
+ self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
+ return weakref.proxy(self)
+
+ def insertDoctype(self, token):
+ name = token["name"]
+ publicId = token["publicId"]
+ systemId = token["systemId"]
+
+ domimpl = Dom.getDOMImplementation()
+ doctype = domimpl.createDocumentType(name, publicId, systemId)
+ self.document.appendChild(NodeBuilder(doctype))
+ if Dom == minidom:
+ doctype.ownerDocument = self.dom
+
+ def elementClass(self, name, namespace=None):
+ if namespace is None and self.defaultNamespace is None:
+ node = self.dom.createElement(name)
+ else:
+ node = self.dom.createElementNS(namespace, name)
+
+ return NodeBuilder(node)
+
+ def commentClass(self, data):
+ return NodeBuilder(self.dom.createComment(data))
+
+ def fragmentClass(self):
+ return NodeBuilder(self.dom.createDocumentFragment())
+
+ def appendChild(self, node):
+ self.dom.appendChild(node.element)
+
+ def testSerializer(self, element):
+ return testSerializer(element)
+
+ def getDocument(self):
+ return self.dom
+
+ def getFragment(self):
+ return _base.TreeBuilder.getFragment(self).element
+
+ def insertText(self, data, parent=None):
+ data = data
+ if parent != self:
+ _base.TreeBuilder.insertText(self, data, parent)
+ else:
+ # HACK: allow text nodes as children of the document node
+ if hasattr(self.dom, '_child_node_types'):
+ if Node.TEXT_NODE not in self.dom._child_node_types:
+ self.dom._child_node_types = list(self.dom._child_node_types)
+ self.dom._child_node_types.append(Node.TEXT_NODE)
+ self.dom.appendChild(self.dom.createTextNode(data))
+
+ implementation = DomImplementation
+ name = None
+
+ def testSerializer(element):
+ element.normalize()
+ rv = []
+
+ def serializeElement(element, indent=0):
+ if element.nodeType == Node.DOCUMENT_TYPE_NODE:
+ if element.name:
+ if element.publicId or element.systemId:
+ publicId = element.publicId or ""
+ systemId = element.systemId or ""
+ rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
+ (' ' * indent, element.name, publicId, systemId))
+ else:
+ rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
+ else:
+ rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
+ elif element.nodeType == Node.DOCUMENT_NODE:
+ rv.append("#document")
+ elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
+ rv.append("#document-fragment")
+ elif element.nodeType == Node.COMMENT_NODE:
+ rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
+ elif element.nodeType == Node.TEXT_NODE:
+ rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
+ else:
+ if (hasattr(element, "namespaceURI") and
+ element.namespaceURI is not None):
+ name = "%s %s" % (constants.prefixes[element.namespaceURI],
+ element.nodeName)
+ else:
+ name = element.nodeName
+ rv.append("|%s<%s>" % (' ' * indent, name))
+ if element.hasAttributes():
+ attributes = []
+ for i in range(len(element.attributes)):
+ attr = element.attributes.item(i)
+ name = attr.nodeName
+ value = attr.value
+ ns = attr.namespaceURI
+ if ns:
+ name = "%s %s" % (constants.prefixes[ns], attr.localName)
+ else:
+ name = attr.nodeName
+ attributes.append((name, value))
+
+ for name, value in sorted(attributes):
+ rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
+ indent += 2
+ for child in element.childNodes:
+ serializeElement(child, indent)
+ serializeElement(element, 0)
+
+ return "\n".join(rv)
+
+ return locals()
+
+
+# The actual means to get a module!
+getDomModule = moduleFactoryFactory(getDomBuilder)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/etree.py b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/etree.py
new file mode 100644
index 000000000..2c8ed19f8
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/etree.py
@@ -0,0 +1,337 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import text_type
+
+import re
+
+from . import _base
+from .. import ihatexml
+from .. import constants
+from ..constants import namespaces
+from ..utils import moduleFactoryFactory
+
+tag_regexp = re.compile("{([^}]*)}(.*)")
+
+
+def getETreeBuilder(ElementTreeImplementation, fullTree=False):
+ ElementTree = ElementTreeImplementation
+ ElementTreeCommentType = ElementTree.Comment("asd").tag
+
+ class Element(_base.Node):
+ def __init__(self, name, namespace=None):
+ self._name = name
+ self._namespace = namespace
+ self._element = ElementTree.Element(self._getETreeTag(name,
+ namespace))
+ if namespace is None:
+ self.nameTuple = namespaces["html"], self._name
+ else:
+ self.nameTuple = self._namespace, self._name
+ self.parent = None
+ self._childNodes = []
+ self._flags = []
+
+ def _getETreeTag(self, name, namespace):
+ if namespace is None:
+ etree_tag = name
+ else:
+ etree_tag = "{%s}%s" % (namespace, name)
+ return etree_tag
+
+ def _setName(self, name):
+ self._name = name
+ self._element.tag = self._getETreeTag(self._name, self._namespace)
+
+ def _getName(self):
+ return self._name
+
+ name = property(_getName, _setName)
+
+ def _setNamespace(self, namespace):
+ self._namespace = namespace
+ self._element.tag = self._getETreeTag(self._name, self._namespace)
+
+ def _getNamespace(self):
+ return self._namespace
+
+ namespace = property(_getNamespace, _setNamespace)
+
+ def _getAttributes(self):
+ return self._element.attrib
+
+ def _setAttributes(self, attributes):
+ # Delete existing attributes first
+ # XXX - there may be a better way to do this...
+ for key in list(self._element.attrib.keys()):
+ del self._element.attrib[key]
+ for key, value in attributes.items():
+ if isinstance(key, tuple):
+ name = "{%s}%s" % (key[2], key[1])
+ else:
+ name = key
+ self._element.set(name, value)
+
+ attributes = property(_getAttributes, _setAttributes)
+
+ def _getChildNodes(self):
+ return self._childNodes
+
+ def _setChildNodes(self, value):
+ del self._element[:]
+ self._childNodes = []
+ for element in value:
+ self.insertChild(element)
+
+ childNodes = property(_getChildNodes, _setChildNodes)
+
+ def hasContent(self):
+ """Return true if the node has children or text"""
+ return bool(self._element.text or len(self._element))
+
+ def appendChild(self, node):
+ self._childNodes.append(node)
+ self._element.append(node._element)
+ node.parent = self
+
+ def insertBefore(self, node, refNode):
+ index = list(self._element).index(refNode._element)
+ self._element.insert(index, node._element)
+ node.parent = self
+
+ def removeChild(self, node):
+ self._element.remove(node._element)
+ node.parent = None
+
+ def insertText(self, data, insertBefore=None):
+ if not(len(self._element)):
+ if not self._element.text:
+ self._element.text = ""
+ self._element.text += data
+ elif insertBefore is None:
+ # Insert the text as the tail of the last child element
+ if not self._element[-1].tail:
+ self._element[-1].tail = ""
+ self._element[-1].tail += data
+ else:
+ # Insert the text before the specified node
+ children = list(self._element)
+ index = children.index(insertBefore._element)
+ if index > 0:
+ if not self._element[index - 1].tail:
+ self._element[index - 1].tail = ""
+ self._element[index - 1].tail += data
+ else:
+ if not self._element.text:
+ self._element.text = ""
+ self._element.text += data
+
+ def cloneNode(self):
+ element = type(self)(self.name, self.namespace)
+ for name, value in self.attributes.items():
+ element.attributes[name] = value
+ return element
+
+ def reparentChildren(self, newParent):
+ if newParent.childNodes:
+ newParent.childNodes[-1]._element.tail += self._element.text
+ else:
+ if not newParent._element.text:
+ newParent._element.text = ""
+ if self._element.text is not None:
+ newParent._element.text += self._element.text
+ self._element.text = ""
+ _base.Node.reparentChildren(self, newParent)
+
+ class Comment(Element):
+ def __init__(self, data):
+ # Use the superclass constructor to set all properties on the
+ # wrapper element
+ self._element = ElementTree.Comment(data)
+ self.parent = None
+ self._childNodes = []
+ self._flags = []
+
+ def _getData(self):
+ return self._element.text
+
+ def _setData(self, value):
+ self._element.text = value
+
+ data = property(_getData, _setData)
+
+ class DocumentType(Element):
+ def __init__(self, name, publicId, systemId):
+ Element.__init__(self, "<!DOCTYPE>")
+ self._element.text = name
+ self.publicId = publicId
+ self.systemId = systemId
+
+ def _getPublicId(self):
+ return self._element.get("publicId", "")
+
+ def _setPublicId(self, value):
+ if value is not None:
+ self._element.set("publicId", value)
+
+ publicId = property(_getPublicId, _setPublicId)
+
+ def _getSystemId(self):
+ return self._element.get("systemId", "")
+
+ def _setSystemId(self, value):
+ if value is not None:
+ self._element.set("systemId", value)
+
+ systemId = property(_getSystemId, _setSystemId)
+
+ class Document(Element):
+ def __init__(self):
+ Element.__init__(self, "DOCUMENT_ROOT")
+
+ class DocumentFragment(Element):
+ def __init__(self):
+ Element.__init__(self, "DOCUMENT_FRAGMENT")
+
+ def testSerializer(element):
+ rv = []
+
+ def serializeElement(element, indent=0):
+ if not(hasattr(element, "tag")):
+ element = element.getroot()
+ if element.tag == "<!DOCTYPE>":
+ if element.get("publicId") or element.get("systemId"):
+ publicId = element.get("publicId") or ""
+ systemId = element.get("systemId") or ""
+ rv.append("""<!DOCTYPE %s "%s" "%s">""" %
+ (element.text, publicId, systemId))
+ else:
+ rv.append("<!DOCTYPE %s>" % (element.text,))
+ elif element.tag == "DOCUMENT_ROOT":
+ rv.append("#document")
+ if element.text is not None:
+ rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
+ if element.tail is not None:
+ raise TypeError("Document node cannot have tail")
+ if hasattr(element, "attrib") and len(element.attrib):
+ raise TypeError("Document node cannot have attributes")
+ elif element.tag == ElementTreeCommentType:
+ rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
+ else:
+ assert isinstance(element.tag, text_type), \
+ "Expected unicode, got %s, %s" % (type(element.tag), element.tag)
+ nsmatch = tag_regexp.match(element.tag)
+
+ if nsmatch is None:
+ name = element.tag
+ else:
+ ns, name = nsmatch.groups()
+ prefix = constants.prefixes[ns]
+ name = "%s %s" % (prefix, name)
+ rv.append("|%s<%s>" % (' ' * indent, name))
+
+ if hasattr(element, "attrib"):
+ attributes = []
+ for name, value in element.attrib.items():
+ nsmatch = tag_regexp.match(name)
+ if nsmatch is not None:
+ ns, name = nsmatch.groups()
+ prefix = constants.prefixes[ns]
+ attr_string = "%s %s" % (prefix, name)
+ else:
+ attr_string = name
+ attributes.append((attr_string, value))
+
+ for name, value in sorted(attributes):
+ rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
+ if element.text:
+ rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
+ indent += 2
+ for child in element:
+ serializeElement(child, indent)
+ if element.tail:
+ rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
+ serializeElement(element, 0)
+
+ return "\n".join(rv)
+
+ def tostring(element):
+ """Serialize an element and its child nodes to a string"""
+ rv = []
+ filter = ihatexml.InfosetFilter()
+
+ def serializeElement(element):
+ if isinstance(element, ElementTree.ElementTree):
+ element = element.getroot()
+
+ if element.tag == "<!DOCTYPE>":
+ if element.get("publicId") or element.get("systemId"):
+ publicId = element.get("publicId") or ""
+ systemId = element.get("systemId") or ""
+ rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
+ (element.text, publicId, systemId))
+ else:
+ rv.append("<!DOCTYPE %s>" % (element.text,))
+ elif element.tag == "DOCUMENT_ROOT":
+ if element.text is not None:
+ rv.append(element.text)
+ if element.tail is not None:
+ raise TypeError("Document node cannot have tail")
+ if hasattr(element, "attrib") and len(element.attrib):
+ raise TypeError("Document node cannot have attributes")
+
+ for child in element:
+ serializeElement(child)
+
+ elif element.tag == ElementTreeCommentType:
+ rv.append("<!--%s-->" % (element.text,))
+ else:
+ # This is assumed to be an ordinary element
+ if not element.attrib:
+ rv.append("<%s>" % (filter.fromXmlName(element.tag),))
+ else:
+ attr = " ".join(["%s=\"%s\"" % (
+ filter.fromXmlName(name), value)
+ for name, value in element.attrib.items()])
+ rv.append("<%s %s>" % (element.tag, attr))
+ if element.text:
+ rv.append(element.text)
+
+ for child in element:
+ serializeElement(child)
+
+ rv.append("</%s>" % (element.tag,))
+
+ if element.tail:
+ rv.append(element.tail)
+
+ serializeElement(element)
+
+ return "".join(rv)
+
+ class TreeBuilder(_base.TreeBuilder):
+ documentClass = Document
+ doctypeClass = DocumentType
+ elementClass = Element
+ commentClass = Comment
+ fragmentClass = DocumentFragment
+ implementation = ElementTreeImplementation
+
+ def testSerializer(self, element):
+ return testSerializer(element)
+
+ def getDocument(self):
+ if fullTree:
+ return self.document._element
+ else:
+ if self.defaultNamespace is not None:
+ return self.document._element.find(
+ "{%s}html" % self.defaultNamespace)
+ else:
+ return self.document._element.find("html")
+
+ def getFragment(self):
+ return _base.TreeBuilder.getFragment(self)._element
+
+ return locals()
+
+
+getETreeModule = moduleFactoryFactory(getETreeBuilder)
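
With the default settings the etree builder returns the html element of whichever ElementTree implementation was chosen (the wrapper document only when fullTree is set), so the normal ElementTree API applies. A minimal usage sketch, assuming this vendored copy is importable as html5lib:

    import xml.etree.ElementTree as ElementTree

    import html5lib

    # parse() defaults to the etree treebuilder and, without fullTree, returns
    # the <html> element rather than a wrapper document.
    html_el = html5lib.parse("<p id='intro'>hi</p>")

    print(ElementTree.tostring(html_el))

    # Elements carry namespaced etree tags such as "{http://www.w3.org/1999/xhtml}p".
    for p in html_el.iter("{http://www.w3.org/1999/xhtml}p"):
        print(p.get("id"), p.text)              # -> intro hi
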
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/etree_lxml.py b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/etree_lxml.py
new file mode 100644
index 000000000..35d08efaa
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treebuilders/etree_lxml.py
@@ -0,0 +1,369 @@
+"""Module for supporting the lxml.etree library. The idea here is to use as much
+of the native library as possible, without using fragile hacks like custom element
+names that break between releases. The downside of this is that we cannot represent
+all possible trees; specifically the following are known to cause problems:
+
+Text or comments as siblings of the root element
+Doctypes with no name
+
+When any of these things occur, we emit a DataLossWarning
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+import warnings
+import re
+import sys
+
+from . import _base
+from ..constants import DataLossWarning
+from .. import constants
+from . import etree as etree_builders
+from .. import ihatexml
+
+import lxml.etree as etree
+
+
+fullTree = True
+tag_regexp = re.compile("{([^}]*)}(.*)")
+
+comment_type = etree.Comment("asd").tag
+
+
+class DocumentType(object):
+ def __init__(self, name, publicId, systemId):
+ self.name = name
+ self.publicId = publicId
+ self.systemId = systemId
+
+
+class Document(object):
+ def __init__(self):
+ self._elementTree = None
+ self._childNodes = []
+
+ def appendChild(self, element):
+ self._elementTree.getroot().addnext(element._element)
+
+ def _getChildNodes(self):
+ return self._childNodes
+
+ childNodes = property(_getChildNodes)
+
+
+def testSerializer(element):
+ rv = []
+ finalText = None
+ infosetFilter = ihatexml.InfosetFilter()
+
+ def serializeElement(element, indent=0):
+ if not hasattr(element, "tag"):
+ if hasattr(element, "getroot"):
+ # Full tree case
+ rv.append("#document")
+ if element.docinfo.internalDTD:
+ if not (element.docinfo.public_id or
+ element.docinfo.system_url):
+ dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
+ else:
+ dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
+ element.docinfo.root_name,
+ element.docinfo.public_id,
+ element.docinfo.system_url)
+ rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
+ next_element = element.getroot()
+ while next_element.getprevious() is not None:
+ next_element = next_element.getprevious()
+ while next_element is not None:
+ serializeElement(next_element, indent + 2)
+ next_element = next_element.getnext()
+ elif isinstance(element, str) or isinstance(element, bytes):
+ # Text in a fragment
+ assert isinstance(element, str) or sys.version_info.major == 2
+ rv.append("|%s\"%s\"" % (' ' * indent, element))
+ else:
+ # Fragment case
+ rv.append("#document-fragment")
+ for next_element in element:
+ serializeElement(next_element, indent + 2)
+ elif element.tag == comment_type:
+ rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
+ if hasattr(element, "tail") and element.tail:
+ rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
+ else:
+ assert isinstance(element, etree._Element)
+ nsmatch = etree_builders.tag_regexp.match(element.tag)
+ if nsmatch is not None:
+ ns = nsmatch.group(1)
+ tag = nsmatch.group(2)
+ prefix = constants.prefixes[ns]
+ rv.append("|%s<%s %s>" % (' ' * indent, prefix,
+ infosetFilter.fromXmlName(tag)))
+ else:
+ rv.append("|%s<%s>" % (' ' * indent,
+ infosetFilter.fromXmlName(element.tag)))
+
+ if hasattr(element, "attrib"):
+ attributes = []
+ for name, value in element.attrib.items():
+ nsmatch = tag_regexp.match(name)
+ if nsmatch is not None:
+ ns, name = nsmatch.groups()
+ name = infosetFilter.fromXmlName(name)
+ prefix = constants.prefixes[ns]
+ attr_string = "%s %s" % (prefix, name)
+ else:
+ attr_string = infosetFilter.fromXmlName(name)
+ attributes.append((attr_string, value))
+
+ for name, value in sorted(attributes):
+ rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
+
+ if element.text:
+ rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
+ indent += 2
+ for child in element:
+ serializeElement(child, indent)
+ if hasattr(element, "tail") and element.tail:
+ rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
+ serializeElement(element, 0)
+
+ if finalText is not None:
+ rv.append("|%s\"%s\"" % (' ' * 2, finalText))
+
+ return "\n".join(rv)
+
+
+def tostring(element):
+ """Serialize an element and its child nodes to a string"""
+ rv = []
+ finalText = None
+
+ def serializeElement(element):
+ if not hasattr(element, "tag"):
+ if element.docinfo.internalDTD:
+ if element.docinfo.doctype:
+ dtd_str = element.docinfo.doctype
+ else:
+ dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
+ rv.append(dtd_str)
+ serializeElement(element.getroot())
+
+ elif element.tag == comment_type:
+ rv.append("<!--%s-->" % (element.text,))
+
+ else:
+ # This is assumed to be an ordinary element
+ if not element.attrib:
+ rv.append("<%s>" % (element.tag,))
+ else:
+ attr = " ".join(["%s=\"%s\"" % (name, value)
+ for name, value in element.attrib.items()])
+ rv.append("<%s %s>" % (element.tag, attr))
+ if element.text:
+ rv.append(element.text)
+
+ for child in element:
+ serializeElement(child)
+
+ rv.append("</%s>" % (element.tag,))
+
+ if hasattr(element, "tail") and element.tail:
+ rv.append(element.tail)
+
+ serializeElement(element)
+
+ if finalText is not None:
+        rv.append(finalText)
+
+ return "".join(rv)
+
+
+class TreeBuilder(_base.TreeBuilder):
+ documentClass = Document
+ doctypeClass = DocumentType
+ elementClass = None
+ commentClass = None
+ fragmentClass = Document
+ implementation = etree
+
+ def __init__(self, namespaceHTMLElements, fullTree=False):
+ builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
+ infosetFilter = self.infosetFilter = ihatexml.InfosetFilter()
+ self.namespaceHTMLElements = namespaceHTMLElements
+
+ class Attributes(dict):
+ def __init__(self, element, value={}):
+ self._element = element
+ dict.__init__(self, value)
+ for key, value in self.items():
+ if isinstance(key, tuple):
+ name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
+ else:
+ name = infosetFilter.coerceAttribute(key)
+ self._element._element.attrib[name] = value
+
+ def __setitem__(self, key, value):
+ dict.__setitem__(self, key, value)
+ if isinstance(key, tuple):
+ name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
+ else:
+ name = infosetFilter.coerceAttribute(key)
+ self._element._element.attrib[name] = value
+
+ class Element(builder.Element):
+ def __init__(self, name, namespace):
+ name = infosetFilter.coerceElement(name)
+ builder.Element.__init__(self, name, namespace=namespace)
+ self._attributes = Attributes(self)
+
+ def _setName(self, name):
+ self._name = infosetFilter.coerceElement(name)
+ self._element.tag = self._getETreeTag(
+ self._name, self._namespace)
+
+ def _getName(self):
+ return infosetFilter.fromXmlName(self._name)
+
+ name = property(_getName, _setName)
+
+ def _getAttributes(self):
+ return self._attributes
+
+ def _setAttributes(self, attributes):
+ self._attributes = Attributes(self, attributes)
+
+ attributes = property(_getAttributes, _setAttributes)
+
+ def insertText(self, data, insertBefore=None):
+ data = infosetFilter.coerceCharacters(data)
+ builder.Element.insertText(self, data, insertBefore)
+
+ def appendChild(self, child):
+ builder.Element.appendChild(self, child)
+
+ class Comment(builder.Comment):
+ def __init__(self, data):
+ data = infosetFilter.coerceComment(data)
+ builder.Comment.__init__(self, data)
+
+ def _setData(self, data):
+ data = infosetFilter.coerceComment(data)
+ self._element.text = data
+
+ def _getData(self):
+ return self._element.text
+
+ data = property(_getData, _setData)
+
+ self.elementClass = Element
+        self.commentClass = Comment
+ # self.fragmentClass = builder.DocumentFragment
+ _base.TreeBuilder.__init__(self, namespaceHTMLElements)
+
+ def reset(self):
+ _base.TreeBuilder.reset(self)
+ self.insertComment = self.insertCommentInitial
+ self.initial_comments = []
+ self.doctype = None
+
+ def testSerializer(self, element):
+ return testSerializer(element)
+
+ def getDocument(self):
+ if fullTree:
+ return self.document._elementTree
+ else:
+ return self.document._elementTree.getroot()
+
+ def getFragment(self):
+ fragment = []
+ element = self.openElements[0]._element
+ if element.text:
+ fragment.append(element.text)
+ fragment.extend(list(element))
+ if element.tail:
+ fragment.append(element.tail)
+ return fragment
+
+ def insertDoctype(self, token):
+ name = token["name"]
+ publicId = token["publicId"]
+ systemId = token["systemId"]
+
+ if not name:
+ warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
+ self.doctype = None
+ else:
+ coercedName = self.infosetFilter.coerceElement(name)
+ if coercedName != name:
+ warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)
+
+ doctype = self.doctypeClass(coercedName, publicId, systemId)
+ self.doctype = doctype
+
+ def insertCommentInitial(self, data, parent=None):
+ self.initial_comments.append(data)
+
+ def insertCommentMain(self, data, parent=None):
+ if (parent == self.document and
+ self.document._elementTree.getroot()[-1].tag == comment_type):
+ warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
+ super(TreeBuilder, self).insertComment(data, parent)
+
+ def insertRoot(self, token):
+ """Create the document root"""
+ # Because of the way libxml2 works, it doesn't seem to be possible to
+ # alter information like the doctype after the tree has been parsed.
+        # Therefore we need to use the built-in parser to create our initial
+ # tree, after which we can add elements like normal
+ docStr = ""
+ if self.doctype:
+ assert self.doctype.name
+ docStr += "<!DOCTYPE %s" % self.doctype.name
+ if (self.doctype.publicId is not None or
+ self.doctype.systemId is not None):
+ docStr += (' PUBLIC "%s" ' %
+ (self.infosetFilter.coercePubid(self.doctype.publicId or "")))
+ if self.doctype.systemId:
+ sysid = self.doctype.systemId
+ if sysid.find("'") >= 0 and sysid.find('"') >= 0:
+ warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
+ sysid = sysid.replace("'", 'U00027')
+ if sysid.find("'") >= 0:
+ docStr += '"%s"' % sysid
+ else:
+ docStr += "'%s'" % sysid
+ else:
+ docStr += "''"
+ docStr += ">"
+ if self.doctype.name != token["name"]:
+ warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
+ docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
+ root = etree.fromstring(docStr)
+
+ # Append the initial comments:
+ for comment_token in self.initial_comments:
+ root.addprevious(etree.Comment(comment_token["data"]))
+
+ # Create the root document and add the ElementTree to it
+ self.document = self.documentClass()
+ self.document._elementTree = root.getroottree()
+
+ # Give the root element the right name
+ name = token["name"]
+ namespace = token.get("namespace", self.defaultNamespace)
+ if namespace is None:
+ etree_tag = name
+ else:
+ etree_tag = "{%s}%s" % (namespace, name)
+ root.tag = etree_tag
+
+ # Add the root element to the internal child/open data structures
+ root_element = self.elementClass(name, namespace)
+ root_element._element = root
+ self.document._childNodes.append(root_element)
+ self.openElements.append(root_element)
+
+ # Reset to the default insert comment function
+ self.insertComment = self.insertCommentMain
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/__init__.py b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/__init__.py
new file mode 100644
index 000000000..18124e75f
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/__init__.py
@@ -0,0 +1,57 @@
+"""A collection of modules for iterating through different kinds of
+tree, generating tokens identical to those produced by the tokenizer
+module.
+
+To create a tree walker for a new type of tree, you need to
+implement a tree walker object (called TreeWalker by convention) that
+implements a 'serialize' method taking a tree as sole argument and
+returning an iterator generating tokens.
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+import sys
+
+from ..utils import default_etree
+
+treeWalkerCache = {}
+
+
+def getTreeWalker(treeType, implementation=None, **kwargs):
+ """Get a TreeWalker class for various types of tree with built-in support
+
+ treeType - the name of the tree type required (case-insensitive). Supported
+ values are:
+
+ "dom" - The xml.dom.minidom DOM implementation
+ "pulldom" - The xml.dom.pulldom event stream
+ "etree" - A generic walker for tree implementations exposing an
+ elementtree-like interface (known to work with
+ ElementTree, cElementTree and lxml.etree).
+ "lxml" - Optimized walker for lxml.etree
+ "genshi" - a Genshi stream
+
+ implementation - (Currently applies to the "etree" tree type only). A module
+ implementing the tree type e.g. xml.etree.ElementTree or
+ cElementTree."""
+
+ treeType = treeType.lower()
+ if treeType not in treeWalkerCache:
+ if treeType in ("dom", "pulldom"):
+ name = "%s.%s" % (__name__, treeType)
+ __import__(name)
+ mod = sys.modules[name]
+ treeWalkerCache[treeType] = mod.TreeWalker
+ elif treeType == "genshi":
+ from . import genshistream
+ treeWalkerCache[treeType] = genshistream.TreeWalker
+ elif treeType == "lxml":
+ from . import lxmletree
+ treeWalkerCache[treeType] = lxmletree.TreeWalker
+ elif treeType == "etree":
+ from . import etree
+ if implementation is None:
+ implementation = default_etree
+ # XXX: NEVER cache here, caching is done in the etree submodule
+ return etree.getETreeModule(implementation, **kwargs).TreeWalker
+ return treeWalkerCache.get(treeType)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/_base.py b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/_base.py
new file mode 100644
index 000000000..34252e50c
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/_base.py
@@ -0,0 +1,200 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import text_type, string_types
+
+import gettext
+_ = gettext.gettext
+
+from xml.dom import Node
+
+DOCUMENT = Node.DOCUMENT_NODE
+DOCTYPE = Node.DOCUMENT_TYPE_NODE
+TEXT = Node.TEXT_NODE
+ELEMENT = Node.ELEMENT_NODE
+COMMENT = Node.COMMENT_NODE
+ENTITY = Node.ENTITY_NODE
+UNKNOWN = "<#UNKNOWN#>"
+
+from ..constants import voidElements, spaceCharacters
+spaceCharacters = "".join(spaceCharacters)
+
+
+def to_text(s, blank_if_none=True):
+ """Wrapper around six.text_type to convert None to empty string"""
+ if s is None:
+ if blank_if_none:
+ return ""
+ else:
+ return None
+ elif isinstance(s, text_type):
+ return s
+ else:
+ return text_type(s)
+
+
+def is_text_or_none(string):
+ """Wrapper around isinstance(string_types) or is None"""
+ return string is None or isinstance(string, string_types)
+
+
+class TreeWalker(object):
+ def __init__(self, tree):
+ self.tree = tree
+
+ def __iter__(self):
+ raise NotImplementedError
+
+ def error(self, msg):
+ return {"type": "SerializeError", "data": msg}
+
+ def emptyTag(self, namespace, name, attrs, hasChildren=False):
+ assert namespace is None or isinstance(namespace, string_types), type(namespace)
+ assert isinstance(name, string_types), type(name)
+ assert all((namespace is None or isinstance(namespace, string_types)) and
+ isinstance(name, string_types) and
+ isinstance(value, string_types)
+ for (namespace, name), value in attrs.items())
+
+ yield {"type": "EmptyTag", "name": to_text(name, False),
+ "namespace": to_text(namespace),
+ "data": attrs}
+ if hasChildren:
+ yield self.error(_("Void element has children"))
+
+ def startTag(self, namespace, name, attrs):
+ assert namespace is None or isinstance(namespace, string_types), type(namespace)
+ assert isinstance(name, string_types), type(name)
+ assert all((namespace is None or isinstance(namespace, string_types)) and
+ isinstance(name, string_types) and
+ isinstance(value, string_types)
+ for (namespace, name), value in attrs.items())
+
+ return {"type": "StartTag",
+ "name": text_type(name),
+ "namespace": to_text(namespace),
+ "data": dict(((to_text(namespace, False), to_text(name)),
+ to_text(value, False))
+ for (namespace, name), value in attrs.items())}
+
+ def endTag(self, namespace, name):
+ assert namespace is None or isinstance(namespace, string_types), type(namespace)
+        assert isinstance(name, string_types), type(name)
+
+ return {"type": "EndTag",
+ "name": to_text(name, False),
+ "namespace": to_text(namespace),
+ "data": {}}
+
+ def text(self, data):
+ assert isinstance(data, string_types), type(data)
+
+ data = to_text(data)
+ middle = data.lstrip(spaceCharacters)
+ left = data[:len(data) - len(middle)]
+ if left:
+ yield {"type": "SpaceCharacters", "data": left}
+ data = middle
+ middle = data.rstrip(spaceCharacters)
+ right = data[len(middle):]
+ if middle:
+ yield {"type": "Characters", "data": middle}
+ if right:
+ yield {"type": "SpaceCharacters", "data": right}
+
+ def comment(self, data):
+ assert isinstance(data, string_types), type(data)
+
+ return {"type": "Comment", "data": text_type(data)}
+
+ def doctype(self, name, publicId=None, systemId=None, correct=True):
+ assert is_text_or_none(name), type(name)
+ assert is_text_or_none(publicId), type(publicId)
+ assert is_text_or_none(systemId), type(systemId)
+
+ return {"type": "Doctype",
+ "name": to_text(name),
+ "publicId": to_text(publicId),
+ "systemId": to_text(systemId),
+ "correct": to_text(correct)}
+
+ def entity(self, name):
+ assert isinstance(name, string_types), type(name)
+
+ return {"type": "Entity", "name": text_type(name)}
+
+ def unknown(self, nodeType):
+ return self.error(_("Unknown node type: ") + nodeType)
+
+
+class NonRecursiveTreeWalker(TreeWalker):
+ def getNodeDetails(self, node):
+ raise NotImplementedError
+
+ def getFirstChild(self, node):
+ raise NotImplementedError
+
+ def getNextSibling(self, node):
+ raise NotImplementedError
+
+ def getParentNode(self, node):
+ raise NotImplementedError
+
+ def __iter__(self):
+ currentNode = self.tree
+ while currentNode is not None:
+ details = self.getNodeDetails(currentNode)
+ type, details = details[0], details[1:]
+ hasChildren = False
+
+ if type == DOCTYPE:
+ yield self.doctype(*details)
+
+ elif type == TEXT:
+ for token in self.text(*details):
+ yield token
+
+ elif type == ELEMENT:
+ namespace, name, attributes, hasChildren = details
+ if name in voidElements:
+ for token in self.emptyTag(namespace, name, attributes,
+ hasChildren):
+ yield token
+ hasChildren = False
+ else:
+ yield self.startTag(namespace, name, attributes)
+
+ elif type == COMMENT:
+ yield self.comment(details[0])
+
+ elif type == ENTITY:
+ yield self.entity(details[0])
+
+ elif type == DOCUMENT:
+ hasChildren = True
+
+ else:
+ yield self.unknown(details[0])
+
+ if hasChildren:
+ firstChild = self.getFirstChild(currentNode)
+ else:
+ firstChild = None
+
+ if firstChild is not None:
+ currentNode = firstChild
+ else:
+ while currentNode is not None:
+ details = self.getNodeDetails(currentNode)
+ type, details = details[0], details[1:]
+ if type == ELEMENT:
+ namespace, name, attributes, hasChildren = details
+ if name not in voidElements:
+ yield self.endTag(namespace, name)
+ if self.tree is currentNode:
+ currentNode = None
+ break
+ nextSibling = self.getNextSibling(currentNode)
+ if nextSibling is not None:
+ currentNode = nextSibling
+ break
+ else:
+ currentNode = self.getParentNode(currentNode)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/dom.py b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/dom.py
new file mode 100644
index 000000000..a01287a94
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/dom.py
@@ -0,0 +1,46 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from xml.dom import Node
+
+import gettext
+_ = gettext.gettext
+
+from . import _base
+
+
+class TreeWalker(_base.NonRecursiveTreeWalker):
+ def getNodeDetails(self, node):
+ if node.nodeType == Node.DOCUMENT_TYPE_NODE:
+ return _base.DOCTYPE, node.name, node.publicId, node.systemId
+
+ elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
+ return _base.TEXT, node.nodeValue
+
+ elif node.nodeType == Node.ELEMENT_NODE:
+ attrs = {}
+ for attr in list(node.attributes.keys()):
+ attr = node.getAttributeNode(attr)
+ if attr.namespaceURI:
+ attrs[(attr.namespaceURI, attr.localName)] = attr.value
+ else:
+ attrs[(None, attr.name)] = attr.value
+ return (_base.ELEMENT, node.namespaceURI, node.nodeName,
+ attrs, node.hasChildNodes())
+
+ elif node.nodeType == Node.COMMENT_NODE:
+ return _base.COMMENT, node.nodeValue
+
+ elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
+ return (_base.DOCUMENT,)
+
+ else:
+ return _base.UNKNOWN, node.nodeType
+
+ def getFirstChild(self, node):
+ return node.firstChild
+
+ def getNextSibling(self, node):
+ return node.nextSibling
+
+ def getParentNode(self, node):
+ return node.parentNode
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/etree.py b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/etree.py
new file mode 100644
index 000000000..fd8a9cc9b
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/etree.py
@@ -0,0 +1,138 @@
+from __future__ import absolute_import, division, unicode_literals
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict
+ except ImportError:
+ OrderedDict = dict
+import gettext
+_ = gettext.gettext
+
+import re
+
+from six import text_type
+
+from . import _base
+from ..utils import moduleFactoryFactory
+
+tag_regexp = re.compile("{([^}]*)}(.*)")
+
+
+def getETreeBuilder(ElementTreeImplementation):
+ ElementTree = ElementTreeImplementation
+ ElementTreeCommentType = ElementTree.Comment("asd").tag
+
+ class TreeWalker(_base.NonRecursiveTreeWalker):
+ """Given the particular ElementTree representation, this implementation,
+ to avoid using recursion, returns "nodes" as tuples with the following
+ content:
+
+ 1. The current element
+
+ 2. The index of the element relative to its parent
+
+ 3. A stack of ancestor elements
+
+ 4. A flag "text", "tail" or None to indicate if the current node is a
+ text node; either the text or tail of the current element (1)
+ """
+ def getNodeDetails(self, node):
+ if isinstance(node, tuple): # It might be the root Element
+ elt, key, parents, flag = node
+ if flag in ("text", "tail"):
+ return _base.TEXT, getattr(elt, flag)
+ else:
+ node = elt
+
+ if not(hasattr(node, "tag")):
+ node = node.getroot()
+
+ if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
+ return (_base.DOCUMENT,)
+
+ elif node.tag == "<!DOCTYPE>":
+ return (_base.DOCTYPE, node.text,
+ node.get("publicId"), node.get("systemId"))
+
+ elif node.tag == ElementTreeCommentType:
+ return _base.COMMENT, node.text
+
+ else:
+ assert type(node.tag) == text_type, type(node.tag)
+ # This is assumed to be an ordinary element
+ match = tag_regexp.match(node.tag)
+ if match:
+ namespace, tag = match.groups()
+ else:
+ namespace = None
+ tag = node.tag
+ attrs = OrderedDict()
+ for name, value in list(node.attrib.items()):
+ match = tag_regexp.match(name)
+ if match:
+ attrs[(match.group(1), match.group(2))] = value
+ else:
+ attrs[(None, name)] = value
+ return (_base.ELEMENT, namespace, tag,
+ attrs, len(node) or node.text)
+
+ def getFirstChild(self, node):
+ if isinstance(node, tuple):
+ element, key, parents, flag = node
+ else:
+ element, key, parents, flag = node, None, [], None
+
+ if flag in ("text", "tail"):
+ return None
+ else:
+ if element.text:
+ return element, key, parents, "text"
+ elif len(element):
+ parents.append(element)
+ return element[0], 0, parents, None
+ else:
+ return None
+
+ def getNextSibling(self, node):
+ if isinstance(node, tuple):
+ element, key, parents, flag = node
+ else:
+ return None
+
+ if flag == "text":
+ if len(element):
+ parents.append(element)
+ return element[0], 0, parents, None
+ else:
+ return None
+ else:
+ if element.tail and flag != "tail":
+ return element, key, parents, "tail"
+ elif key < len(parents[-1]) - 1:
+ return parents[-1][key + 1], key + 1, parents, None
+ else:
+ return None
+
+ def getParentNode(self, node):
+ if isinstance(node, tuple):
+ element, key, parents, flag = node
+ else:
+ return None
+
+ if flag == "text":
+ if not parents:
+ return element
+ else:
+ return element, key, parents, None
+ else:
+ parent = parents.pop()
+ if not parents:
+ return parent
+ else:
+ return parent, list(parents[-1]).index(parent), parents, None
+
+ return locals()
+
+getETreeModule = moduleFactoryFactory(getETreeBuilder)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/genshistream.py b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/genshistream.py
new file mode 100644
index 000000000..f559c45d0
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/genshistream.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from genshi.core import QName
+from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
+from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
+
+from . import _base
+
+from ..constants import voidElements, namespaces
+
+
+class TreeWalker(_base.TreeWalker):
+ def __iter__(self):
+ # Buffer the events so we can pass in the following one
+ previous = None
+ for event in self.tree:
+ if previous is not None:
+ for token in self.tokens(previous, event):
+ yield token
+ previous = event
+
+ # Don't forget the final event!
+ if previous is not None:
+ for token in self.tokens(previous, None):
+ yield token
+
+ def tokens(self, event, next):
+ kind, data, pos = event
+ if kind == START:
+ tag, attribs = data
+ name = tag.localname
+ namespace = tag.namespace
+ converted_attribs = {}
+ for k, v in attribs:
+ if isinstance(k, QName):
+ converted_attribs[(k.namespace, k.localname)] = v
+ else:
+ converted_attribs[(None, k)] = v
+
+ if namespace == namespaces["html"] and name in voidElements:
+ for token in self.emptyTag(namespace, name, converted_attribs,
+ not next or next[0] != END
+ or next[1] != tag):
+ yield token
+ else:
+ yield self.startTag(namespace, name, converted_attribs)
+
+ elif kind == END:
+ name = data.localname
+ namespace = data.namespace
+ if name not in voidElements:
+ yield self.endTag(namespace, name)
+
+ elif kind == COMMENT:
+ yield self.comment(data)
+
+ elif kind == TEXT:
+ for token in self.text(data):
+ yield token
+
+ elif kind == DOCTYPE:
+ yield self.doctype(*data)
+
+ elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
+ START_CDATA, END_CDATA, PI):
+ pass
+
+ else:
+ yield self.unknown(kind)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/lxmletree.py b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/lxmletree.py
new file mode 100644
index 000000000..bc934ac05
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/lxmletree.py
@@ -0,0 +1,204 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import text_type
+
+from lxml import etree
+from ..treebuilders.etree import tag_regexp
+
+from gettext import gettext
+_ = gettext
+
+from . import _base
+
+from .. import ihatexml
+
+
+def ensure_str(s):
+ if s is None:
+ return None
+ elif isinstance(s, text_type):
+ return s
+ else:
+ return s.decode("utf-8", "strict")
+
+
+class Root(object):
+ def __init__(self, et):
+ self.elementtree = et
+ self.children = []
+ if et.docinfo.internalDTD:
+ self.children.append(Doctype(self,
+ ensure_str(et.docinfo.root_name),
+ ensure_str(et.docinfo.public_id),
+ ensure_str(et.docinfo.system_url)))
+ root = et.getroot()
+ node = root
+
+ while node.getprevious() is not None:
+ node = node.getprevious()
+ while node is not None:
+ self.children.append(node)
+ node = node.getnext()
+
+ self.text = None
+ self.tail = None
+
+ def __getitem__(self, key):
+ return self.children[key]
+
+ def getnext(self):
+ return None
+
+ def __len__(self):
+ return 1
+
+
+class Doctype(object):
+ def __init__(self, root_node, name, public_id, system_id):
+ self.root_node = root_node
+ self.name = name
+ self.public_id = public_id
+ self.system_id = system_id
+
+ self.text = None
+ self.tail = None
+
+ def getnext(self):
+ return self.root_node.children[1]
+
+
+class FragmentRoot(Root):
+ def __init__(self, children):
+ self.children = [FragmentWrapper(self, child) for child in children]
+ self.text = self.tail = None
+
+ def getnext(self):
+ return None
+
+
+class FragmentWrapper(object):
+ def __init__(self, fragment_root, obj):
+ self.root_node = fragment_root
+ self.obj = obj
+ if hasattr(self.obj, 'text'):
+ self.text = ensure_str(self.obj.text)
+ else:
+ self.text = None
+ if hasattr(self.obj, 'tail'):
+ self.tail = ensure_str(self.obj.tail)
+ else:
+ self.tail = None
+
+ def __getattr__(self, name):
+ return getattr(self.obj, name)
+
+ def getnext(self):
+ siblings = self.root_node.children
+ idx = siblings.index(self)
+ if idx < len(siblings) - 1:
+ return siblings[idx + 1]
+ else:
+ return None
+
+ def __getitem__(self, key):
+ return self.obj[key]
+
+ def __bool__(self):
+ return bool(self.obj)
+
+ def getparent(self):
+ return None
+
+ def __str__(self):
+ return str(self.obj)
+
+ def __unicode__(self):
+ return str(self.obj)
+
+ def __len__(self):
+ return len(self.obj)
+
+
+class TreeWalker(_base.NonRecursiveTreeWalker):
+ def __init__(self, tree):
+ if hasattr(tree, "getroot"):
+ tree = Root(tree)
+ elif isinstance(tree, list):
+ tree = FragmentRoot(tree)
+ _base.NonRecursiveTreeWalker.__init__(self, tree)
+ self.filter = ihatexml.InfosetFilter()
+
+ def getNodeDetails(self, node):
+ if isinstance(node, tuple): # Text node
+ node, key = node
+ assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
+ return _base.TEXT, ensure_str(getattr(node, key))
+
+ elif isinstance(node, Root):
+ return (_base.DOCUMENT,)
+
+ elif isinstance(node, Doctype):
+ return _base.DOCTYPE, node.name, node.public_id, node.system_id
+
+ elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
+ return _base.TEXT, node.obj
+
+ elif node.tag == etree.Comment:
+ return _base.COMMENT, ensure_str(node.text)
+
+ elif node.tag == etree.Entity:
+ return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
+
+ else:
+ # This is assumed to be an ordinary element
+ match = tag_regexp.match(ensure_str(node.tag))
+ if match:
+ namespace, tag = match.groups()
+ else:
+ namespace = None
+ tag = ensure_str(node.tag)
+ attrs = {}
+ for name, value in list(node.attrib.items()):
+ name = ensure_str(name)
+ value = ensure_str(value)
+ match = tag_regexp.match(name)
+ if match:
+ attrs[(match.group(1), match.group(2))] = value
+ else:
+ attrs[(None, name)] = value
+ return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
+ attrs, len(node) > 0 or node.text)
+
+ def getFirstChild(self, node):
+ assert not isinstance(node, tuple), _("Text nodes have no children")
+
+ assert len(node) or node.text, "Node has no children"
+ if node.text:
+ return (node, "text")
+ else:
+ return node[0]
+
+ def getNextSibling(self, node):
+ if isinstance(node, tuple): # Text node
+ node, key = node
+ assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
+ if key == "text":
+ # XXX: we cannot use a "bool(node) and node[0] or None" construct here
+ # because node[0] might evaluate to False if it has no child element
+ if len(node):
+ return node[0]
+ else:
+ return None
+ else: # tail
+ return node.getnext()
+
+ return (node, "tail") if node.tail else node.getnext()
+
+ def getParentNode(self, node):
+ if isinstance(node, tuple): # Text node
+ node, key = node
+ assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
+ if key == "text":
+ return node
+ # else: fallback to "normal" processing
+
+ return node.getparent()
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/pulldom.py b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/pulldom.py
new file mode 100644
index 000000000..0b0f515fe
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/treewalkers/pulldom.py
@@ -0,0 +1,63 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
+ COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
+
+from . import _base
+
+from ..constants import voidElements
+
+
+class TreeWalker(_base.TreeWalker):
+ def __iter__(self):
+ ignore_until = None
+ previous = None
+ for event in self.tree:
+ if previous is not None and \
+ (ignore_until is None or previous[1] is ignore_until):
+ if previous[1] is ignore_until:
+ ignore_until = None
+ for token in self.tokens(previous, event):
+ yield token
+ if token["type"] == "EmptyTag":
+ ignore_until = previous[1]
+ previous = event
+ if ignore_until is None or previous[1] is ignore_until:
+ for token in self.tokens(previous, None):
+ yield token
+ elif ignore_until is not None:
+ raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
+
+ def tokens(self, event, next):
+ type, node = event
+ if type == START_ELEMENT:
+ name = node.nodeName
+ namespace = node.namespaceURI
+ attrs = {}
+ for attr in list(node.attributes.keys()):
+ attr = node.getAttributeNode(attr)
+ attrs[(attr.namespaceURI, attr.localName)] = attr.value
+ if name in voidElements:
+ for token in self.emptyTag(namespace,
+ name,
+ attrs,
+ not next or next[1] is not node):
+ yield token
+ else:
+ yield self.startTag(namespace, name, attrs)
+
+ elif type == END_ELEMENT:
+ name = node.nodeName
+ namespace = node.namespaceURI
+ if name not in voidElements:
+ yield self.endTag(namespace, name)
+
+ elif type == COMMENT:
+ yield self.comment(node.nodeValue)
+
+ elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
+ for token in self.text(node.nodeValue):
+ yield token
+
+ else:
+ yield self.unknown(type)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/trie/__init__.py b/testing/web-platform/tests/tools/html5lib/html5lib/trie/__init__.py
new file mode 100644
index 000000000..a8cca8a9a
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/trie/__init__.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from .py import Trie as PyTrie
+
+Trie = PyTrie
+
+try:
+ from .datrie import Trie as DATrie
+except ImportError:
+ pass
+else:
+ Trie = DATrie
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/trie/_base.py b/testing/web-platform/tests/tools/html5lib/html5lib/trie/_base.py
new file mode 100644
index 000000000..724486b16
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/trie/_base.py
@@ -0,0 +1,37 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from collections import Mapping
+
+
+class Trie(Mapping):
+ """Abstract base class for tries"""
+
+ def keys(self, prefix=None):
+        keys = super(Trie, self).keys()
+
+ if prefix is None:
+ return set(keys)
+
+ # Python 2.6: no set comprehensions
+ return set([x for x in keys if x.startswith(prefix)])
+
+ def has_keys_with_prefix(self, prefix):
+ for key in self.keys():
+ if key.startswith(prefix):
+ return True
+
+ return False
+
+ def longest_prefix(self, prefix):
+ if prefix in self:
+ return prefix
+
+ for i in range(1, len(prefix) + 1):
+ if prefix[:-i] in self:
+ return prefix[:-i]
+
+ raise KeyError(prefix)
+
+ def longest_prefix_item(self, prefix):
+ lprefix = self.longest_prefix(prefix)
+ return (lprefix, self[lprefix])
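+
+
+# Behaviour sketch (illustrative) using the concrete pure-Python Trie from
+# trie/py.py, which inherits longest_prefix from this base class:
+#     t = Trie({"a": 1, "ab": 2})
+#     t.longest_prefix("abc")      # -> "ab"
+#     t.longest_prefix_item("abc") # -> ("ab", 2)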
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/trie/datrie.py b/testing/web-platform/tests/tools/html5lib/html5lib/trie/datrie.py
new file mode 100644
index 000000000..51f3d046a
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/trie/datrie.py
@@ -0,0 +1,44 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from datrie import Trie as DATrie
+from six import text_type
+
+from ._base import Trie as ABCTrie
+
+
+class Trie(ABCTrie):
+ def __init__(self, data):
+ chars = set()
+ for key in data.keys():
+ if not isinstance(key, text_type):
+ raise TypeError("All keys must be strings")
+ for char in key:
+ chars.add(char)
+
+ self._data = DATrie("".join(chars))
+ for key, value in data.items():
+ self._data[key] = value
+
+ def __contains__(self, key):
+ return key in self._data
+
+ def __len__(self):
+ return len(self._data)
+
+ def __iter__(self):
+ raise NotImplementedError()
+
+ def __getitem__(self, key):
+ return self._data[key]
+
+ def keys(self, prefix=None):
+ return self._data.keys(prefix)
+
+ def has_keys_with_prefix(self, prefix):
+ return self._data.has_keys_with_prefix(prefix)
+
+ def longest_prefix(self, prefix):
+ return self._data.longest_prefix(prefix)
+
+ def longest_prefix_item(self, prefix):
+ return self._data.longest_prefix_item(prefix)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/trie/py.py b/testing/web-platform/tests/tools/html5lib/html5lib/trie/py.py
new file mode 100644
index 000000000..c2ba3da75
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/trie/py.py
@@ -0,0 +1,67 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import text_type
+
+from bisect import bisect_left
+
+from ._base import Trie as ABCTrie
+
+
+class Trie(ABCTrie):
+ def __init__(self, data):
+ if not all(isinstance(x, text_type) for x in data.keys()):
+ raise TypeError("All keys must be strings")
+
+ self._data = data
+ self._keys = sorted(data.keys())
+ self._cachestr = ""
+ self._cachepoints = (0, len(data))
+
+ def __contains__(self, key):
+ return key in self._data
+
+ def __len__(self):
+ return len(self._data)
+
+ def __iter__(self):
+ return iter(self._data)
+
+ def __getitem__(self, key):
+ return self._data[key]
+
+ def keys(self, prefix=None):
+ if prefix is None or prefix == "" or not self._keys:
+ return set(self._keys)
+
+ if prefix.startswith(self._cachestr):
+ lo, hi = self._cachepoints
+ start = i = bisect_left(self._keys, prefix, lo, hi)
+ else:
+ start = i = bisect_left(self._keys, prefix)
+
+ keys = set()
+ if start == len(self._keys):
+ return keys
+
+ while self._keys[i].startswith(prefix):
+ keys.add(self._keys[i])
+ i += 1
+
+ self._cachestr = prefix
+ self._cachepoints = (start, i)
+
+ return keys
+
+ def has_keys_with_prefix(self, prefix):
+ if prefix in self._data:
+ return True
+
+ if prefix.startswith(self._cachestr):
+ lo, hi = self._cachepoints
+ i = bisect_left(self._keys, prefix, lo, hi)
+ else:
+ i = bisect_left(self._keys, prefix)
+
+ if i == len(self._keys):
+ return False
+
+ return self._keys[i].startswith(prefix)
diff --git a/testing/web-platform/tests/tools/html5lib/html5lib/utils.py b/testing/web-platform/tests/tools/html5lib/html5lib/utils.py
new file mode 100644
index 000000000..2f41f4dfa
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/html5lib/utils.py
@@ -0,0 +1,82 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from types import ModuleType
+
+try:
+ import xml.etree.cElementTree as default_etree
+except ImportError:
+ import xml.etree.ElementTree as default_etree
+
+
+__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
+ "surrogatePairToCodepoint", "moduleFactoryFactory"]
+
+
+class MethodDispatcher(dict):
+ """Dict with 2 special properties:
+
+ On initiation, keys that are lists, sets or tuples are converted to
+ multiple keys so accessing any one of the items in the original
+ list-like object returns the matching value
+
+ md = MethodDispatcher({("foo", "bar"):"baz"})
+ md["foo"] == "baz"
+
+ A default value which can be set through the default attribute.
+ """
+
+ def __init__(self, items=()):
+ # Using _dictEntries instead of directly assigning to self is about
+ # twice as fast. Please do careful performance testing before changing
+ # anything here.
+ _dictEntries = []
+ for name, value in items:
+ if type(name) in (list, tuple, frozenset, set):
+ for item in name:
+ _dictEntries.append((item, value))
+ else:
+ _dictEntries.append((name, value))
+ dict.__init__(self, _dictEntries)
+ self.default = None
+
+ def __getitem__(self, key):
+ return dict.get(self, key, self.default)
+
+
+# Some utility functions to deal with weirdness around UCS2 vs UCS4
+# Python builds
+
+def isSurrogatePair(data):
+ return (len(data) == 2 and
+ ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
+ ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
+
+
+def surrogatePairToCodepoint(data):
+ char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
+ (ord(data[1]) - 0xDC00))
+ return char_val
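+
+# Worked example (illustrative): for the surrogate pair u"\uD83D\uDE00",
+# 0x10000 + (0xD83D - 0xD800) * 0x400 + (0xDE00 - 0xDC00) = 0x1F600,
+# i.e. U+1F600.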
+
+# Module Factory Factory (no, this isn't Java, I know)
+# Here to stop this being duplicated all over the place.
+
+
+def moduleFactoryFactory(factory):
+ moduleCache = {}
+
+ def moduleFactory(baseModule, *args, **kwargs):
+ if isinstance(ModuleType.__name__, type("")):
+ name = "_%s_factory" % baseModule.__name__
+ else:
+ name = b"_%s_factory" % baseModule.__name__
+
+ if name in moduleCache:
+ return moduleCache[name]
+ else:
+ mod = ModuleType(name)
+ objs = factory(baseModule, *args, **kwargs)
+ mod.__dict__.update(objs)
+ moduleCache[name] = mod
+ return mod
+
+ return moduleFactory
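+
+# Usage sketch (mirrors how the etree treebuilder/treewalker modules use
+# this; illustrative):
+#     getETreeModule = moduleFactoryFactory(getETreeBuilder)
+#     mod = getETreeModule(xml.etree.ElementTree)
+#     # a second call with the same base module returns the cached module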
diff --git a/testing/web-platform/tests/tools/html5lib/parse.py b/testing/web-platform/tests/tools/html5lib/parse.py
new file mode 100755
index 000000000..9cbf3b8d1
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/parse.py
@@ -0,0 +1,233 @@
+#!/usr/bin/env python
+"""usage: %prog [options] filename
+
+Parse a document to a tree, with optional profiling
+"""
+
+import sys
+import os
+import traceback
+from optparse import OptionParser
+
+from html5lib import html5parser, sanitizer
+from html5lib.tokenizer import HTMLTokenizer
+from html5lib import treebuilders, serializer, treewalkers
+from html5lib import constants
+
+def parse():
+ optParser = getOptParser()
+ opts,args = optParser.parse_args()
+ encoding = "utf8"
+
+ try:
+ f = args[-1]
+ # Try opening from the internet
+ if f.startswith('http://'):
+ try:
+ import urllib.request, urllib.parse, urllib.error, cgi
+ f = urllib.request.urlopen(f)
+ contentType = f.headers.get('content-type')
+ if contentType:
+ (mediaType, params) = cgi.parse_header(contentType)
+ encoding = params.get('charset')
+ except:
+ pass
+ elif f == '-':
+ f = sys.stdin
+ if sys.version_info[0] >= 3:
+ encoding = None
+ else:
+ try:
+ # Try opening from file system
+ f = open(f, "rb")
+ except IOError as e:
+ sys.stderr.write("Unable to open file: %s\n" % e)
+ sys.exit(1)
+ except IndexError:
+ sys.stderr.write("No filename provided. Use -h for help\n")
+ sys.exit(1)
+
+ treebuilder = treebuilders.getTreeBuilder(opts.treebuilder)
+
+ if opts.sanitize:
+ tokenizer = sanitizer.HTMLSanitizer
+ else:
+ tokenizer = HTMLTokenizer
+
+ p = html5parser.HTMLParser(tree=treebuilder, tokenizer=tokenizer, debug=opts.log)
+
+ if opts.fragment:
+ parseMethod = p.parseFragment
+ else:
+ parseMethod = p.parse
+
+ if opts.profile:
+ import cProfile
+ import pstats
+ cProfile.runctx("run(parseMethod, f, encoding)", None,
+ {"run": run,
+ "parseMethod": parseMethod,
+ "f": f,
+ "encoding": encoding},
+ "stats.prof")
+ # XXX - We should use a temp file here
+ stats = pstats.Stats('stats.prof')
+ stats.strip_dirs()
+ stats.sort_stats('time')
+ stats.print_stats()
+ elif opts.time:
+ import time
+ t0 = time.time()
+ document = run(parseMethod, f, encoding)
+ t1 = time.time()
+ if document:
+ printOutput(p, document, opts)
+ t2 = time.time()
+ sys.stderr.write("\n\nRun took: %fs (plus %fs to print the output)"%(t1-t0, t2-t1))
+ else:
+ sys.stderr.write("\n\nRun took: %fs"%(t1-t0))
+ else:
+ document = run(parseMethod, f, encoding)
+ if document:
+ printOutput(p, document, opts)
+
+def run(parseMethod, f, encoding):
+ try:
+ document = parseMethod(f, encoding=encoding)
+ except:
+ document = None
+ traceback.print_exc()
+ return document
+
+def printOutput(parser, document, opts):
+ if opts.encoding:
+ print("Encoding:", parser.tokenizer.stream.charEncoding)
+
+ for item in parser.log:
+ print(item)
+
+ if document is not None:
+ if opts.xml:
+ sys.stdout.write(document.toxml("utf-8"))
+ elif opts.tree:
+ if not hasattr(document,'__getitem__'):
+ document = [document]
+ for fragment in document:
+ print(parser.tree.testSerializer(fragment))
+ elif opts.hilite:
+ sys.stdout.write(document.hilite("utf-8"))
+ elif opts.html:
+ kwargs = {}
+ for opt in serializer.HTMLSerializer.options:
+ try:
+ kwargs[opt] = getattr(opts,opt)
+ except:
+ pass
+ if not kwargs['quote_char']:
+ del kwargs['quote_char']
+
+ tokens = treewalkers.getTreeWalker(opts.treebuilder)(document)
+ if sys.version_info[0] >= 3:
+ encoding = None
+ else:
+ encoding = "utf-8"
+ for text in serializer.HTMLSerializer(**kwargs).serialize(tokens, encoding=encoding):
+ sys.stdout.write(text)
+ if not text.endswith('\n'): sys.stdout.write('\n')
+ if opts.error:
+ errList=[]
+ for pos, errorcode, datavars in parser.errors:
+ errList.append("Line %i Col %i"%pos + " " + constants.E.get(errorcode, 'Unknown error "%s"' % errorcode) % datavars)
+ sys.stdout.write("\nParse errors:\n" + "\n".join(errList)+"\n")
+
+def getOptParser():
+ parser = OptionParser(usage=__doc__)
+
+ parser.add_option("-p", "--profile", action="store_true", default=False,
+ dest="profile", help="Use the hotshot profiler to "
+ "produce a detailed log of the run")
+
+ parser.add_option("-t", "--time",
+ action="store_true", default=False, dest="time",
+ help="Time the run using time.time (may not be accurate on all platforms, especially for short runs)")
+
+ parser.add_option("-b", "--treebuilder", action="store", type="string",
+ dest="treebuilder", default="simpleTree")
+
+ parser.add_option("-e", "--error", action="store_true", default=False,
+ dest="error", help="Print a list of parse errors")
+
+ parser.add_option("-f", "--fragment", action="store_true", default=False,
+ dest="fragment", help="Parse as a fragment")
+
+ parser.add_option("", "--tree", action="store_true", default=False,
+ dest="tree", help="Output as debug tree")
+
+ parser.add_option("-x", "--xml", action="store_true", default=False,
+ dest="xml", help="Output as xml")
+
+ parser.add_option("", "--no-html", action="store_false", default=True,
+ dest="html", help="Don't output html")
+
+ parser.add_option("", "--hilite", action="store_true", default=False,
+ dest="hilite", help="Output as formatted highlighted code.")
+
+ parser.add_option("-c", "--encoding", action="store_true", default=False,
+ dest="encoding", help="Print character encoding used")
+
+ parser.add_option("", "--inject-meta-charset", action="store_true",
+ default=False, dest="inject_meta_charset",
+ help="inject <meta charset>")
+
+ parser.add_option("", "--strip-whitespace", action="store_true",
+ default=False, dest="strip_whitespace",
+ help="strip whitespace")
+
+ parser.add_option("", "--omit-optional-tags", action="store_true",
+ default=False, dest="omit_optional_tags",
+ help="omit optional tags")
+
+ parser.add_option("", "--quote-attr-values", action="store_true",
+ default=False, dest="quote_attr_values",
+ help="quote attribute values")
+
+ parser.add_option("", "--use-best-quote-char", action="store_true",
+ default=False, dest="use_best_quote_char",
+ help="use best quote character")
+
+ parser.add_option("", "--quote-char", action="store",
+ default=None, dest="quote_char",
+ help="quote character")
+
+ parser.add_option("", "--no-minimize-boolean-attributes",
+ action="store_false", default=True,
+ dest="minimize_boolean_attributes",
+ help="minimize boolean attributes")
+
+ parser.add_option("", "--use-trailing-solidus", action="store_true",
+ default=False, dest="use_trailing_solidus",
+ help="use trailing solidus")
+
+ parser.add_option("", "--space-before-trailing-solidus",
+ action="store_true", default=False,
+ dest="space_before_trailing_solidus",
+ help="add space before trailing solidus")
+
+ parser.add_option("", "--escape-lt-in-attrs", action="store_true",
+ default=False, dest="escape_lt_in_attrs",
+ help="escape less than signs in attribute values")
+
+ parser.add_option("", "--escape-rcdata", action="store_true",
+ default=False, dest="escape_rcdata",
+ help="escape rcdata element values")
+
+ parser.add_option("", "--sanitize", action="store_true", default=False,
+ dest="sanitize", help="sanitize")
+
+ parser.add_option("-l", "--log", action="store_true", default=False,
+ dest="log", help="log state transitions")
+
+ return parser
+
+if __name__ == "__main__":
+ parse()
diff --git a/testing/web-platform/tests/tools/html5lib/requirements-install.sh b/testing/web-platform/tests/tools/html5lib/requirements-install.sh
new file mode 100755
index 000000000..5f8ba5064
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/requirements-install.sh
@@ -0,0 +1,16 @@
+#!/bin/bash -e
+
+if [[ $USE_OPTIONAL != "true" && $USE_OPTIONAL != "false" ]]; then
+ echo "fatal: \$USE_OPTIONAL not set to true or false. Exiting."
+ exit 1
+fi
+
+pip install -r requirements-test.txt
+
+if [[ $USE_OPTIONAL == "true" && $TRAVIS_PYTHON_VERSION != "pypy" ]]; then
+ if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then
+ pip install --allow-external Genshi --allow-insecure Genshi -r requirements-optional-2.6.txt
+ else
+ pip install --allow-external Genshi --allow-insecure Genshi -r requirements-optional-cpython.txt
+ fi
+fi
diff --git a/testing/web-platform/tests/tools/html5lib/requirements-optional-2.6.txt b/testing/web-platform/tests/tools/html5lib/requirements-optional-2.6.txt
new file mode 100644
index 000000000..37557ac40
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/requirements-optional-2.6.txt
@@ -0,0 +1,5 @@
+-r requirements-optional-cpython.txt
+
+# Can be used to force attributes to be serialized in alphabetical
+# order.
+ordereddict
diff --git a/testing/web-platform/tests/tools/html5lib/requirements-optional-cpython.txt b/testing/web-platform/tests/tools/html5lib/requirements-optional-cpython.txt
new file mode 100644
index 000000000..35ed3529c
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/requirements-optional-cpython.txt
@@ -0,0 +1,5 @@
+-r requirements-optional.txt
+
+# lxml is supported with its own treebuilder ("lxml") and otherwise
+# uses the standard ElementTree support
+lxml
diff --git a/testing/web-platform/tests/tools/html5lib/requirements-optional.txt b/testing/web-platform/tests/tools/html5lib/requirements-optional.txt
new file mode 100644
index 000000000..c63552701
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/requirements-optional.txt
@@ -0,0 +1,13 @@
+-r requirements.txt
+
+# We support a Genshi treewalker that can be used to serialize Genshi
+# streams.
+genshi
+
+# DATrie can be used in place of our Python trie implementation for
+# slightly better parsing performance.
+datrie
+
+# charade can be used as a fallback in case we are unable to determine
+# the encoding of a document.
+charade
diff --git a/testing/web-platform/tests/tools/html5lib/requirements-test.txt b/testing/web-platform/tests/tools/html5lib/requirements-test.txt
new file mode 100644
index 000000000..d5f8088c1
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/requirements-test.txt
@@ -0,0 +1,5 @@
+-r requirements.txt
+
+flake8
+nose
+ordereddict # Python 2.6
diff --git a/testing/web-platform/tests/tools/html5lib/requirements.txt b/testing/web-platform/tests/tools/html5lib/requirements.txt
new file mode 100644
index 000000000..ffe2fce49
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/requirements.txt
@@ -0,0 +1 @@
+six
diff --git a/testing/web-platform/tests/tools/html5lib/setup.py b/testing/web-platform/tests/tools/html5lib/setup.py
new file mode 100644
index 000000000..f75910eaf
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/setup.py
@@ -0,0 +1,44 @@
+from distutils.core import setup
+import os
+import codecs
+
+classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: MIT License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.2',
+ 'Programming Language :: Python :: 3.3',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Text Processing :: Markup :: HTML'
+ ]
+
+packages = ['html5lib'] + ['html5lib.'+name
+ for name in os.listdir(os.path.join('html5lib'))
+ if os.path.isdir(os.path.join('html5lib', name)) and
+ not name.startswith('.') and name != 'tests']
+
+current_dir = os.path.dirname(__file__)
+with codecs.open(os.path.join(current_dir, 'README.rst'), 'r', 'utf8') as readme_file:
+ with codecs.open(os.path.join(current_dir, 'CHANGES.rst'), 'r', 'utf8') as changes_file:
+ long_description = readme_file.read() + '\n' + changes_file.read()
+
+setup(name='html5lib',
+ version='0.9999-dev',
+ url='https://github.com/html5lib/html5lib-python',
+ license="MIT License",
+      description='HTML parser based on the WHATWG HTML specification',
+ long_description=long_description,
+ classifiers=classifiers,
+ maintainer='James Graham',
+ maintainer_email='james@hoppipolla.co.uk',
+ packages=packages,
+ install_requires=[
+ 'six',
+ ],
+ )
diff --git a/testing/web-platform/tests/tools/html5lib/tox.ini b/testing/web-platform/tests/tools/html5lib/tox.ini
new file mode 100644
index 000000000..479f9e1f7
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/tox.ini
@@ -0,0 +1,30 @@
+[tox]
+envlist = py26,py27,py32,py33,py34,pypy
+
+[testenv]
+deps =
+ -r{toxinidir}/requirements-optional-cpython.txt
+ flake8
+ nose
+commands =
+ {envbindir}/nosetests -q
+ {toxinidir}/flake8-run.sh
+install_command =
+ pip install {opts} {packages}
+
+[testenv:pypy]
+# lxml doesn't work and datrie doesn't make sense
+# (it's slower than the pure-python version)
+deps =
+ charade
+ flake8
+ Genshi
+ nose
+ six
+
+[testenv:py26]
+basepython = python2.6
+deps =
+ -r{toxinidir}/requirements-optional-2.6.txt
+ flake8
+ nose
diff --git a/testing/web-platform/tests/tools/html5lib/utils/entities.py b/testing/web-platform/tests/tools/html5lib/utils/entities.py
new file mode 100644
index 000000000..116a27cbc
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/utils/entities.py
@@ -0,0 +1,88 @@
+import json
+
+import html5lib
+
+def parse(path="html5ents.xml"):
+ return html5lib.parse(open(path), treebuilder="lxml")
+
+def entity_table(tree):
+ return dict((entity_name("".join(tr[0].xpath(".//text()"))),
+ entity_characters(tr[1].text))
+ for tr in tree.xpath("//h:tbody/h:tr",
+ namespaces={"h":"http://www.w3.org/1999/xhtml"}))
+
+def entity_name(inp):
+ return inp.strip()
+
+def entity_characters(inp):
+ return "".join(codepoint_to_character(item)
+ for item in inp.split()
+ if item)
+
+def codepoint_to_character(inp):
+ return ("\U000"+inp[2:]).decode("unicode-escape")
+
+def make_tests_json(entities):
+ test_list = make_test_list(entities)
+ tests_json = {"tests":
+ [make_test(*item) for item in test_list]
+ }
+ return tests_json
+
+def make_test(name, characters, good):
+ return {
+ "description":test_description(name, good),
+ "input":"&%s"%name,
+ "output":test_expected(name, characters, good)
+ }
+
+def test_description(name, good):
+ with_semicolon = name.endswith(";")
+ semicolon_text = {True:"with a semi-colon",
+ False:"without a semi-colon"}[with_semicolon]
+ if good:
+ text = "Named entity: %s %s"%(name, semicolon_text)
+ else:
+ text = "Bad named entity: %s %s"%(name, semicolon_text)
+ return text
+
+def test_expected(name, characters, good):
+ rv = []
+ if not good or not name.endswith(";"):
+ rv.append("ParseError")
+ rv.append(["Character", characters])
+ return rv
+
+def make_test_list(entities):
+ tests = []
+ for entity_name, characters in entities.items():
+ if entity_name.endswith(";") and not subentity_exists(entity_name, entities):
+ tests.append((entity_name[:-1], "&" + entity_name[:-1], False))
+ tests.append((entity_name, characters, True))
+ return sorted(tests)
+
+def subentity_exists(entity_name, entities):
+ for i in range(1, len(entity_name)):
+ if entity_name[:-i] in entities:
+ return True
+ return False
+
+def make_entities_code(entities):
+ entities_text = "\n".join(" \"%s\": u\"%s\","%(
+ name, entities[name].encode(
+ "unicode-escape").replace("\"", "\\\""))
+ for name in sorted(entities.keys()))
+ return """entities = {
+%s
+}"""%entities_text
+
+def main():
+ entities = entity_table(parse())
+ tests_json = make_tests_json(entities)
+ json.dump(tests_json, open("namedEntities.test", "w"), indent=4)
+ code = make_entities_code(entities)
+ open("entities_constants.py", "w").write(code)
+
+if __name__ == "__main__":
+ main()
+
diff --git a/testing/web-platform/tests/tools/html5lib/utils/iana_parse.py b/testing/web-platform/tests/tools/html5lib/utils/iana_parse.py
new file mode 100644
index 000000000..6dde94c28
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/utils/iana_parse.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+import sys
+import urllib.request, urllib.error, urllib.parse
+import codecs
+
+def main():
+ encodings = []
+ f = urllib.request.urlopen(sys.argv[1])
+ for line in f:
+ if line.startswith("Name: ") or line.startswith("Alias: "):
+ enc = line.split()[1]
+ try:
+ codecs.lookup(enc)
+                if enc.lower() not in encodings:
+ encodings.append(enc.lower())
+ except LookupError:
+ pass
+ sys.stdout.write("encodings = frozenset((\n")
+ for enc in encodings:
+ sys.stdout.write(' "%s",\n'%enc)
+ sys.stdout.write(' ))')
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/testing/web-platform/tests/tools/html5lib/utils/spider.py b/testing/web-platform/tests/tools/html5lib/utils/spider.py
new file mode 100644
index 000000000..a7b803197
--- /dev/null
+++ b/testing/web-platform/tests/tools/html5lib/utils/spider.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+"""Spider to try and find bugs in the parser. Requires httplib2 and elementtree
+
+usage:
+import spider
+s = spider.Spider()
+s.spider("http://www.google.com", maxURLs=100)
+"""
+
+import urllib.request, urllib.error, urllib.parse
+import urllib.robotparser
+import hashlib
+
+import httplib2
+
+import html5lib
+from html5lib.treebuilders import etree
+
+class Spider(object):
+ def __init__(self):
+ self.unvisitedURLs = set()
+ self.visitedURLs = set()
+ self.buggyURLs=set()
+ self.robotParser = urllib.robotparser.RobotFileParser()
+ self.contentDigest = {}
+ self.http = httplib2.Http(".cache")
+
+ def run(self, initialURL, maxURLs=1000):
+ urlNumber = 0
+ self.visitedURLs.add(initialURL)
+ content = self.loadURL(initialURL)
+ while maxURLs is None or urlNumber < maxURLs:
+ if content is not None:
+ self.parse(content)
+ urlNumber += 1
+ if not self.unvisitedURLs:
+ break
+ content = self.loadURL(self.unvisitedURLs.pop())
+
+ def parse(self, content):
+ failed = False
+ p = html5lib.HTMLParser(tree=etree.TreeBuilder)
+ try:
+ tree = p.parse(content)
+ except:
+ self.buggyURLs.add(self.currentURL)
+ failed = True
+ print("BUGGY:", self.currentURL)
+ self.visitedURLs.add(self.currentURL)
+ if not failed:
+ self.updateURLs(tree)
+
+ def loadURL(self, url):
+ resp, content = self.http.request(url, "GET")
+ self.currentURL = url
+        digest = hashlib.md5(content).hexdigest()
+ if digest in self.contentDigest:
+ content = None
+ self.visitedURLs.add(url)
+ else:
+ self.contentDigest[digest] = url
+
+ if resp['status'] != "200":
+ content = None
+
+ return content
+
+ def updateURLs(self, tree):
+ """Take all the links in the current document, extract the URLs and
+ update the list of visited and unvisited URLs according to whether we
+ have seen them before or not"""
+ urls = set()
+ #Remove all links we have already visited
+ for link in tree.findall(".//a"):
+ try:
+ url = urllib.parse.urldefrag(link.attrib['href'])[0]
+ if (url and url not in self.unvisitedURLs and url
+ not in self.visitedURLs):
+ urls.add(url)
+ except KeyError:
+ pass
+
+        #Remove all non-http URLs and add a suitable base URL where that is
+ #missing
+ newUrls = set()
+ for url in urls:
+ splitURL = list(urllib.parse.urlsplit(url))
+ if splitURL[0] != "http":
+ continue
+ if splitURL[1] == "":
+ splitURL[1] = urllib.parse.urlsplit(self.currentURL)[1]
+ newUrls.add(urllib.parse.urlunsplit(splitURL))
+ urls = newUrls
+
+ responseHeaders = {}
+ #Now we want to find the content types of the links we haven't visited
+ for url in urls:
+ try:
+ resp, content = self.http.request(url, "HEAD")
+ responseHeaders[url] = resp
+            except (AttributeError, KeyError):
+ #Don't know why this happens
+ pass
+
+
+ #Remove links not of content-type html or pages not found
+ #XXX - need to deal with other status codes?
+ toVisit = set([url for url in urls if url in responseHeaders and
+ "html" in responseHeaders[url]['content-type'] and
+ responseHeaders[url]['status'] == "200"])
+
+ #Now check we are allowed to spider the page
+ for url in toVisit:
+ robotURL = list(urllib.parse.urlsplit(url)[:2])
+ robotURL.extend(["robots.txt", "", ""])
+ robotURL = urllib.parse.urlunsplit(robotURL)
+ self.robotParser.set_url(robotURL)
+ if not self.robotParser.can_fetch("*", url):
+ toVisit.remove(url)
+
+ self.visitedURLs.update(urls)
+ self.unvisitedURLs.update(toVisit)
diff --git a/testing/web-platform/tests/tools/lint/__init__.py b/testing/web-platform/tests/tools/lint/__init__.py
new file mode 100644
index 000000000..e5eb5e12d
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/__init__.py
@@ -0,0 +1 @@
+from . import lint
diff --git a/testing/web-platform/tests/tools/lint/lint.py b/testing/web-platform/tests/tools/lint/lint.py
new file mode 100644
index 000000000..2aee3da1c
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/lint.py
@@ -0,0 +1,426 @@
+from __future__ import print_function, unicode_literals
+
+import abc
+import argparse
+import ast
+import fnmatch
+import json
+import os
+import re
+import subprocess
+import sys
+
+from collections import defaultdict
+
+from ..localpaths import repo_root
+
+from manifest.sourcefile import SourceFile
+from six import iteritems, itervalues
+from six.moves import range
+
+here = os.path.abspath(os.path.split(__file__)[0])
+
+ERROR_MSG = """You must fix all errors; for details on how to fix them, see
+https://github.com/w3c/web-platform-tests/blob/master/docs/lint-tool.md
+
+However, instead of fixing a particular error, it's sometimes
+OK to add a line to the lint.whitelist file in the root of the
+web-platform-tests directory to make the lint tool ignore it.
+
+For example, to make the lint tool ignore all '%s'
+errors in the %s file,
+you could add the following line to the lint.whitelist file.
+
+%s:%s"""
+
+def all_git_paths(repo_root):
+ command_line = ["git", "ls-tree", "-r", "--name-only", "HEAD"]
+ output = subprocess.check_output(command_line, cwd=repo_root)
+ for item in output.split("\n"):
+ yield item
+
+
+def check_path_length(repo_root, path):
+ if len(path) + 1 > 150:
+ return [("PATH LENGTH", "/%s longer than maximum path length (%d > 150)" % (path, len(path) + 1), None)]
+ return []
+
+
+def parse_whitelist(f):
+ """
+ Parse the whitelist file given by `f`, and return a tuple
+ (data, ignored_files): `data` maps a file glob to a dict from error
+ type to the set of whitelisted line numbers (None meaning any line),
+ and `ignored_files` is a set of globs for files to ignore entirely.
+ """
+
+ data = defaultdict(lambda:defaultdict(set))
+ ignored_files = set()
+
+ for line in f:
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ parts = [item.strip() for item in line.split(":")]
+ if len(parts) == 2:
+ parts.append(None)
+ else:
+ parts[-1] = int(parts[-1])
+
+ error_type, file_match, line_number = parts
+
+ if error_type == "*":
+ ignored_files.add(file_match)
+ else:
+ data[file_match][error_type].add(line_number)
+
+ return data, ignored_files
+
+
+def filter_whitelist_errors(data, path, errors):
+ """
+ Filter out those errors that are whitelisted in `data`.
+ """
+
+ whitelisted = [False] * len(errors)
+
+ for file_match, whitelist_errors in iteritems(data):
+ if fnmatch.fnmatch(path, file_match):
+ for i, (error_type, msg, path, line) in enumerate(errors):
+ if error_type in whitelist_errors:
+ allowed_lines = whitelist_errors[error_type]
+ if None in allowed_lines or line in allowed_lines:
+ whitelisted[i] = True
+
+ return [item for i, item in enumerate(errors) if not whitelisted[i]]
+
+class Regexp(object):
+ pattern = None
+ file_extensions = None
+ error = None
+ _re = None
+
+ def __init__(self):
+ self._re = re.compile(self.pattern)
+
+ def applies(self, path):
+ return (self.file_extensions is None or
+ os.path.splitext(path)[1] in self.file_extensions)
+
+ def search(self, line):
+ return self._re.search(line)
+
+class TrailingWhitespaceRegexp(Regexp):
+ pattern = b"[ \t\f\v]$"
+ error = "TRAILING WHITESPACE"
+ description = "Whitespace at EOL"
+
+class TabsRegexp(Regexp):
+ pattern = b"^\t"
+ error = "INDENT TABS"
+ description = "Tabs used for indentation"
+
+class CRRegexp(Regexp):
+ pattern = b"\r$"
+ error = "CR AT EOL"
+ description = "CR character in line separator"
+
+class W3CTestOrgRegexp(Regexp):
+ pattern = b"w3c\-test\.org"
+ error = "W3C-TEST.ORG"
+ description = "External w3c-test.org domain used"
+
+class Webidl2Regexp(Regexp):
+ pattern = b"webidl2\.js"
+ error = "WEBIDL2.JS"
+ description = "Legacy webidl2.js script used"
+
+class ConsoleRegexp(Regexp):
+ pattern = b"console\.[a-zA-Z]+\s*\("
+ error = "CONSOLE"
+ file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
+ description = "Console logging API used"
+
+class PrintRegexp(Regexp):
+ pattern = b"print(?:\s|\s*\()"
+ error = "PRINT STATEMENT"
+ file_extensions = [".py"]
+ description = "Print function used"
+
+regexps = [item() for item in
+ [TrailingWhitespaceRegexp,
+ TabsRegexp,
+ CRRegexp,
+ W3CTestOrgRegexp,
+ Webidl2Regexp,
+ ConsoleRegexp,
+ PrintRegexp]]
+
+def check_regexp_line(repo_root, path, f):
+ errors = []
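+ # Each match is reported as an (error name, description, path, 1-based
+ # line number) tuple, e.g. ("TRAILING WHITESPACE", "Whitespace at EOL",
+ # path, 3) for a line ending in a space.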
+
+ applicable_regexps = [regexp for regexp in regexps if regexp.applies(path)]
+
+ for i, line in enumerate(f):
+ for regexp in applicable_regexps:
+ if regexp.search(line):
+ errors.append((regexp.error, regexp.description, path, i+1))
+
+ return errors
+
+def check_parsed(repo_root, path, f):
+ source_file = SourceFile(repo_root, path, "/", contents=f.read())
+
+ errors = []
+
+ if source_file.name_is_non_test or source_file.name_is_manual:
+ return []
+
+ if source_file.markup_type is None:
+ return []
+
+ if source_file.root is None:
+ return [("PARSE-FAILED", "Unable to parse file", path, None)]
+
+ if len(source_file.timeout_nodes) > 1:
+ errors.append(("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", path, None))
+
+ for timeout_node in source_file.timeout_nodes:
+ timeout_value = timeout_node.attrib.get("content", "").lower()
+ if timeout_value != "long":
+ errors.append(("INVALID-TIMEOUT", "Invalid timeout value %s" % timeout_value, path, None))
+
+ if source_file.testharness_nodes:
+ if len(source_file.testharness_nodes) > 1:
+ errors.append(("MULTIPLE-TESTHARNESS",
+ "More than one <script src='/resources/testharness.js'>", path, None))
+
+ testharnessreport_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']")
+ if not testharnessreport_nodes:
+ errors.append(("MISSING-TESTHARNESSREPORT",
+ "Missing <script src='/resources/testharnessreport.js'>", path, None))
+ else:
+ if len(testharnessreport_nodes) > 1:
+ errors.append(("MULTIPLE-TESTHARNESSREPORT",
+ "More than one <script src='/resources/testharnessreport.js'>", path, None))
+
+ testharnesscss_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}link[@href='/resources/testharness.css']")
+ if testharnesscss_nodes:
+ errors.append(("PRESENT-TESTHARNESSCSS",
+ "Explicit link to testharness.css present", path, None))
+
+ for element in source_file.variant_nodes:
+ if "content" not in element.attrib:
+ errors.append(("VARIANT-MISSING",
+ "<meta name=variant> missing 'content' attribute", path, None))
+ else:
+ variant = element.attrib["content"]
+ if variant != "" and variant[0] not in ("?", "#"):
+ errors.append(("MALFORMED-VARIANT",
+ "%s <meta name=variant> 'content' attribute must be the empty string or start with '?' or '#'" % path, None))
+
+ seen_elements = {"timeout": False,
+ "testharness": False,
+ "testharnessreport": False}
+ required_elements = [key for key, value in {"testharness": True,
+ "testharnessreport": len(testharnessreport_nodes) > 0,
+ "timeout": len(source_file.timeout_nodes) > 0}.items()
+ if value]
+
+ for elem in source_file.root.iter():
+ if source_file.timeout_nodes and elem == source_file.timeout_nodes[0]:
+ seen_elements["timeout"] = True
+ if seen_elements["testharness"]:
+ errors.append(("LATE-TIMEOUT",
+ "<meta name=timeout> seen after testharness.js script", path, None))
+
+ elif elem == source_file.testharness_nodes[0]:
+ seen_elements["testharness"] = True
+
+ elif testharnessreport_nodes and elem == testharnessreport_nodes[0]:
+ seen_elements["testharnessreport"] = True
+ if not seen_elements["testharness"]:
+ errors.append(("EARLY-TESTHARNESSREPORT",
+ "testharnessreport.js script seen before testharness.js script", path, None))
+
+ if all(seen_elements[name] for name in required_elements):
+ break
+
+
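+ # Flag testharness.js/testharnessreport.js loaded from a non-canonical
+ # location, e.g. src="testharness.js" or src="resources/testharness.js";
+ # only "/resources/testharness.js" (and the equivalent
+ # testharnessreport.js path) passes.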
+ for element in source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src]"):
+ src = element.attrib["src"]
+ for name in ["testharness", "testharnessreport"]:
+ if "%s.js" % name == src or ("/%s.js" % name in src and src != "/resources/%s.js" % name):
+ errors.append(("%s-PATH" % name.upper(), "%s.js script seen with incorrect path" % name, path, None))
+
+
+ return errors
+
+class ASTCheck(object):
+ __metaclass__ = abc.ABCMeta
+ error = None
+ description = None
+
+ @abc.abstractmethod
+ def check(self, root):
+ pass
+
+class OpenModeCheck(ASTCheck):
+ error = "OPEN-NO-MODE"
+ description = "File opened without providing an explicit mode (note: binary files must be read with 'b' in the mode flags)"
+
+ def check(self, root):
+ errors = []
+ for node in ast.walk(root):
+ if isinstance(node, ast.Call):
+ if hasattr(node.func, "id") and node.func.id in ("open", "file"):
+ if (len(node.args) < 2 and
+ all(item.arg != "mode" for item in node.keywords)):
+ errors.append(node.lineno)
+ return errors
+
+ast_checkers = [item() for item in [OpenModeCheck]]
+
+def check_python_ast(repo_root, path, f):
+ if not path.endswith(".py"):
+ return []
+
+ try:
+ root = ast.parse(f.read())
+ except SyntaxError as e:
+ return [("PARSE-FAILED", "Unable to parse file", path, e.lineno)]
+
+ errors = []
+ for checker in ast_checkers:
+ for lineno in checker.check(root):
+ errors.append((checker.error, checker.description, path, lineno))
+ return errors
+
+
+def check_path(repo_root, path):
+ """
+ Runs lints that check the file path.
+
+ :param repo_root: the repository root
+ :param path: the path of the file within the repository
+ :returns: a list of errors found in ``path``
+ """
+
+ errors = []
+ for path_fn in path_lints:
+ errors.extend(path_fn(repo_root, path))
+ return errors
+
+
+def check_file_contents(repo_root, path, f):
+ """
+ Runs lints that check the file contents.
+
+ :param repo_root: the repository root
+ :param path: the path of the file within the repository
+ :param f: a file-like object with the file contents
+ :returns: a list of errors found in ``f``
+ """
+
+ errors = []
+ for file_fn in file_lints:
+ errors.extend(file_fn(repo_root, path, f))
+ f.seek(0)
+ return errors
+
+
+def output_errors_text(errors):
+ for error_type, description, path, line_number in errors:
+ pos_string = path
+ if line_number:
+ pos_string += " %s" % line_number
+ print("%s: %s %s" % (error_type, pos_string, description))
+
+def output_errors_json(errors):
+ for error_type, error, path, line_number in errors:
+ print(json.dumps({"path": path, "lineno": line_number,
+ "rule": error_type, "message": error}))
+
+def output_error_count(error_count):
+ if not error_count:
+ return
+
+ by_type = " ".join("%s: %d" % item for item in error_count.items())
+ count = sum(error_count.values())
+ if count == 1:
+ print("There was 1 error (%s)" % (by_type,))
+ else:
+ print("There were %d errors (%s)" % (count, by_type))
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("paths", nargs="*",
+ help="List of paths to lint")
+ parser.add_argument("--json", action="store_true",
+ help="Output machine-readable JSON format")
+ return parser.parse_args()
+
+def main():
+ args = parse_args()
+ paths = args.paths if args.paths else all_git_paths(repo_root)
+ return lint(repo_root, paths, args.json)
+
+def lint(repo_root, paths, output_json):
+ error_count = defaultdict(int)
+ last = None
+
+ with open(os.path.join(repo_root, "lint.whitelist")) as f:
+ whitelist, ignored_files = parse_whitelist(f)
+
+ if output_json:
+ output_errors = output_errors_json
+ else:
+ output_errors = output_errors_text
+
+ def process_errors(path, errors):
+ """
+ Filters and prints the errors, and updates the ``error_count`` object.
+
+ :param path: the path of the file that contains the errors
+ :param errors: a list of error tuples (error type, message, path, line number)
+ :returns: ``None`` if there were no errors, or
+ a tuple of the error type and the path otherwise
+ """
+
+ errors = filter_whitelist_errors(whitelist, path, errors)
+
+ if not errors:
+ return None
+
+ output_errors(errors)
+ for error_type, error, path, line in errors:
+ error_count[error_type] += 1
+
+ return (errors[-1][0], path)
+
+ for path in paths:
+ abs_path = os.path.join(repo_root, path)
+ if not os.path.exists(abs_path):
+ continue
+
+ if any(fnmatch.fnmatch(path, file_match) for file_match in ignored_files):
+ continue
+
+ errors = check_path(repo_root, path)
+ last = process_errors(path, errors) or last
+
+ if not os.path.isdir(abs_path):
+ with open(abs_path, 'rb') as f:
+ errors = check_file_contents(repo_root, path, f)
+ last = process_errors(path, errors) or last
+
+ if not output_json:
+ output_error_count(error_count)
+ if error_count:
+ print(ERROR_MSG % (last[0], last[1], last[0], last[1]))
+ return sum(itervalues(error_count))
+
+path_lints = [check_path_length]
+file_lints = [check_regexp_line, check_parsed, check_python_ast]
+
+if __name__ == "__main__":
+ error_count = main()
+ if error_count > 0:
+ sys.exit(1)
diff --git a/testing/web-platform/tests/tools/lint/tests/__init__.py b/testing/web-platform/tests/tools/lint/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/__init__.py
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/broken.html b/testing/web-platform/tests/tools/lint/tests/dummy/broken.html
new file mode 100644
index 000000000..74793c43c
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/broken.html
@@ -0,0 +1 @@
+THIS LINE HAS TRAILING WHITESPACE
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/broken_ignored.html b/testing/web-platform/tests/tools/lint/tests/dummy/broken_ignored.html
new file mode 100644
index 000000000..74793c43c
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/broken_ignored.html
@@ -0,0 +1 @@
+THIS LINE HAS TRAILING WHITESPACE
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/lint.whitelist b/testing/web-platform/tests/tools/lint/tests/dummy/lint.whitelist
new file mode 100644
index 000000000..a763e4432
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/lint.whitelist
@@ -0,0 +1 @@
+*:broken_ignored.html
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/okay.html b/testing/web-platform/tests/tools/lint/tests/dummy/okay.html
new file mode 100644
index 000000000..a3178a3c8
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/okay.html
@@ -0,0 +1 @@
+THIS LINE HAS NO TRAILING WHITESPACE
diff --git a/testing/web-platform/tests/tools/lint/tests/test_file_lints.py b/testing/web-platform/tests/tools/lint/tests/test_file_lints.py
new file mode 100644
index 000000000..3e3e359b0
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/test_file_lints.py
@@ -0,0 +1,356 @@
+from __future__ import unicode_literals
+
+from ..lint import check_file_contents
+import os
+import pytest
+import six
+
+INTERESTING_FILE_NAMES = {
+ "python": [
+ "test.py",
+ ],
+ "js": [
+ "test.js",
+ ],
+ "web-lax": [
+ "test.htm",
+ "test.html",
+ ],
+ "web-strict": [
+ "test.svg",
+ "test.xht",
+ "test.xhtml",
+ ],
+}
+
+def check_with_files(input_bytes):
+ return {
+ filename: (check_file_contents("", filename, six.BytesIO(input_bytes)), kind)
+ for (filename, kind) in
+ (
+ (os.path.join("html", filename), kind)
+ for (kind, filenames) in INTERESTING_FILE_NAMES.items()
+ for filename in filenames
+ )
+ }
+
+
+def test_trailing_whitespace():
+ error_map = check_with_files(b"test; ")
+
+ for (filename, (errors, kind)) in error_map.items():
+ expected = [("TRAILING WHITESPACE", "Whitespace at EOL", filename, 1)]
+ if kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_indent_tabs():
+ error_map = check_with_files(b"def foo():\n\x09pass")
+
+ for (filename, (errors, kind)) in error_map.items():
+ expected = [("INDENT TABS", "Tabs used for indentation", filename, 2)]
+ if kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_cr_not_at_eol():
+ error_map = check_with_files(b"line1\rline2\r")
+
+ for (filename, (errors, kind)) in error_map.items():
+ expected = [("CR AT EOL", "CR character in line separator", filename, 1)]
+ if kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_cr_at_eol():
+ error_map = check_with_files(b"line1\r\nline2\r\n")
+
+ for (filename, (errors, kind)) in error_map.items():
+ expected = [
+ ("CR AT EOL", "CR character in line separator", filename, 1),
+ ("CR AT EOL", "CR character in line separator", filename, 2),
+ ]
+ if kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_w3c_test_org():
+ error_map = check_with_files(b"import('http://www.w3c-test.org/')")
+
+ for (filename, (errors, kind)) in error_map.items():
+ expected = [("W3C-TEST.ORG", "External w3c-test.org domain used", filename, 1)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_webidl2_js():
+ error_map = check_with_files(b"<script src=/resources/webidl2.js>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ expected = [("WEBIDL2.JS", "Legacy webidl2.js script used", filename, 1)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_console():
+ error_map = check_with_files(b"<script>\nconsole.log('error');\nconsole.error ('log')\n</script>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind in ["web-lax", "web-strict", "js"]:
+ assert errors == [
+ ("CONSOLE", "Console logging API used", filename, 2),
+ ("CONSOLE", "Console logging API used", filename, 3),
+ ]
+ else:
+ assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
+
+
+def test_meta_timeout():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<meta name="timeout" />
+<meta name="timeout" content="short" />
+<meta name="timeout" content="long" />
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", filename, None),
+ ("INVALID-TIMEOUT", "Invalid timeout value ", filename, None),
+ ("INVALID-TIMEOUT", "Invalid timeout value short", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_early_testharnessreport():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testharness.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("EARLY-TESTHARNESSREPORT", "testharnessreport.js script seen before testharness.js script", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_multiple_testharness():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharness.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MULTIPLE-TESTHARNESS", "More than one <script src='/resources/testharness.js'>", filename, None),
+ ("MISSING-TESTHARNESSREPORT", "Missing <script src='/resources/testharnessreport.js'>", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_multiple_testharnessreport():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MULTIPLE-TESTHARNESSREPORT", "More than one <script src='/resources/testharnessreport.js'>", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_present_testharnesscss():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<link rel="stylesheet" href="/resources/testharness.css"/>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("PRESENT-TESTHARNESSCSS", "Explicit link to testharness.css present", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_testharness_path():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="testharness.js"></script>
+<script src="resources/testharness.js"></script>
+<script src="../resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ expected = [("W3C-TEST.ORG", "External w3c-test.org domain used", filename, 5)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind in ["web-lax", "web-strict"]:
+ expected.extend([
+ ("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
+ ("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
+ ("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
+ ("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
+ ])
+ assert errors == expected
+
+
+def test_testharnessreport_path():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="testharnessreport.js"></script>
+<script src="resources/testharnessreport.js"></script>
+<script src="../resources/testharnessreport.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ expected = [("W3C-TEST.ORG", "External w3c-test.org domain used", filename, 5)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind in ["web-lax", "web-strict"]:
+ expected.extend([
+ ("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
+ ("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
+ ("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
+ ("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
+ ])
+ assert errors == expected
+
+
+def test_not_testharness_path():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="resources/webperftestharness.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 1),
+ ]
+ else:
+ assert errors == []
+
+
+@pytest.mark.skipif(six.PY3, reason="Cannot parse print statements from python 3")
+def test_print_statement():
+ error_map = check_with_files(b"def foo():\n print 'statement'\n print\n")
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind == "python":
+ assert errors == [
+ ("PRINT STATEMENT", "Print function used", filename, 2),
+ ("PRINT STATEMENT", "Print function used", filename, 3),
+ ]
+ elif kind == "web-strict":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, None),
+ ]
+ else:
+ assert errors == []
+
+
+def test_print_function():
+ error_map = check_with_files(b"def foo():\n print('function')\n")
+
+ for (filename, (errors, kind)) in error_map.items():
+ if kind == "python":
+ assert errors == [
+ ("PRINT STATEMENT", "Print function used", filename, 2),
+ ]
+ elif kind == "web-strict":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, None),
+ ]
+ else:
+ assert errors == []
+
+
+open_mode_code = """
+def first():
+ return {0}("test.png")
+
+def second():
+ return {0}("test.png", "r")
+
+def third():
+ return {0}("test.png", "rb")
+
+def fourth():
+ return {0}("test.png", encoding="utf-8")
+
+def fifth():
+ return {0}("test.png", mode="rb")
+"""
+
+
+def test_open_mode():
+ for method in ["open", "file"]:
+ code = open_mode_code.format(method).encode("utf-8")
+ errors = check_file_contents("", "test.py", six.BytesIO(code))
+
+ message = ("File opened without providing an explicit mode (note: " +
+ "binary files must be read with 'b' in the mode flags)")
+
+ assert errors == [
+ ("OPEN-NO-MODE", message, "test.py", 3),
+ ("OPEN-NO-MODE", message, "test.py", 12),
+ ]
diff --git a/testing/web-platform/tests/tools/lint/tests/test_lint.py b/testing/web-platform/tests/tools/lint/tests/test_lint.py
new file mode 100644
index 000000000..ebca206c8
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/test_lint.py
@@ -0,0 +1,138 @@
+from __future__ import unicode_literals
+
+import os
+
+import mock
+import pytest
+import six
+
+from .. import lint as lint_mod
+from ..lint import filter_whitelist_errors, parse_whitelist, lint
+
+_dummy_repo = os.path.join(os.path.dirname(__file__), "dummy")
+
+
+def _mock_lint(name):
+ wrapped = getattr(lint_mod, name)
+ return mock.patch(lint_mod.__name__ + "." + name, wraps=wrapped)
+
+
+def test_filter_whitelist_errors():
+ filtered = filter_whitelist_errors({}, '', [])
+ assert filtered == []
+
+
+def test_parse_whitelist():
+ input_buffer = six.StringIO("""
+# Comment
+CR AT EOL: svg/import/*
+CR AT EOL: streams/resources/test-utils.js
+
+INDENT TABS: .gitmodules
+INDENT TABS: app-uri/*
+INDENT TABS: svg/*
+
+TRAILING WHITESPACE: app-uri/*
+
+CONSOLE:streams/resources/test-utils.js: 12
+
+*:*.pdf
+*:resources/*
+""")
+
+ expected_data = {
+ '.gitmodules': {
+ 'INDENT TABS': {None},
+ },
+ 'app-uri/*': {
+ 'TRAILING WHITESPACE': {None},
+ 'INDENT TABS': {None},
+ },
+ 'streams/resources/test-utils.js': {
+ 'CONSOLE': {12},
+ 'CR AT EOL': {None},
+ },
+ 'svg/*': {
+ 'INDENT TABS': {None},
+ },
+ 'svg/import/*': {
+ 'CR AT EOL': {None},
+ },
+ }
+ expected_ignored = {"*.pdf", "resources/*"}
+ data, ignored = parse_whitelist(input_buffer)
+ assert data == expected_data
+ assert ignored == expected_ignored
+
+
+def test_lint_no_files(capsys):
+ rv = lint(_dummy_repo, [], False)
+ assert rv == 0
+ out, err = capsys.readouterr()
+ assert out == ""
+ assert err == ""
+
+
+def test_lint_ignored_file(capsys):
+ with _mock_lint("check_path") as mocked_check_path:
+ with _mock_lint("check_file_contents") as mocked_check_file_contents:
+ rv = lint(_dummy_repo, ["broken_ignored.html"], False)
+ assert rv == 0
+ assert not mocked_check_path.called
+ assert not mocked_check_file_contents.called
+ out, err = capsys.readouterr()
+ assert out == ""
+ assert err == ""
+
+
+def test_lint_not_existing_file(capsys):
+ with _mock_lint("check_path") as mocked_check_path:
+ with _mock_lint("check_file_contents") as mocked_check_file_contents:
+ # really long path-linted filename
+ name = "a" * 256 + ".html"
+ rv = lint(_dummy_repo, [name], False)
+ assert rv == 0
+ assert not mocked_check_path.called
+ assert not mocked_check_file_contents.called
+ out, err = capsys.readouterr()
+ assert out == ""
+ assert err == ""
+
+
+def test_lint_passing(capsys):
+ with _mock_lint("check_path") as mocked_check_path:
+ with _mock_lint("check_file_contents") as mocked_check_file_contents:
+ rv = lint(_dummy_repo, ["okay.html"], False)
+ assert rv == 0
+ assert mocked_check_path.call_count == 1
+ assert mocked_check_file_contents.call_count == 1
+ out, err = capsys.readouterr()
+ assert out == ""
+ assert err == ""
+
+
+def test_lint_failing(capsys):
+ with _mock_lint("check_path") as mocked_check_path:
+ with _mock_lint("check_file_contents") as mocked_check_file_contents:
+ rv = lint(_dummy_repo, ["broken.html"], False)
+ assert rv == 1
+ assert mocked_check_path.call_count == 1
+ assert mocked_check_file_contents.call_count == 1
+ out, err = capsys.readouterr()
+ assert "TRAILING WHITESPACE" in out
+ assert "broken.html 1 " in out
+ assert err == ""
+
+
+def test_lint_passing_and_failing(capsys):
+ with _mock_lint("check_path") as mocked_check_path:
+ with _mock_lint("check_file_contents") as mocked_check_file_contents:
+ rv = lint(_dummy_repo, ["broken.html", "okay.html"], False)
+ assert rv == 1
+ assert mocked_check_path.call_count == 2
+ assert mocked_check_file_contents.call_count == 2
+ out, err = capsys.readouterr()
+ assert "TRAILING WHITESPACE" in out
+ assert "broken.html 1 " in out
+ assert "okay.html" not in out
+ assert err == ""
diff --git a/testing/web-platform/tests/tools/lint/tests/test_path_lints.py b/testing/web-platform/tests/tools/lint/tests/test_path_lints.py
new file mode 100644
index 000000000..83cb8aaa6
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/test_path_lints.py
@@ -0,0 +1,25 @@
+from __future__ import unicode_literals
+
+from ..lint import check_path
+import pytest
+import six
+
+def test_allowed_path_length():
+ basename = 29 * "test/"
+
+ for idx in range(5):
+ filename = basename + idx * "a"
+
+ errors = check_path("/foo/", filename)
+ assert errors == []
+
+
+def test_forbidden_path_length():
+ basename = 29 * "test/"
+
+ for idx in range(5, 10):
+ filename = basename + idx * "a"
+ message = "/%s longer than maximum path length (%s > 150)" % (filename, 146 + idx)
+
+ errors = check_path("/foo/", filename)
+ assert errors == [("PATH LENGTH", message, None)]
diff --git a/testing/web-platform/tests/tools/localpaths.py b/testing/web-platform/tests/tools/localpaths.py
new file mode 100644
index 000000000..a4930015c
--- /dev/null
+++ b/testing/web-platform/tests/tools/localpaths.py
@@ -0,0 +1,14 @@
+import os
+import sys
+
+here = os.path.abspath(os.path.split(__file__)[0])
+repo_root = os.path.abspath(os.path.join(here, os.pardir))
+
+sys.path.insert(0, os.path.join(here))
+sys.path.insert(0, os.path.join(here, "six"))
+sys.path.insert(0, os.path.join(here, "html5lib"))
+sys.path.insert(0, os.path.join(here, "wptserve"))
+sys.path.insert(0, os.path.join(here, "pywebsocket", "src"))
+sys.path.insert(0, os.path.join(here, "py"))
+sys.path.insert(0, os.path.join(here, "pytest"))
+sys.path.insert(0, os.path.join(here, "webdriver"))
diff --git a/testing/web-platform/tests/tools/manifest/__init__.py b/testing/web-platform/tests/tools/manifest/__init__.py
new file mode 100644
index 000000000..7ecb04be9
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/__init__.py
@@ -0,0 +1,5 @@
+from . import item
+from . import manifest
+from . import sourcefile
+from . import tree
+from . import update
diff --git a/testing/web-platform/tests/tools/manifest/item.py b/testing/web-platform/tests/tools/manifest/item.py
new file mode 100644
index 000000000..76c91697f
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/item.py
@@ -0,0 +1,191 @@
+import os
+from six.moves.urllib.parse import urljoin
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+from .utils import from_os_path, to_os_path
+
+item_types = ["testharness", "reftest", "manual", "stub", "wdspec"]
+
+
+def get_source_file(source_files, tests_root, manifest, path):
+ def make_new():
+ from .sourcefile import SourceFile
+
+ return SourceFile(tests_root, path, manifest.url_base)
+
+ if source_files is None:
+ return make_new()
+
+ if path not in source_files:
+ source_files[path] = make_new()
+
+ return source_files[path]
+
+
+class ManifestItem(object):
+ __metaclass__ = ABCMeta
+
+ item_type = None
+
+ def __init__(self, source_file, manifest=None):
+ self.manifest = manifest
+ self.source_file = source_file
+
+ @abstractproperty
+ def id(self):
+ """The test's id (usually its url)"""
+ pass
+
+ @property
+ def path(self):
+ """The test path relative to the test_root"""
+ return self.source_file.rel_path
+
+ @property
+ def https(self):
+ return "https" in self.source_file.meta_flags
+
+ def key(self):
+ """A unique identifier for the test"""
+ return (self.item_type, self.id)
+
+ def meta_key(self):
+ """Extra metadata that doesn't form part of the test identity, but for
+ which changes mean regenerating the manifest (e.g. the test timeout."""
+ return ()
+
+ def __eq__(self, other):
+ if not hasattr(other, "key"):
+ return False
+ return self.key() == other.key()
+
+ def __hash__(self):
+ return hash(self.key() + self.meta_key())
+
+ def __repr__(self):
+ return "<%s.%s id=%s, path=%s>" % (self.__module__, self.__class__.__name__, self.id, self.path)
+
+ def to_json(self):
+ return {"path": from_os_path(self.path)}
+
+ @classmethod
+ def from_json(self, manifest, tests_root, obj, source_files=None):
+ raise NotImplementedError
+
+
+class URLManifestItem(ManifestItem):
+ def __init__(self, source_file, url, url_base="/", manifest=None):
+ ManifestItem.__init__(self, source_file, manifest=manifest)
+ self._url = url
+ self.url_base = url_base
+
+ @property
+ def id(self):
+ return self.url
+
+ @property
+ def url(self):
+ return urljoin(self.url_base, self._url)
+
+ def to_json(self):
+ rv = ManifestItem.to_json(self)
+ rv["url"] = self._url
+ return rv
+
+ @classmethod
+ def from_json(cls, manifest, tests_root, obj, source_files=None):
+ source_file = get_source_file(source_files, tests_root, manifest,
+ to_os_path(obj["path"]))
+ return cls(source_file,
+ obj["url"],
+ url_base=manifest.url_base,
+ manifest=manifest)
+
+
+class TestharnessTest(URLManifestItem):
+ item_type = "testharness"
+
+ def __init__(self, source_file, url, url_base="/", timeout=None, manifest=None):
+ URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
+ self.timeout = timeout
+
+ def meta_key(self):
+ return (self.timeout,)
+
+ def to_json(self):
+ rv = URLManifestItem.to_json(self)
+ if self.timeout is not None:
+ rv["timeout"] = self.timeout
+ return rv
+
+ @classmethod
+ def from_json(cls, manifest, tests_root, obj, source_files=None):
+ source_file = get_source_file(source_files, tests_root, manifest,
+ to_os_path(obj["path"]))
+ return cls(source_file,
+ obj["url"],
+ url_base=manifest.url_base,
+ timeout=obj.get("timeout"),
+ manifest=manifest)
+
+
+class RefTest(URLManifestItem):
+ item_type = "reftest"
+
+ def __init__(self, source_file, url, references, url_base="/", timeout=None,
+ viewport_size=None, dpi=None, manifest=None):
+ URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
+ for _, ref_type in references:
+ if ref_type not in ["==", "!="]:
+ raise ValueError("Unrecognised ref_type %s" % ref_type)
+ self.references = tuple(references)
+ self.timeout = timeout
+ self.viewport_size = viewport_size
+ self.dpi = dpi
+
+ @property
+ def is_reference(self):
+ return self.source_file.name_is_reference
+
+ def meta_key(self):
+ return (self.timeout, self.viewport_size, self.dpi)
+
+ def to_json(self):
+ rv = URLManifestItem.to_json(self)
+ rv["references"] = self.references
+ if self.timeout is not None:
+ rv["timeout"] = self.timeout
+ if self.viewport_size is not None:
+ rv["viewport_size"] = self.viewport_size
+ if self.dpi is not None:
+ rv["dpi"] = self.dpi
+ return rv
+
+ @classmethod
+ def from_json(cls, manifest, tests_root, obj, source_files=None):
+ source_file = get_source_file(source_files, tests_root, manifest,
+ to_os_path(obj["path"]))
+ return cls(source_file,
+ obj["url"],
+ obj["references"],
+ url_base=manifest.url_base,
+ timeout=obj.get("timeout"),
+ viewport_size=obj.get("viewport_size"),
+ dpi=obj.get("dpi"),
+ manifest=manifest)
+
+
+class ManualTest(URLManifestItem):
+ item_type = "manual"
+
+
+class Stub(URLManifestItem):
+ item_type = "stub"
+
+
+class WebdriverSpecTest(URLManifestItem):
+ item_type = "wdspec"
+
+ def __init__(self, source_file, url, url_base="/", timeout=None, manifest=None):
+ URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
+ self.timeout = timeout
diff --git a/testing/web-platform/tests/tools/manifest/log.py b/testing/web-platform/tests/tools/manifest/log.py
new file mode 100644
index 000000000..affb7d306
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/log.py
@@ -0,0 +1,8 @@
+import logging
+
+logger = logging.getLogger("manifest")
+logger.addHandler(logging.StreamHandler())
+logger.setLevel(logging.DEBUG)
+
+def get_logger():
+ return logger
diff --git a/testing/web-platform/tests/tools/manifest/manifest.py b/testing/web-platform/tests/tools/manifest/manifest.py
new file mode 100644
index 000000000..80fe70f95
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/manifest.py
@@ -0,0 +1,418 @@
+import json
+import os
+from collections import defaultdict, OrderedDict
+from six import iteritems, itervalues, string_types
+
+from .item import item_types, ManualTest, WebdriverSpecTest, Stub, RefTest, TestharnessTest
+from .log import get_logger
+from .sourcefile import SourceFile
+from .utils import from_os_path, to_os_path
+
+
+CURRENT_VERSION = 3
+
+
+class ManifestError(Exception):
+ pass
+
+
+class ManifestVersionMismatch(ManifestError):
+ pass
+
+class Manifest(object):
+ def __init__(self, git_rev=None, url_base="/"):
+ # Dict of item_type: {path: set(manifest_items)}
+ self._data = dict((item_type, defaultdict(set))
+ for item_type in item_types)
+ self.rev = git_rev
+ self.url_base = url_base
+ self.local_changes = LocalChanges(self)
+ # reftest nodes arranged as {path: set(manifest_items)}
+ self.reftest_nodes = defaultdict(set)
+ self.reftest_nodes_by_url = {}
+
+ def _included_items(self, include_types=None):
+ if include_types is None:
+ include_types = item_types
+
+ for item_type in include_types:
+ paths = self._data[item_type].copy()
+ for local_types, local_paths in self.local_changes.itertypes(item_type):
+ for path, items in iteritems(local_paths):
+ paths[path] = items
+ for path in self.local_changes.iterdeleted():
+ if path in paths:
+ del paths[path]
+ if item_type == "reftest":
+ for path, items in self.local_changes.iterdeletedreftests():
+ paths[path] -= items
+ if len(paths[path]) == 0:
+ del paths[path]
+
+ yield item_type, paths
+
+ def contains_path(self, path):
+ return any(path in paths for _, paths in self._included_items())
+
+ def add(self, item):
+ if item is None:
+ return
+
+ if isinstance(item, RefTest):
+ self.reftest_nodes[item.path].add(item)
+ self.reftest_nodes_by_url[item.url] = item
+ else:
+ self._add(item)
+
+ item.manifest = self
+
+ def _add(self, item):
+ self._data[item.item_type][item.path].add(item)
+
+ def extend(self, items):
+ for item in items:
+ self.add(item)
+
+ def remove_path(self, path):
+ for item_type in item_types:
+ if path in self._data[item_type]:
+ del self._data[item_type][path]
+
+ def itertypes(self, *types):
+ if not types:
+ types = None
+ for item_type, items in self._included_items(types):
+ for item in sorted(iteritems(items)):
+ yield item
+
+ def __iter__(self):
+ for item in self.itertypes():
+ yield item
+
+ def __getitem__(self, path):
+ for _, paths in self._included_items():
+ if path in paths:
+ return paths[path]
+ raise KeyError
+
+ def get_reference(self, url):
+ if url in self.local_changes.reftest_nodes_by_url:
+ return self.local_changes.reftest_nodes_by_url[url]
+
+ if url in self.reftest_nodes_by_url:
+ return self.reftest_nodes_by_url[url]
+
+ return None
+
+ def _committed_with_path(self, rel_path):
+ rv = set()
+
+ for paths_items in itervalues(self._data):
+ rv |= paths_items.get(rel_path, set())
+
+ if rel_path in self.reftest_nodes:
+ rv |= self.reftest_nodes[rel_path]
+
+ return rv
+
+ def _committed_paths(self):
+ rv = set()
+ for paths_items in itervalues(self._data):
+ rv |= set(paths_items.keys())
+ return rv
+
+ def update(self,
+ tests_root,
+ url_base,
+ new_rev,
+ committed_changes=None,
+ local_changes=None,
+ remove_missing_local=False):
+
+ if local_changes is None:
+ local_changes = {}
+
+ if committed_changes is not None:
+ for rel_path, status in committed_changes:
+ self.remove_path(rel_path)
+ if status == "modified":
+ use_committed = rel_path in local_changes
+ source_file = SourceFile(tests_root,
+ rel_path,
+ url_base,
+ use_committed=use_committed)
+ self.extend(source_file.manifest_items())
+
+ self.local_changes = LocalChanges(self)
+
+ local_paths = set()
+ for rel_path, status in iteritems(local_changes):
+ local_paths.add(rel_path)
+
+ if status == "modified":
+ existing_items = self._committed_with_path(rel_path)
+ source_file = SourceFile(tests_root,
+ rel_path,
+ url_base,
+ use_committed=False)
+ local_items = set(source_file.manifest_items())
+
+ updated_items = local_items - existing_items
+ self.local_changes.extend(updated_items)
+ else:
+ self.local_changes.add_deleted(rel_path)
+
+ if remove_missing_local:
+ for path in self._committed_paths() - local_paths:
+ self.local_changes.add_deleted(path)
+
+ self.update_reftests()
+
+ if new_rev is not None:
+ self.rev = new_rev
+ self.url_base = url_base
+
+ def update_reftests(self):
+ default_reftests = self.compute_reftests(self.reftest_nodes)
+ all_reftest_nodes = self.reftest_nodes.copy()
+ all_reftest_nodes.update(self.local_changes.reftest_nodes)
+
+ for item in self.local_changes.iterdeleted():
+ if item in all_reftest_nodes:
+ del all_reftest_nodes[item]
+
+ modified_reftests = self.compute_reftests(all_reftest_nodes)
+
+ added_reftests = modified_reftests - default_reftests
+ # The interesting case here is not when the file is deleted,
+ # but when a reftest like A == B is changed to the form
+ # C == A == B, so that A still exists but is now a ref rather than
+ # a test.
+ removed_reftests = default_reftests - modified_reftests
+
+ dests = [(default_reftests, self._data["reftest"]),
+ (added_reftests, self.local_changes._data["reftest"]),
+ (removed_reftests, self.local_changes._deleted_reftests)]
+
+ #TODO: Warn if there exist unreachable reftest nodes
+ for source, target in dests:
+ for item in source:
+ target[item.path].add(item)
+
+ def compute_reftests(self, reftest_nodes):
+ """Given a set of reftest_nodes, return a set of all the nodes that are top-level
+ tests i.e. don't have any incoming reference links."""
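+ # For example, given references A == B and C == A, only C is returned:
+ # A has an incoming reference from C, so it is a ref rather than a test.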
+
+ reftests = set()
+
+ has_inbound = set()
+ for path, items in iteritems(reftest_nodes):
+ for item in items:
+ for ref_url, ref_type in item.references:
+ has_inbound.add(ref_url)
+
+ for path, items in iteritems(reftest_nodes):
+ for item in items:
+ if item.url in has_inbound:
+ continue
+ reftests.add(item)
+
+ return reftests
+
+ def to_json(self):
+ out_items = {
+ item_type: sorted(
+ test.to_json()
+ for _, tests in iteritems(items)
+ for test in tests
+ )
+ for item_type, items in iteritems(self._data)
+ }
+
+ reftest_nodes = OrderedDict()
+ for key, value in sorted(iteritems(self.reftest_nodes)):
+ reftest_nodes[from_os_path(key)] = [v.to_json() for v in value]
+
+ rv = {"url_base": self.url_base,
+ "rev": self.rev,
+ "local_changes": self.local_changes.to_json(),
+ "items": out_items,
+ "reftest_nodes": reftest_nodes,
+ "version": CURRENT_VERSION}
+ return rv
+
+ @classmethod
+ def from_json(cls, tests_root, obj):
+ version = obj.get("version")
+ if version != CURRENT_VERSION:
+ raise ManifestVersionMismatch
+
+ self = cls(git_rev=obj["rev"],
+ url_base=obj.get("url_base", "/"))
+ if not hasattr(obj, "items"):
+ raise ManifestError
+
+ item_classes = {"testharness": TestharnessTest,
+ "reftest": RefTest,
+ "manual": ManualTest,
+ "stub": Stub,
+ "wdspec": WebdriverSpecTest}
+
+ source_files = {}
+
+ for k, values in iteritems(obj["items"]):
+ if k not in item_types:
+ raise ManifestError
+ for v in values:
+ manifest_item = item_classes[k].from_json(self, tests_root, v,
+ source_files=source_files)
+ self._add(manifest_item)
+
+ for path, values in iteritems(obj["reftest_nodes"]):
+ path = to_os_path(path)
+ for v in values:
+ item = RefTest.from_json(self, tests_root, v,
+ source_files=source_files)
+ self.reftest_nodes[path].add(item)
+ self.reftest_nodes_by_url[v["url"]] = item
+
+ self.local_changes = LocalChanges.from_json(self,
+ tests_root,
+ obj["local_changes"],
+ source_files=source_files)
+ self.update_reftests()
+ return self
+
+
+class LocalChanges(object):
+ def __init__(self, manifest):
+ self.manifest = manifest
+ self._data = dict((item_type, defaultdict(set)) for item_type in item_types)
+ self._deleted = set()
+ self.reftest_nodes = defaultdict(set)
+ self.reftest_nodes_by_url = {}
+ self._deleted_reftests = defaultdict(set)
+
+ def add(self, item):
+ if item is None:
+ return
+
+ if isinstance(item, RefTest):
+ self.reftest_nodes[item.path].add(item)
+ self.reftest_nodes_by_url[item.url] = item
+ else:
+ self._add(item)
+
+ item.manifest = self.manifest
+
+ def _add(self, item):
+ self._data[item.item_type][item.path].add(item)
+
+ def extend(self, items):
+ for item in items:
+ self.add(item)
+
+ def add_deleted(self, path):
+ self._deleted.add(path)
+
+ def is_deleted(self, path):
+ return path in self._deleted
+
+ def itertypes(self, *types):
+ for item_type in types:
+ yield item_type, self._data[item_type]
+
+ def iterdeleted(self):
+ for item in self._deleted:
+ yield item
+
+ def iterdeletedreftests(self):
+ for item in iteritems(self._deleted_reftests):
+ yield item
+
+ def __getitem__(self, item_type):
+ return self._data[item_type]
+
+ def to_json(self):
+ reftest_nodes = {from_os_path(key): [v.to_json() for v in value]
+ for key, value in iteritems(self.reftest_nodes)}
+
+ deleted_reftests = {from_os_path(key): [v.to_json() for v in value]
+ for key, value in iteritems(self._deleted_reftests)}
+
+ rv = {"items": defaultdict(dict),
+ "reftest_nodes": reftest_nodes,
+ "deleted": [from_os_path(path) for path in self._deleted],
+ "deleted_reftests": deleted_reftests}
+
+ for test_type, paths in iteritems(self._data):
+ for path, tests in iteritems(paths):
+ path = from_os_path(path)
+ rv["items"][test_type][path] = [test.to_json() for test in tests]
+
+ return rv
+
+ @classmethod
+ def from_json(cls, manifest, tests_root, obj, source_files=None):
+ self = cls(manifest)
+ if not hasattr(obj, "items"):
+ raise ManifestError
+
+ item_classes = {"testharness": TestharnessTest,
+ "reftest": RefTest,
+ "manual": ManualTest,
+ "stub": Stub,
+ "wdspec": WebdriverSpecTest}
+
+ for test_type, paths in iteritems(obj["items"]):
+ for path, tests in iteritems(paths):
+ for test in tests:
+ manifest_item = item_classes[test_type].from_json(manifest,
+ tests_root,
+ test,
+ source_files=source_files)
+ self.add(manifest_item)
+
+ for path, values in iteritems(obj["reftest_nodes"]):
+ path = to_os_path(path)
+ for v in values:
+ item = RefTest.from_json(self.manifest, tests_root, v,
+ source_files=source_files)
+ self.reftest_nodes[path].add(item)
+ self.reftest_nodes_by_url[item.url] = item
+
+ for item in obj["deleted"]:
+ self.add_deleted(to_os_path(item))
+
+ for path, values in iteritems(obj.get("deleted_reftests", {})):
+ path = to_os_path(path)
+ for v in values:
+ item = RefTest.from_json(self.manifest, tests_root, v,
+ source_files=source_files)
+ self._deleted_reftests[path].add(item)
+
+ return self
+
+def load(tests_root, manifest):
+ logger = get_logger()
+
+ # "manifest" is a path or file-like object.
+ if isinstance(manifest, string_types):
+ if os.path.exists(manifest):
+ logger.debug("Opening manifest at %s" % manifest)
+ else:
+ logger.debug("Creating new manifest at %s" % manifest)
+ try:
+ with open(manifest) as f:
+ rv = Manifest.from_json(tests_root, json.load(f))
+ except IOError:
+ rv = Manifest(None)
+ return rv
+
+ return Manifest.from_json(tests_root, json.load(manifest))
+
+
+def write(manifest, manifest_path):
+ with open(manifest_path, "wb") as f:
+ json.dump(manifest.to_json(), f, sort_keys=True, indent=2, separators=(',', ': '))
+ f.write("\n")
diff --git a/testing/web-platform/tests/tools/manifest/sourcefile.py b/testing/web-platform/tests/tools/manifest/sourcefile.py
new file mode 100644
index 000000000..44a462707
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/sourcefile.py
@@ -0,0 +1,366 @@
+import imp
+import os
+import re
+from six.moves.urllib.parse import urljoin
+from fnmatch import fnmatch
+try:
+ from xml.etree import cElementTree as ElementTree
+except ImportError:
+ from xml.etree import ElementTree
+
+import html5lib
+
+from . import vcs
+from .item import Stub, ManualTest, WebdriverSpecTest, RefTest, TestharnessTest
+from .utils import rel_path_to_url, is_blacklisted, ContextManagerBytesIO, cached_property
+
+wd_pattern = "*.py"
+meta_re = re.compile("//\s*<meta>\s*(\w*)=(.*)$")
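+# Matches worker-script metadata comments of the form "// <meta> timeout=long"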
+
+def replace_end(s, old, new):
+ """
+ Given a string `s` that ends with `old`, replace that occurrence of `old`
+ with `new`.
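+ e.g. replace_end("foo.any.js", ".any.js", ".any.html") == "foo.any.html"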
+ """
+ assert s.endswith(old)
+ return s[:-len(old)] + new
+
+
+class SourceFile(object):
+ parsers = {"html":lambda x:html5lib.parse(x, treebuilder="etree"),
+ "xhtml":ElementTree.parse,
+ "svg":ElementTree.parse}
+
+ def __init__(self, tests_root, rel_path, url_base, use_committed=False,
+ contents=None):
+ """Object representing a file in a source tree.
+
+ :param tests_root: Path to the root of the source tree
+ :param rel_path: File path relative to tests_root
+ :param url_base: Base URL used when converting file paths to urls
+ :param use_committed: Work with the last committed version of the file
+ rather than the on-disk version.
+ :param contents: Byte array of the contents of the file or ``None``.
+ """
+
+ assert not (use_committed and contents is not None)
+
+ self.tests_root = tests_root
+ self.rel_path = rel_path
+ self.url_base = url_base
+ self.use_committed = use_committed
+ self.contents = contents
+
+ self.url = rel_path_to_url(rel_path, url_base)
+ self.path = os.path.join(tests_root, rel_path)
+
+ self.dir_path, self.filename = os.path.split(self.path)
+ self.name, self.ext = os.path.splitext(self.filename)
+
+ self.type_flag = None
+ if "-" in self.name:
+ self.type_flag = self.name.rsplit("-", 1)[1].split(".")[0]
+
+ self.meta_flags = self.name.split(".")[1:]
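+ # e.g. for "foo-manual.https.html": name is "foo-manual.https",
+ # ext is ".html", type_flag is "manual" and meta_flags is ["https"]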
+
+ def __getstate__(self):
+ # Remove computed properties if we pickle this class
+ rv = self.__dict__.copy()
+
+ if "__cached_properties__" in rv:
+ cached_properties = rv["__cached_properties__"]
+ for key in rv.keys():
+ if key in cached_properties:
+ del rv[key]
+ del rv["__cached_properties__"]
+ return rv
+
+ def name_prefix(self, prefix):
+ """Check if the filename starts with a given prefix
+
+ :param prefix: The prefix to check"""
+ return self.name.startswith(prefix)
+
+ def is_dir(self):
+ """Return whether this file represents a directory."""
+ if self.contents is not None:
+ return False
+
+ return os.path.isdir(self.rel_path)
+
+ def open(self):
+ """
+ Return either
+ * the contents specified in the constructor, if any;
+ * the contents of the file when last committed, if use_committed is true; or
+ * a File object opened for reading the file contents.
+ """
+
+ if self.contents is not None:
+ file_obj = ContextManagerBytesIO(self.contents)
+ elif self.use_committed:
+ git = vcs.get_git_func(os.path.dirname(__file__))
+ blob = git("show", "HEAD:%s" % self.rel_path)
+ file_obj = ContextManagerBytesIO(blob)
+ else:
+ file_obj = open(self.path, 'rb')
+ return file_obj
+
+ @property
+ def name_is_non_test(self):
+ """Check if the file name matches the conditions for the file to
+ be a non-test file"""
+ return (self.is_dir() or
+ self.name_prefix("MANIFEST") or
+ self.filename.startswith(".") or
+ is_blacklisted(self.url))
+
+ @property
+ def name_is_stub(self):
+ """Check if the file name matches the conditions for the file to
+ be a stub file"""
+ return self.name_prefix("stub-")
+
+ @property
+ def name_is_manual(self):
+ """Check if the file name matches the conditions for the file to
+ be a manual test file"""
+ return self.type_flag == "manual"
+
+ @property
+ def name_is_multi_global(self):
+ """Check if the file name matches the conditions for the file to
+ be a multi-global js test file"""
+ return "any" in self.meta_flags and self.ext == ".js"
+
+ @property
+ def name_is_worker(self):
+ """Check if the file name matches the conditions for the file to
+ be a worker js test file"""
+ return "worker" in self.meta_flags and self.ext == ".js"
+
+ @property
+ def name_is_webdriver(self):
+ """Check if the file name matches the conditions for the file to
+ be a webdriver spec test file"""
+ # wdspec tests are in subdirectories of /webdriver excluding __init__.py
+ # files.
+ rel_dir_tree = self.rel_path.split(os.path.sep)
+ return (rel_dir_tree[0] == "webdriver" and
+ len(rel_dir_tree) > 1 and
+ self.filename != "__init__.py" and
+ fnmatch(self.filename, wd_pattern))
+
+ @property
+ def name_is_reference(self):
+ """Check if the file name matches the conditions for the file to
+ be a reference file (not a reftest)"""
+ return self.type_flag in ("ref", "notref")
+
+ @property
+ def markup_type(self):
+ """Return the type of markup contained in a file, based on its extension,
+ or None if it doesn't contain markup"""
+ ext = self.ext
+
+ if not ext:
+ return None
+ if ext[0] == ".":
+ ext = ext[1:]
+ if ext in ["html", "htm"]:
+ return "html"
+ if ext in ["xhtml", "xht", "xml"]:
+ return "xhtml"
+ if ext == "svg":
+ return "svg"
+ return None
+
+ @cached_property
+ def root(self):
+ """Return an ElementTree Element for the root node of the file if it contains
+ markup, or None if it does not"""
+ if not self.markup_type:
+ return None
+
+ parser = self.parsers[self.markup_type]
+
+ with self.open() as f:
+ try:
+ tree = parser(f)
+ except Exception:
+ return None
+
+ if hasattr(tree, "getroot"):
+ root = tree.getroot()
+ else:
+ root = tree
+
+ return root
+
+ @cached_property
+ def timeout_nodes(self):
+ """List of ElementTree Elements corresponding to nodes in a test that
+ specify timeouts"""
+ return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='timeout']")
+
+ @cached_property
+ def timeout(self):
+ """The timeout of a test or reference file. "long" if the file has an extended timeout
+ or None otherwise"""
+ if self.name_is_worker:
+ with self.open() as f:
+ for line in f:
+ m = meta_re.match(line)
+ if m and m.groups()[0] == "timeout":
+ if m.groups()[1].lower() == "long":
+ return "long"
+ return
+
+ if not self.root:
+ return
+
+ if self.timeout_nodes:
+ timeout_str = self.timeout_nodes[0].attrib.get("content", None)
+ if timeout_str and timeout_str.lower() == "long":
+ return timeout_str
+
+ @cached_property
+ def viewport_nodes(self):
+ """List of ElementTree Elements corresponding to nodes in a test that
+ specify viewport sizes"""
+ return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='viewport-size']")
+
+ @cached_property
+ def viewport_size(self):
+ """The viewport size of a test or reference file"""
+ if not self.root:
+ return None
+
+ if not self.viewport_nodes:
+ return None
+
+ return self.viewport_nodes[0].attrib.get("content", None)
+
+ @cached_property
+ def dpi_nodes(self):
+ """List of ElementTree Elements corresponding to nodes in a test that
+ specify device pixel ratios"""
+ return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='device-pixel-ratio']")
+
+ @cached_property
+ def dpi(self):
+ """The device pixel ratio of a test or reference file"""
+ if not self.root:
+ return None
+
+ if not self.dpi_nodes:
+ return None
+
+ return self.dpi_nodes[0].attrib.get("content", None)
+
+ @cached_property
+ def testharness_nodes(self):
+ """List of ElementTree Elements corresponding to nodes representing a
+ testharness.js script"""
+ return self.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharness.js']")
+
+ @cached_property
+ def content_is_testharness(self):
+ """Boolean indicating whether the file content represents a
+ testharness.js test"""
+ if not self.root:
+ return None
+ return bool(self.testharness_nodes)
+
+ @cached_property
+ def variant_nodes(self):
+ """List of ElementTree Elements corresponding to nodes representing a
+ test variant"""
+ return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='variant']")
+
+ @cached_property
+ def test_variants(self):
+ rv = []
+ for element in self.variant_nodes:
+ if "content" in element.attrib:
+ variant = element.attrib["content"]
+ assert variant == "" or variant[0] in ["#", "?"]
+ rv.append(variant)
+
+ if not rv:
+ rv = [""]
+
+ return rv
+
+ @cached_property
+ def reftest_nodes(self):
+ """List of ElementTree Elements corresponding to nodes representing a
+ reftest <link>"""
+ if not self.root:
+ return []
+
+ match_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='match']")
+ mismatch_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='mismatch']")
+ return match_links + mismatch_links
+
+ @cached_property
+ def references(self):
+ """List of (ref_url, relation) tuples for any reftest references specified in
+ the file"""
+ rv = []
+ rel_map = {"match": "==", "mismatch": "!="}
+ for item in self.reftest_nodes:
+ if "href" in item.attrib:
+ ref_url = urljoin(self.url, item.attrib["href"])
+ ref_type = rel_map[item.attrib["rel"]]
+ rv.append((ref_url, ref_type))
+ return rv
+
+ @cached_property
+ def content_is_ref_node(self):
+ """Boolean indicating whether the file is a non-leaf node in a reftest
+ graph (i.e. if it contains any <link rel=[mis]match>)"""
+ return bool(self.references)
+
+ def manifest_items(self):
+ """List of manifest items corresponding to the file. There is typically one
+ per test, but in the case of reftests a node may have corresponding manifest
+ items without being a test itself."""
+
+ if self.name_is_non_test:
+ rv = []
+
+ elif self.name_is_stub:
+ rv = [Stub(self, self.url)]
+
+ elif self.name_is_manual:
+ rv = [ManualTest(self, self.url)]
+
+ elif self.name_is_multi_global:
+ rv = [
+ TestharnessTest(self, replace_end(self.url, ".any.js", ".any.html")),
+ TestharnessTest(self, replace_end(self.url, ".any.js", ".any.worker")),
+ ]
+
+ elif self.name_is_worker:
+ rv = [TestharnessTest(self, replace_end(self.url, ".worker.js", ".worker"),
+ timeout=self.timeout)]
+
+ elif self.name_is_webdriver:
+ rv = [WebdriverSpecTest(self, self.url)]
+
+ elif self.content_is_testharness:
+ rv = []
+ for variant in self.test_variants:
+ url = self.url + variant
+ rv.append(TestharnessTest(self, url, timeout=self.timeout))
+
+ elif self.content_is_ref_node:
+ rv = [RefTest(self, self.url, self.references, timeout=self.timeout,
+ viewport_size=self.viewport_size, dpi=self.dpi)]
+
+ else:
+ # If nothing else it's a helper file, which we don't have a specific type for
+ rv = []
+
+ return rv
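
For illustration, a minimal sketch of how the classification above turns a source path into manifest items; the import path, the example path and the contents are assumptions (compare the tests added below)::

    from sourcefile import SourceFile   # import path is an assumption

    s = SourceFile("/wpt", "dom/interfaces.any.js", "/", contents=b"// any.js test")
    print([(item.item_type, item.url) for item in s.manifest_items()])
    # expected, per the .any.js branch above:
    # [('testharness', '/dom/interfaces.any.html'),
    #  ('testharness', '/dom/interfaces.any.worker')]
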
diff --git a/testing/web-platform/tests/tools/manifest/tests/__init__.py b/testing/web-platform/tests/tools/manifest/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/tests/__init__.py
diff --git a/testing/web-platform/tests/tools/manifest/tests/test_manifest.py b/testing/web-platform/tests/tools/manifest/tests/test_manifest.py
new file mode 100644
index 000000000..11ac6d331
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/tests/test_manifest.py
@@ -0,0 +1,80 @@
+from .. import manifest, item as manifestitem, sourcefile
+
+
+def test_local_reftest_add():
+ m = manifest.Manifest()
+ s = sourcefile.SourceFile("/", "test", "/")
+ test = manifestitem.RefTest(s, "/test", [("/ref", "==")])
+ m.local_changes.add(test)
+ m.update_reftests()
+ assert list(m) == [(test.path, {test})]
+
+
+def test_local_reftest_delete_path():
+ m = manifest.Manifest()
+ s = sourcefile.SourceFile("/", "test", "/")
+ test = manifestitem.RefTest(s, "/test", [("/ref", "==")])
+ m.add(test)
+ m.local_changes.add_deleted(test.path)
+ m.update_reftests()
+ assert list(m) == []
+
+
+def test_local_reftest_adjusted():
+ m = manifest.Manifest()
+ s = sourcefile.SourceFile("/", "test", "/")
+ test = manifestitem.RefTest(s, "/test", [("/ref", "==")])
+ m.add(test)
+ m.update_reftests()
+
+ assert m.compute_reftests({test.path: {test}}) == {test}
+
+ assert list(m) == [(test.path, {test})]
+
+ s_1 = sourcefile.SourceFile("/", "test-1", "/")
+ test_1 = manifestitem.RefTest(s_1, "/test-1", [("/test", "==")])
+ m.local_changes.add(test_1)
+ m.update_reftests()
+
+ assert m.compute_reftests({test.path: {test}, test_1.path: {test_1}}) == {test_1}
+
+ assert list(m) == [(test_1.path, {test_1})]
+
+
+def test_manifest_to_json():
+ m = manifest.Manifest()
+ s = sourcefile.SourceFile("/", "test", "/")
+ test = manifestitem.RefTest(s, "/test", [("/ref", "==")])
+ m.add(test)
+ s_1 = sourcefile.SourceFile("/", "test-1", "/")
+ test_1 = manifestitem.RefTest(s_1, "/test-1", [("/test", "==")])
+ m.local_changes.add(test_1)
+ m.local_changes.add_deleted(test.path)
+ m.update_reftests()
+
+ json_str = m.to_json()
+ loaded = manifest.Manifest.from_json("/", json_str)
+
+ assert list(loaded) == list(m)
+
+ assert loaded.to_json() == json_str
+
+
+def test_reftest_computation_chain():
+ m = manifest.Manifest()
+
+ s1 = sourcefile.SourceFile("/", "test1", "/")
+ s2 = sourcefile.SourceFile("/", "test2", "/")
+
+ test1 = manifestitem.RefTest(s1, "/test1", [("/test3", "==")])
+ test2 = manifestitem.RefTest(s2, "/test2", [("/test1", "==")])
+ m.add(test1)
+ m.add(test2)
+
+ m.update_reftests()
+
+ assert m.reftest_nodes == {'test1': {test1},
+ 'test2': {test2}}
+
+ assert list(m) == [("test2", {test2})]
+ assert list(m.local_changes.itertypes()) == []
diff --git a/testing/web-platform/tests/tools/manifest/tests/test_sourcefile.py b/testing/web-platform/tests/tools/manifest/tests/test_sourcefile.py
new file mode 100644
index 000000000..da51406c7
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/tests/test_sourcefile.py
@@ -0,0 +1,251 @@
+from ..sourcefile import SourceFile
+
+def create(filename, contents=b""):
+ assert isinstance(contents, bytes)
+ return SourceFile("/", filename, "/", contents=contents)
+
+
+def items(s):
+ return [
+ (item.item_type, item.url)
+ for item in s.manifest_items()
+ ]
+
+
+def test_name_is_non_test():
+ non_tests = [
+ ".gitignore",
+ ".travis.yml",
+ "MANIFEST.json",
+ "tools/test.html",
+ "resources/test.html",
+ "common/test.html",
+ "conformance-checkers/test.html",
+ ]
+
+ for rel_path in non_tests:
+ s = create(rel_path)
+ assert s.name_is_non_test
+
+ assert not s.content_is_testharness
+
+ assert items(s) == []
+
+
+def test_name_is_manual():
+ manual_tests = [
+ "html/test-manual.html",
+ "html/test-manual.xhtml",
+ "html/test-manual.https.html",
+ "html/test-manual.https.xhtml"
+ ]
+
+ for rel_path in manual_tests:
+ s = create(rel_path)
+ assert not s.name_is_non_test
+ assert s.name_is_manual
+
+ assert not s.content_is_testharness
+
+ assert items(s) == [("manual", "/" + rel_path)]
+
+
+def test_worker():
+ s = create("html/test.worker.js")
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert not s.name_is_multi_global
+ assert s.name_is_worker
+ assert not s.name_is_reference
+
+ assert not s.content_is_testharness
+
+ assert items(s) == [("testharness", "/html/test.worker")]
+
+def test_worker_long_timeout():
+    s = create("html/test.worker.js",
+               contents=b"""// <meta> timeout=long
+importScripts('/resources/testharness.js')
+test()""")
+
+ manifest_items = s.manifest_items()
+ assert len(manifest_items) == 1
+ assert manifest_items[0].timeout == "long"
+
+
+def test_multi_global():
+ s = create("html/test.any.js")
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert s.name_is_multi_global
+ assert not s.name_is_worker
+ assert not s.name_is_reference
+
+ assert not s.content_is_testharness
+
+ assert items(s) == [
+ ("testharness", "/html/test.any.html"),
+ ("testharness", "/html/test.any.worker"),
+ ]
+
+
+def test_testharness():
+ content = b"<script src=/resources/testharness.js></script>"
+
+ for ext in ["htm", "html"]:
+ filename = "html/test." + ext
+ s = create(filename, content)
+
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert not s.name_is_multi_global
+ assert not s.name_is_worker
+ assert not s.name_is_reference
+
+ assert s.content_is_testharness
+
+ assert items(s) == [("testharness", "/" + filename)]
+
+
+def test_relative_testharness():
+ content = b"<script src=../resources/testharness.js></script>"
+
+ for ext in ["htm", "html"]:
+ filename = "html/test." + ext
+ s = create(filename, content)
+
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert not s.name_is_multi_global
+ assert not s.name_is_worker
+ assert not s.name_is_reference
+
+ assert not s.content_is_testharness
+
+ assert items(s) == []
+
+
+def test_testharness_xhtml():
+ content = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</head>
+<body/>
+</html>
+"""
+
+ for ext in ["xhtml", "xht", "xml"]:
+ filename = "html/test." + ext
+ s = create(filename, content)
+
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert not s.name_is_multi_global
+ assert not s.name_is_worker
+ assert not s.name_is_reference
+
+ assert s.content_is_testharness
+
+ assert items(s) == [("testharness", "/" + filename)]
+
+
+def test_relative_testharness_xhtml():
+ content = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<script src="../resources/testharness.js"></script>
+<script src="../resources/testharnessreport.js"></script>
+</head>
+<body/>
+</html>
+"""
+
+ for ext in ["xhtml", "xht", "xml"]:
+ filename = "html/test." + ext
+ s = create(filename, content)
+
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert not s.name_is_multi_global
+ assert not s.name_is_worker
+ assert not s.name_is_reference
+
+ assert not s.content_is_testharness
+
+ assert items(s) == []
+
+
+def test_testharness_svg():
+ content = b"""\
+<?xml version="1.0" encoding="UTF-8"?>
+<svg xmlns="http://www.w3.org/2000/svg"
+ xmlns:h="http://www.w3.org/1999/xhtml"
+ version="1.1"
+ width="100%" height="100%" viewBox="0 0 400 400">
+<title>Null test</title>
+<h:script src="/resources/testharness.js"/>
+<h:script src="/resources/testharnessreport.js"/>
+</svg>
+"""
+
+ filename = "html/test.svg"
+ s = create(filename, content)
+
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert not s.name_is_multi_global
+ assert not s.name_is_worker
+ assert not s.name_is_reference
+
+ assert s.root
+ assert s.content_is_testharness
+
+ assert items(s) == [("testharness", "/" + filename)]
+
+
+def test_relative_testharness_svg():
+ content = b"""\
+<?xml version="1.0" encoding="UTF-8"?>
+<svg xmlns="http://www.w3.org/2000/svg"
+ xmlns:h="http://www.w3.org/1999/xhtml"
+ version="1.1"
+ width="100%" height="100%" viewBox="0 0 400 400">
+<title>Null test</title>
+<h:script src="../resources/testharness.js"/>
+<h:script src="../resources/testharnessreport.js"/>
+</svg>
+"""
+
+ filename = "html/test.svg"
+ s = create(filename, content)
+
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert not s.name_is_multi_global
+ assert not s.name_is_worker
+ assert not s.name_is_reference
+
+ assert s.root
+ assert not s.content_is_testharness
+
+ assert items(s) == []
+
+
+def test_testharness_ext():
+ content = b"<script src=/resources/testharness.js></script>"
+
+ for filename in ["test", "test.test"]:
+ s = create("html/" + filename, content)
+
+ assert not s.name_is_non_test
+ assert not s.name_is_manual
+ assert not s.name_is_multi_global
+ assert not s.name_is_worker
+ assert not s.name_is_reference
+
+ assert not s.root
+ assert not s.content_is_testharness
+
+ assert items(s) == []
diff --git a/testing/web-platform/tests/tools/manifest/tests/test_utils.py b/testing/web-platform/tests/tools/manifest/tests/test_utils.py
new file mode 100644
index 000000000..a95bd47e9
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/tests/test_utils.py
@@ -0,0 +1,28 @@
+import pytest
+
+from ..utils import is_blacklisted
+
+
+@pytest.mark.parametrize("url", [
+ "/foo",
+ "/tools/foo",
+ "/common/foo",
+ "/conformance-checkers/foo",
+ "/_certs/foo",
+ "/resources/foo",
+ "/support/foo",
+ "/foo/resources/bar",
+ "/foo/support/bar"
+])
+def test_is_blacklisted(url):
+ assert is_blacklisted(url) is True
+
+
+@pytest.mark.parametrize("url", [
+ "/foo/tools/bar",
+ "/foo/common/bar",
+ "/foo/conformance-checkers/bar",
+ "/foo/_certs/bar"
+])
+def test_not_is_blacklisted(url):
+ assert is_blacklisted(url) is False
diff --git a/testing/web-platform/tests/tools/manifest/tree.py b/testing/web-platform/tests/tools/manifest/tree.py
new file mode 100644
index 000000000..25a5f212f
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/tree.py
@@ -0,0 +1,168 @@
+import os
+from six.moves import cStringIO as StringIO
+from fnmatch import fnmatch
+
+from . import vcs
+from .log import get_logger
+from .utils import is_blacklisted, rel_path_to_url
+
+def chunks(data, n):
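+    # The range stops at len(data) - 1 so the trailing empty string produced by
+    # splitting git's NUL-terminated output on "\0" is never yielded as an
+    # incomplete final chunk.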
+ for i in range(0, len(data) - 1, n):
+ yield data[i:i+n]
+
+class TestTree(object):
+ def __init__(self, tests_root, url_base):
+ self.tests_root = tests_root
+ self.url_base = url_base
+ self.logger = get_logger()
+
+ def current_rev(self):
+ pass
+
+ def local_changes(self):
+ pass
+
+ def committed_changes(self, base_rev=None):
+ pass
+
+
+class GitTree(TestTree):
+ def __init__(self, tests_root, url_base):
+ TestTree.__init__(self, tests_root, url_base)
+ self.git = self.setup_git()
+
+ def setup_git(self):
+ assert vcs.is_git_repo(self.tests_root)
+ return vcs.get_git_func(self.tests_root)
+
+ def current_rev(self):
+ return self.git("rev-parse", "HEAD").strip()
+
+ def local_changes(self, path=None):
+ # -z is stable like --porcelain; see the git status documentation for details
+ cmd = ["status", "-z", "--ignore-submodules=all"]
+ if path is not None:
+ cmd.extend(["--", path])
+
+ rv = {}
+
+ data = self.git(*cmd)
+ if data == "":
+ return rv
+
+ assert data[-1] == "\0"
+ f = StringIO(data)
+
+ while f.tell() < len(data):
+ # First two bytes are the status in the stage (index) and working tree, respectively
+ staged = f.read(1)
+ worktree = f.read(1)
+ assert f.read(1) == " "
+
+ if staged == "R":
+ # When a file is renamed, there are two files, the source and the destination
+ files = 2
+ else:
+ files = 1
+
+ filenames = []
+
+ for i in range(files):
+ filenames.append("")
+ char = f.read(1)
+ while char != "\0":
+ filenames[-1] += char
+ char = f.read(1)
+
+ if not is_blacklisted(rel_path_to_url(filenames[0], self.url_base)):
+ rv.update(self.local_status(staged, worktree, filenames))
+
+ return rv
+
+ def committed_changes(self, base_rev=None):
+ if base_rev is None:
+ self.logger.debug("Adding all changesets to the manifest")
+ return [(item, "modified") for item in self.paths()]
+
+ self.logger.debug("Updating the manifest from %s to %s" % (base_rev, self.current_rev()))
+ rv = []
+ data = self.git("diff", "-z", "--name-status", base_rev + "..HEAD")
+ items = data.split("\0")
+ for status, filename in chunks(items, 2):
+ if is_blacklisted(rel_path_to_url(filename, self.url_base)):
+ continue
+ if status == "D":
+ rv.append((filename, "deleted"))
+ else:
+ rv.append((filename, "modified"))
+ return rv
+
+ def paths(self):
+ data = self.git("ls-tree", "--name-only", "--full-tree", "-r", "HEAD")
+ return [item for item in data.split("\n") if not item.endswith(os.path.sep)]
+
+ def local_status(self, staged, worktree, filenames):
+        # Convert the complex range of statuses that git can have to the two
+        # values we care about ("modified" and "deleted") and return a
+        # dictionary mapping filenames to statuses
+
+ rv = {}
+
+ if (staged, worktree) in [("D", "D"), ("A", "U"), ("U", "D"), ("U", "A"),
+ ("D", "U"), ("A", "A"), ("U", "U")]:
+ raise Exception("Can't operate on tree containing unmerged paths")
+
+ if staged == "R":
+ assert len(filenames) == 2
+ dest, src = filenames
+ rv[dest] = "modified"
+ rv[src] = "deleted"
+ else:
+ assert len(filenames) == 1
+
+ filename = filenames[0]
+
+ if staged == "D" or worktree == "D":
+ # Actually if something is deleted in the index but present in the worktree
+ # it will get included by having a status of both "D " and "??".
+ # It isn't clear whether that's a bug
+ rv[filename] = "deleted"
+ elif staged == "?" and worktree == "?":
+ # A new file. If it's a directory, recurse into it
+ if os.path.isdir(os.path.join(self.tests_root, filename)):
+ if filename[-1] != '/':
+ filename += '/'
+ rv.update(self.local_changes(filename))
+ else:
+ rv[filename] = "modified"
+ else:
+ rv[filename] = "modified"
+
+ return rv
+
+class NoVCSTree(TestTree):
+ """Subclass that doesn't depend on git"""
+
+ ignore = ["*.py[c|0]", "*~", "#*"]
+
+ def current_rev(self):
+ return None
+
+ def local_changes(self):
+ # Put all files into local_changes and rely on Manifest.update to de-dupe
+        # changes that were in fact committed at the base rev.
+
+ rv = []
+ for dir_path, dir_names, filenames in os.walk(self.tests_root):
+ for filename in filenames:
+ if any(fnmatch(filename, pattern) for pattern in self.ignore):
+ continue
+ rel_path = os.path.relpath(os.path.join(dir_path, filename),
+ self.tests_root)
+ if is_blacklisted(rel_path_to_url(rel_path, self.url_base)):
+ continue
+ rv.append((rel_path, "modified"))
+ return dict(rv)
+
+ def committed_changes(self, base_rev=None):
+ return None
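
As an aside, a simplified standalone sketch of the ``git status -z`` record format that GitTree.local_changes parses above: two status characters, a space, then a NUL-terminated path, with rename records carrying the destination path followed by the source path (illustration only, not a drop-in replacement)::

    def parse_status_z(data):
        entries = data.split("\0")[:-1]          # the output is NUL-terminated
        records = []
        i = 0
        while i < len(entries):
            staged, worktree, path = entries[i][0], entries[i][1], entries[i][3:]
            filenames = [path]
            if staged == "R":                    # renames carry a second path
                i += 1
                filenames.append(entries[i])
            records.append((staged, worktree, filenames))
            i += 1
        return records

    # parse_status_z("?? new-test.html\0R  new-name.html\0old-name.html\0")
    # -> [('?', '?', ['new-test.html']),
    #     ('R', ' ', ['new-name.html', 'old-name.html'])]
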
diff --git a/testing/web-platform/tests/tools/manifest/update.py b/testing/web-platform/tests/tools/manifest/update.py
new file mode 100644
index 000000000..8460af257
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/update.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+import argparse
+import imp
+import os
+import sys
+
+import manifest
+from . import vcs
+from .log import get_logger
+from .tree import GitTree, NoVCSTree
+
+here = os.path.dirname(__file__)
+
+def update(tests_root, url_base, manifest, ignore_local=False):
+ if vcs.is_git_repo(tests_root):
+ tests_tree = GitTree(tests_root, url_base)
+ remove_missing_local = False
+ else:
+ tests_tree = NoVCSTree(tests_root, url_base)
+ remove_missing_local = not ignore_local
+
+ if not ignore_local:
+ local_changes = tests_tree.local_changes()
+ else:
+ local_changes = None
+
+ manifest.update(tests_root,
+ url_base,
+ tests_tree.current_rev(),
+ tests_tree.committed_changes(manifest.rev),
+ local_changes,
+ remove_missing_local=remove_missing_local)
+
+
+def update_from_cli(**kwargs):
+ tests_root = kwargs["tests_root"]
+ path = kwargs["path"]
+ assert tests_root is not None
+
+ m = None
+ logger = get_logger()
+
+ if not kwargs.get("rebuild", False):
+ try:
+ m = manifest.load(tests_root, path)
+ except manifest.ManifestVersionMismatch:
+ logger.info("Manifest version changed, rebuilding")
+ m = None
+ else:
+ logger.info("Updating manifest")
+
+ if m is None:
+ m = manifest.Manifest(None)
+
+
+ update(tests_root,
+ kwargs["url_base"],
+ m,
+ ignore_local=kwargs.get("ignore_local", False))
+ manifest.write(m, path)
+
+
+def abs_path(path):
+ return os.path.abspath(os.path.expanduser(path))
+
+
+def create_parser():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-p", "--path", type=abs_path, help="Path to manifest file.")
+ parser.add_argument(
+ "--tests-root", type=abs_path, help="Path to root of tests.")
+ parser.add_argument(
+ "-r", "--rebuild", action="store_true", default=False,
+ help="Force a full rebuild of the manifest.")
+ parser.add_argument(
+ "--ignore-local", action="store_true", default=False,
+ help="Don't include uncommitted local changes in the manifest.")
+ parser.add_argument(
+ "--url-base", action="store", default="/",
+ help="Base url to use as the mount point for tests in this manifest.")
+ return parser
+
+
+def find_top_repo():
+ path = here
+ rv = None
+ while path != "/":
+ if vcs.is_git_repo(path):
+ rv = path
+ path = os.path.abspath(os.path.join(path, os.pardir))
+
+ return rv
+
+def main(default_tests_root=None):
+ opts = create_parser().parse_args()
+
+ if opts.tests_root is None:
+ tests_root = None
+ if default_tests_root is not None:
+ tests_root = default_tests_root
+ else:
+ tests_root = find_top_repo()
+
+ if tests_root is None:
+ print >> sys.stderr, """No git repo found; could not determine test root.
+Run again with --tests-root"""
+ sys.exit(1)
+
+ opts.tests_root = tests_root
+
+ if opts.path is None:
+ opts.path = os.path.join(opts.tests_root, "MANIFEST.json")
+
+ update_from_cli(**vars(opts))
+
+
+if __name__ == "__main__":
+ main()
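
For illustration, a hypothetical programmatic invocation of the updater above; the paths are placeholders and the import path is an assumption, while the keyword names mirror the argparse options defined in create_parser::

    from update import update_from_cli   # import path is an assumption

    update_from_cli(tests_root="/path/to/web-platform-tests",
                    path="/path/to/web-platform-tests/MANIFEST.json",
                    url_base="/",
                    rebuild=False,
                    ignore_local=False)
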
diff --git a/testing/web-platform/tests/tools/manifest/utils.py b/testing/web-platform/tests/tools/manifest/utils.py
new file mode 100644
index 000000000..c6b27229c
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/utils.py
@@ -0,0 +1,52 @@
+import os
+from six import BytesIO
+
+blacklist = ["/tools/", "/resources/", "/common/", "/conformance-checkers/", "/_certs/"]
+blacklist_in = ["/resources/", "/support/"]
+
+def rel_path_to_url(rel_path, url_base="/"):
+ assert not os.path.isabs(rel_path)
+ if url_base[0] != "/":
+ url_base = "/" + url_base
+ if url_base[-1] != "/":
+ url_base += "/"
+ return url_base + rel_path.replace(os.sep, "/")
+
+def is_blacklisted(url):
+ if "/" not in url[1:]:
+ return True
+ for item in blacklist:
+ if url.startswith(item):
+ return True
+ for item in blacklist_in:
+ if item in url:
+ return True
+ return False
+
+def from_os_path(path):
+ return path.replace(os.path.sep, "/")
+
+def to_os_path(path):
+ return path.replace("/", os.path.sep)
+
+class ContextManagerBytesIO(BytesIO):
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.close()
+
+class cached_property(object):
+ def __init__(self, func):
+ self.func = func
+ self.__doc__ = getattr(func, "__doc__")
+ self.name = func.__name__
+
+ def __get__(self, obj, cls=None):
+ if obj is None:
+ return self
+
+ if self.name not in obj.__dict__:
+ obj.__dict__[self.name] = self.func(obj)
+ obj.__dict__.setdefault("__cached_properties__", set()).add(self.name)
+ return obj.__dict__[self.name]
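
A small illustration of the cached_property descriptor above: the wrapped function runs once per instance, the result is stored in the instance __dict__, and the property name is tracked in the instance's __cached_properties__ set (the import path is an assumption)::

    from utils import cached_property   # import path is an assumption

    class Example(object):
        calls = 0

        @cached_property
        def value(self):
            Example.calls += 1
            return 42

    e = Example()
    assert e.value == 42
    assert e.value == 42                       # second access hits the cache
    assert Example.calls == 1                  # the function ran only once
    assert "value" in e.__cached_properties__  # property name tracked per instance
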
diff --git a/testing/web-platform/tests/tools/manifest/vcs.py b/testing/web-platform/tests/tools/manifest/vcs.py
new file mode 100644
index 000000000..93bd445e1
--- /dev/null
+++ b/testing/web-platform/tests/tools/manifest/vcs.py
@@ -0,0 +1,25 @@
+import os
+import subprocess
+
+def get_git_func(repo_path):
+ def git(cmd, *args):
+ full_cmd = ["git", cmd] + list(args)
+ return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
+ return git
+
+
+def is_git_repo(tests_root):
+ return os.path.exists(os.path.join(tests_root, ".git"))
+
+
+_repo_root = None
+def get_repo_root(initial_dir=None):
+ global _repo_root
+
+ if initial_dir is None:
+ initial_dir = os.path.dirname(__file__)
+
+ if _repo_root is None:
+ git = get_git_func(initial_dir)
+ _repo_root = git("rev-parse", "--show-toplevel").rstrip()
+ return _repo_root
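
A hypothetical usage sketch of the helpers above; the repository path is a placeholder and the import path is an assumption::

    from vcs import get_git_func, is_git_repo   # import path is an assumption

    repo = "/path/to/web-platform-tests"
    if is_git_repo(repo):
        git = get_git_func(repo)
        print(git("rev-parse", "--short", "HEAD"))   # any git subcommand plus args
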
diff --git a/testing/web-platform/tests/tools/py/AUTHORS b/testing/web-platform/tests/tools/py/AUTHORS
new file mode 100644
index 000000000..8c0cf9b71
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/AUTHORS
@@ -0,0 +1,24 @@
+Holger Krekel, holger at merlinux eu
+Benjamin Peterson, benjamin at python org
+Ronny Pfannschmidt, Ronny.Pfannschmidt at gmx de
+Guido Wesdorp, johnny at johnnydebris net
+Samuele Pedroni, pedronis at openend se
+Carl Friedrich Bolz, cfbolz at gmx de
+Armin Rigo, arigo at tunes org
+Maciek Fijalkowski, fijal at genesilico pl
+Brian Dorsey, briandorsey at gmail com
+Floris Bruynooghe, flub at devork be
+merlinux GmbH, Germany, office at merlinux eu
+
+Contributors include::
+
+Ross Lawley
+Ralf Schmitt
+Chris Lamb
+Harald Armin Massa
+Martijn Faassen
+Ian Bicking
+Jan Balster
+Grig Gheorghiu
+Bob Ippolito
+Christian Tismer
diff --git a/testing/web-platform/tests/tools/py/CHANGELOG b/testing/web-platform/tests/tools/py/CHANGELOG
new file mode 100644
index 000000000..712fc4c53
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/CHANGELOG
@@ -0,0 +1,1089 @@
+1.4.31
+==================================================
+
+- fix local().copy(dest, mode=True) to also work
+ with unicode.
+
+- pass better error message with svn EEXIST paths
+
+1.4.30
+==================================================
+
+- fix issue68 an assert with a multiline list comprehension
+ was not reported correctly. Thanks Henrik Heibuerger.
+
+
+1.4.29
+==================================================
+
+- fix issue55: revert a change to the statement finding algorithm
+ which is used by pytest for generating tracebacks.
+ Thanks Daniel Hahler for initial analysis.
+
+- fix pytest issue254 for when traceback rendering can't
+ find valid source code. Thanks Ionel Cristian Maries.
+
+
+1.4.28
+==================================================
+
+- fix issue64 -- dirpath regression when "abs=True" is passed.
+ Thanks Gilles Dartiguelongue.
+
+1.4.27
+==================================================
+
+- fix issue59: point to new repo site
+
+- allow a new ensuresyspath="append" mode for py.path.local.pyimport()
+  so that a necessary import path is appended instead of prepended to
+ sys.path
+
+- strike undocumented, untested argument to py.path.local.pypkgpath
+
+- speed up py.path.local.dirpath by a factor of 10
+
+1.4.26
+==================================================
+
+- avoid calling normpath twice in py.path.local
+
+- py.builtin._reraise properly reraises under Python3 now.
+
+- fix issue53 - remove module index, thanks jenisys.
+
+- allow posix path separators when "fnmatch" is called.
+ Thanks Christian Long for the complete PR.
+
+1.4.25
+==================================================
+
+- fix issue52: vaguely fix py25 compat of py.path.local (it's not
+ officially supported), also fix docs
+
+- fix pytest issue 589: when checking if we have a recursion error
+ check for the specific "maximum recursion depth" text of the exception.
+
+1.4.24
+==================================================
+
+- Fix retrieving source when an else: line has an other statement on
+ the same line.
+
+- add localpath read_text/write_text/read_bytes/write_bytes methods
+ as shortcuts and clearer bytes/text interfaces for read/write.
+ Adapted from a PR from Paul Moore.
+
+
+1.4.23
+==================================================
+
+- use newer apipkg version which makes attribute access on
+ alias modules resolve to None rather than an ImportError.
+ This helps with code that uses inspect.getframeinfo()
+ on py34 which causes a complete walk on sys.modules
+ thus triggering the alias module to resolve and blowing
+ up with ImportError. The negative side is that something
+ like "py.test.X" will now result in None instead of "importerror: pytest"
+ if pytest is not installed. But you shouldn't import "py.test"
+ anyway anymore.
+
+- adapt one svn test to only check for any exception instead
+ of specific ones because different svn versions cause different
+ errors and we don't care.
+
+
+1.4.22
+==================================================
+
+- refactor class-level registry on ForkedFunc child start/finish
+ event to become instance based (i.e. passed into the constructor)
+
+1.4.21
+==================================================
+
+- ForkedFunc now has class-level register_on_start/on_exit()
+ methods to allow adding information in the boxed process.
+ Thanks Marc Schlaich.
+
+- ForkedFunc in the child opens in "auto-flush" mode for
+ stdout/stderr so that when a subprocess dies you can see
+ its output even if it didn't flush itself.
+
+- refactor traceback generation in light of pytest issue 364
+ (shortening tracebacks). you can now set a new traceback style
+ on a per-entry basis such that a caller can force entries to be
+  displayed as short or long entries.
+
+- win32: py.path.local.sysfind(name) will preferably return files with
+ extensions so that if "X" and "X.bat" or "X.exe" is on the PATH,
+ one of the latter two will be returned.
+
+1.4.20
+==================================================
+
+- ignore unicode decode errors in xmlescape. Thanks Anatoly Bubenkoff.
+
+- on python2 modify traceback.format_exception_only to match python3
+ behaviour, namely trying to print unicode for Exception instances
+
+- use a safer way for serializing exception reports (helps to fix
+ pytest issue413)
+
+Changes between 1.4.18 and 1.4.19
+==================================================
+
+- merge in apipkg fixes
+
+- some micro-optimizations in py/_code/code.py for speeding
+ up pytest runs. Thanks Alex Gaynor for initiative.
+
+- check PY_COLORS=1 or PY_COLORS=0 to force coloring/not-coloring
+ for py.io.TerminalWriter() independently from capabilities
+ of the output file. Thanks Marc Abramowitz for the PR.
+
+- some fixes to unicode handling in assertion handling.
+ Thanks for the PR to Floris Bruynooghe. (This helps
+ to fix pytest issue 319).
+
+- depend on setuptools presence, remove distribute_setup
+
+Changes between 1.4.17 and 1.4.18
+==================================================
+
+- introduce path.ensure_dir() as a synonym for ensure(..., dir=1)
+
+- some unicode/python3 related fixes wrt to path manipulations
+ (if you start passing unicode particular in py2 you might
+ still get problems, though)
+
+Changes between 1.4.16 and 1.4.17
+==================================================
+
+- make py.io.TerminalWriter() prefer colorama if it is available
+ and avoid empty lines when separator-lines are printed by
+ being defensive and reducing the working terminalwidth by 1
+
+- introduce optional "expanduser" argument to py.path.local
+  so that local("~", expanduser=True) gives the home
+ directory of "user".
+
+Changes between 1.4.15 and 1.4.16
+==================================================
+
+- fix issue35 - define __gt__ ordering between a local path
+ and strings
+
+- fix issue36 - make chdir() work even if os.getcwd() fails.
+
+- add path.exists/isdir/isfile/islink shortcuts
+
+- introduce local path.as_cwd() context manager.
+
+- introduce p.write(ensure=1) and p.open(ensure=1)
+  where ensure triggers creation of necessary parent
+  dirs (see the sketch below).
+
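For illustration (assuming ``py`` is importable; the paths are placeholders), the two conveniences above combine as follows::

    import py

    p = py.path.local("/tmp/example/data/output.txt")
    p.write("hello", ensure=1)                # missing parent dirs are created

    with py.path.local("/tmp/example").as_cwd():
        pass                                  # os.getcwd() is /tmp/example here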
+
+Changes between 1.4.14 and 1.4.15
+==================================================
+
+- majorly speed up some common calling patterns with
+ LocalPath.listdir()/join/check/stat functions considerably.
+
+- fix an edge case with fnmatch where a glob style pattern appeared
+ in an absolute path.
+
+Changes between 1.4.13 and 1.4.14
+==================================================
+
+- fix dupfile to work with files that don't
+ carry a mode. Thanks Jason R. Coombs.
+
+Changes between 1.4.12 and 1.4.13
+==================================================
+
+- fix getting statementrange/compiling a file ending
+ in a comment line without newline (on python2.5)
+- for local paths you can pass "mode=True" to a copy()
+ in order to copy permission bits (underlying mechanism
+ is using shutil.copymode)
+- add paths arguments to py.path.local.sysfind to restrict
+  search to the directories in the path.
+- add isdir/isfile/islink to path.stat() objects allowing to perform
+ multiple checks without calling out multiple times
+- drop py.path.local.__new__ in favour of a simpler __init__
+- iniconfig: allow "name:value" settings in config files, no space after
+ "name" required
+- fix issue 27 - NameError in unlikely untested case of saferepr
+
+
+Changes between 1.4.11 and 1.4.12
+==================================================
+
+- fix python2.4 support - for pre-AST interpreters re-introduce
+ old way to find statements in exceptions (closes pytest issue 209)
+- add tox.ini to distribution
+- fix issue23 - print *,** args information in tracebacks,
+ thanks Manuel Jacob
+
+
+Changes between 1.4.10 and 1.4.11
+==================================================
+
+- use _ast to determine statement ranges when printing tracebacks -
+ avoiding multi-second delays on some large test modules
+- fix an internal test to not use class-denoted pytest_funcarg__
+- fix a doc link to bug tracker
+- try to make terminal.write() printing more robust against
+ unicodeencode/decode problems, amend according test
+- introduce py.builtin.text and py.builtin.bytes
+ to point to respective str/unicode (py2) and bytes/str (py3) types
+- fix error handling on win32/py33 for ENODIR
+
+Changes between 1.4.9 and 1.4.10
+==================================================
+
+- terminalwriter: default to encode to UTF8 if no encoding is defined
+ on the output stream
+- issue22: improve heuristic for finding the statementrange in exceptions
+
+Changes between 1.4.8 and 1.4.9
+==================================================
+
+- fix bug of path.visit() which would not recognize glob-style patterns
+ for the "rec" recursion argument
+- changed iniconfig parsing to better conform, now the chars ";"
+ and "#" only mark a comment at the stripped start of a line
+- include recent apipkg-1.2
+- change internal terminalwriter.line/reline logic to more nicely
+ support file spinners
+
+Changes between 1.4.7 and 1.4.8
+==================================================
+
+- fix issue 13 - correct handling of the tag name object in xmlgen
+- fix issue 14 - support raw attribute values in xmlgen
+- fix windows terminalwriter printing/re-line problem
+- update distribute_setup.py to 0.6.27
+
+Changes between 1.4.6 and 1.4.7
+==================================================
+
+- fix issue11 - own test failure with python3.3 / Thanks Benjamin Peterson
+- help fix pytest issue 102
+
+Changes between 1.4.5 and 1.4.6
+==================================================
+
+- help to fix pytest issue99: unify output of
+ ExceptionInfo.getrepr(style="native") with ...(style="long")
+- fix issue7: source.getstatementrange() now raises proper error
+ if no valid statement can be found
+- fix issue8: fix code and tests of svnurl/svnwc to work on subversion 1.7 -
+  note that path.status(updates=1) will not work properly because svn 1.7's
+  status --xml output is broken.
+- make source.getstatementrange() more resilient about non-python code frames
+  (as seen from jinja2)
+- make traceback recursion detection more resilient
+ about the eval magic of a decorator library
+- iniconfig: add support for ; as comment starter
+- properly handle lists in xmlgen on python3
+- normalize py.code.getfslineno(obj) to always return a (string, int) tuple
+ defaulting to ("", -1) respectively if no source code can be found for obj.
+
+Changes between 1.4.4 and 1.4.5
+==================================================
+
+- improve some unicode handling in terminalwriter and capturing
+ (used by pytest)
+
+Changes between 1.4.3 and 1.4.4
+==================================================
+
+- a few fixes and assertion related refinements for pytest-2.1
+- guard py.code.Code and getfslineno against bogus input
+ and make py.code.Code objects for object instance
+ by looking up their __call__ function.
+- make exception presentation robust against invalid current cwd
+
+Changes between 1.4.2 and 1.4.3
+==================================================
+
+- fix terminal coloring issue for skipped tests (thanks Amaury)
+- fix issue4 - large calls to ansi_print (thanks Amaury)
+
+Changes between 1.4.1 and 1.4.2
+==================================================
+
+- fix (pytest) issue23 - tmpdir argument now works on Python3.2 and WindowsXP
+ (which apparently starts to offer os.symlink now)
+
+- better error message for syntax errors from compiled code
+
+- small fix to better deal with (un-)colored terminal output on windows
+
+Changes between 1.4.0 and 1.4.1
+==================================================
+
+- fix issue1 - py.error.* classes to be pickleable
+
+- fix issue2 - on windows32 use PATHEXT as the list of potential
+  extensions to find binaries with py.path.local.sysfind(commandname)
+
+- fix (pytest-) issue10 and refine assertion reinterpretation
+ to avoid breaking if the __nonzero__ of an object fails
+
+- fix (pytest-) issue17 where python3 does not like "import *"
+ leading to misrepresentation of import-errors in test modules
+
+- fix py.error.* attribute pypy access issue
+
+- allow path.samefile(arg) to succeed when arg is a relative filename
+
+- fix (pytest-) issue20 path.samefile(relpath) works as expected now
+
+- fix (pytest-) issue8 len(long_list) now shows the length of the list
+
+Changes between 1.3.4 and 1.4.0
+==================================================
+
+- py.test was moved to a separate "pytest" package. What remains is
+ a stub hook which will proxy ``import py.test`` to ``pytest``.
+- all command line tools ("py.cleanup/lookup/countloc/..." moved
+ to "pycmd" package)
+- removed the old and deprecated "py.magic" namespace
+- use apipkg-1.1 and make py.apipkg.initpkg|ApiModule available
+- add py.iniconfig module for brain-dead easy ini-config file parsing
+- introduce py.builtin.any()
+- path objects have a .dirname attribute now (equivalent to
+ os.path.dirname(path))
+- path.visit() accepts breadthfirst (bf) and sort options
+- remove deprecated py.compat namespace
+
+Changes between 1.3.3 and 1.3.4
+==================================================
+
+- fix issue111: improve install documentation for windows
+- fix issue119: fix custom collectability of __init__.py as a module
+- fix issue116: --doctestmodules work with __init__.py files as well
+- fix issue115: unify internal exception passthrough/catching/GeneratorExit
+- fix issue118: new --tb=native for presenting cpython-standard exceptions
+
+Changes between 1.3.2 and 1.3.3
+==================================================
+
+- fix issue113: assertion representation problem with triple-quoted strings
+ (and possibly other cases)
+- make conftest loading detect that a conftest file with the same
+ content was already loaded, avoids surprises in nested directory structures
+ which can be produced e.g. by Hudson. It probably removes the need to use
+ --confcutdir in most cases.
+- fix terminal coloring for win32
+ (thanks Michael Foord for reporting)
+- fix weirdness: make terminal width detection work on stdout instead of stdin
+ (thanks Armin Ronacher for reporting)
+- remove trailing whitespace in all py/text distribution files
+
+Changes between 1.3.1 and 1.3.2
+==================================================
+
+New features
+++++++++++++++++++
+
+- fix issue103: introduce py.test.raises as context manager, examples::
+
+ with py.test.raises(ZeroDivisionError):
+ x = 0
+ 1 / x
+
+ with py.test.raises(RuntimeError) as excinfo:
+ call_something()
+
+ # you may do extra checks on excinfo.value|type|traceback here
+
+ (thanks Ronny Pfannschmidt)
+
+- Funcarg factories can now dynamically apply a marker to a
+ test invocation. This is for example useful if a factory
+ provides parameters to a test which are expected-to-fail::
+
+ def pytest_funcarg__arg(request):
+ request.applymarker(py.test.mark.xfail(reason="flaky config"))
+ ...
+
+ def test_function(arg):
+ ...
+
+- improved error reporting on collection and import errors. This makes
+ use of a more general mechanism, namely that for custom test item/collect
+ nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can
+ override it to return a string error representation of your choice
+ which is going to be reported as a (red) string.
+
+- introduce '--junitprefix=STR' option to prepend a prefix
+ to all reports in the junitxml file.
+
+Bug fixes / Maintenance
+++++++++++++++++++++++++++
+
+- make tests and the ``pytest_recwarn`` plugin in particular fully compatible
+ to Python2.7 (if you use the ``recwarn`` funcarg warnings will be enabled so that
+ you can properly check for their existence in a cross-python manner).
+- refine --pdb: ignore xfailed tests, unify its TB-reporting and
+ don't display failures again at the end.
+- fix assertion interpretation with the ** operator (thanks Benjamin Peterson)
+- fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson)
+- fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous)
+- fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny)
+- fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson)
+- fix py.code.compile(source) to generate unique filenames
+- fix assertion re-interp problems on PyPy, by deferring code
+ compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot)
+- fix py.path.local.pyimport() to work with directories
+- streamline py.path.local.mkdtemp implementation and usage
+- don't print empty lines when showing junitxml-filename
+- add optional boolean ignore_errors parameter to py.path.local.remove
+- fix terminal writing on win32/python2.4
+- py.process.cmdexec() now tries harder to return properly encoded unicode objects
+ on all python versions
+- install plain py.test/py.which scripts also for Jython, this helps to
+ get canonical script paths in virtualenv situations
+- make path.bestrelpath(path) return ".", note that when calling
+ X.bestrelpath the assumption is that X is a directory.
+- make initial conftest discovery ignore "--" prefixed arguments
+- fix resultlog plugin when used in an multicpu/multihost xdist situation
+ (thanks Jakub Gustak)
+- perform distributed testing related reporting in the xdist-plugin
+ rather than having dist-related code in the generic py.test
+ distribution
+- fix homedir detection on Windows
+- ship distribute_setup.py version 0.6.13
+
+Changes between 1.3.0 and 1.3.1
+==================================================
+
+New features
+++++++++++++++++++
+
+- issue91: introduce new py.test.xfail(reason) helper
+ to imperatively mark a test as expected to fail. Can
+ be used from within setup and test functions. This is
+ useful especially for parametrized tests when certain
+ configurations are expected-to-fail. In this case the
+ declarative approach with the @py.test.mark.xfail cannot
+ be used as it would mark all configurations as xfail.
+
+- issue102: introduce new --maxfail=NUM option to stop
+ test runs after NUM failures. This is a generalization
+ of the '-x' or '--exitfirst' option which is now equivalent
+ to '--maxfail=1'. Both '-x' and '--maxfail' will
+ now also print a line near the end indicating the Interruption.
+
+- issue89: allow py.test.mark decorators to be used on classes
+ (class decorators were introduced with python2.6) and
+ also allow to have multiple markers applied at class/module level
+ by specifying a list.
+
+- improve and refine letter reporting in the progress bar:
+ . pass
+ f failed test
+ s skipped tests (reminder: use for dependency/platform mismatch only)
+ x xfailed test (test that was expected to fail)
+ X xpassed test (test that was expected to fail but passed)
+
+ You can use any combination of 'fsxX' with the '-r' extended
+ reporting option. The xfail/xpass results will show up as
+ skipped tests in the junitxml output - which also fixes
+ issue99.
+
+- make py.test.cmdline.main() return the exitstatus instead of raising
+ SystemExit and also allow it to be called multiple times. This of
+ course requires that your application and tests are properly teared
+ down and don't have global state.
+
+Fixes / Maintenance
+++++++++++++++++++++++
+
+- improved traceback presentation:
+ - improved and unified reporting for "--tb=short" option
+ - Errors during test module imports are much shorter, (using --tb=short style)
+ - raises shows shorter more relevant tracebacks
+ - --fulltrace now more systematically makes traces longer / inhibits cutting
+
+- improve support for raises and other dynamically compiled code by
+ manipulating python's linecache.cache instead of the previous
+ rather hacky way of creating custom code objects. This makes
+  it seamlessly work on Jython and PyPy where it previously didn't.
+
+- fix issue96: make capturing more resilient against Control-C
+ interruptions (involved somewhat substantial refactoring
+ to the underlying capturing functionality to avoid race
+ conditions).
+
+- fix chaining of conditional skipif/xfail decorators - so it works now
+ as expected to use multiple @py.test.mark.skipif(condition) decorators,
+ including specific reporting which of the conditions lead to skipping.
+
+- fix issue95: late-import zlib so that it's not required
+ for general py.test startup.
+
+- fix issue94: make reporting more robust against bogus source code
+ (and internally be more careful when presenting unexpected byte sequences)
+
+
+Changes between 1.2.1 and 1.3.0
+==================================================
+
+- deprecate --report option in favour of a new shorter and easier to
+ remember -r option: it takes a string argument consisting of any
+ combination of 'xfsX' characters. They relate to the single chars
+ you see during the dotted progress printing and will print an extra line
+ per test at the end of the test run. This extra line indicates the exact
+ position or test ID that you directly paste to the py.test cmdline in order
+ to re-run a particular test.
+
+- allow external plugins to register new hooks via the new
+ pytest_addhooks(pluginmanager) hook. The new release of
+ the pytest-xdist plugin for distributed and looponfailing
+ testing requires this feature.
+
+- add a new pytest_ignore_collect(path, config) hook to allow projects and
+ plugins to define exclusion behaviour for their directory structure -
+ for example you may define in a conftest.py this method::
+
+ def pytest_ignore_collect(path):
+ return path.check(link=1)
+
+ to prevent even a collection try of any tests in symlinked dirs.
+
+- new pytest_pycollect_makemodule(path, parent) hook for
+ allowing customization of the Module collection object for a
+ matching test module.
+
+- extend and refine xfail mechanism:
+ ``@py.test.mark.xfail(run=False)`` do not run the decorated test
+ ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
+  specifying ``--runxfail`` on command line virtually ignores xfail markers
+
+- expose (previously internal) commonly useful methods:
+  py.io.get_terminal_width() -> return terminal width
+ py.io.ansi_print(...) -> print colored/bold text on linux/win32
+ py.io.saferepr(obj) -> return limited representation string
+
+- expose test outcome related exceptions as py.test.skip.Exception,
+ py.test.raises.Exception etc., useful mostly for plugins
+ doing special outcome interpretation/tweaking
+
+- (issue85) fix junitxml plugin to handle tests with non-ascii output
+
+- fix/refine python3 compatibility (thanks Benjamin Peterson)
+
+- fixes for making the jython/win32 combination work, note however:
+ jython2.5.1/win32 does not provide a command line launcher, see
+ http://bugs.jython.org/issue1491 . See pylib install documentation
+ for how to work around.
+
+- fixes for handling of unicode exception values and unprintable objects
+
+- (issue87) fix unboundlocal error in assertionold code
+
+- (issue86) improve documentation for looponfailing
+
+- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
+
+- ship distribute_setup.py version 0.6.10
+
+- added links to the new capturelog and coverage plugins
+
+
+Changes between 1.2.0 and 1.2.1
+=====================================
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+ allows to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+ according setup failed. This includes refinements for calling setup_module/class functions
+ which will now only be called once instead of the previous behaviour where they'd be called
+ multiple times if they raise an exception (including a Skipped exception). Any exception
+  will be recorded and associated with all tests in the corresponding module/class scope.
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
+
+Changes between 1.2 and 1.1.1
+=====================================
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+ which will not need any libraries installed. thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent specified path from collection.
+ Can be specified multiple times.
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
+
+- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+ to a test within a test collection directly. This might eventually
+ evolve as a full substitute to "-k" specifications.
+
+- streamlined plugin loading: order is now as documented in
+ customize.html: setuptools, ENV, commandline, conftest.
+  also setuptools entry point names are turned to canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+ will now prevent further hooks to be called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
+
+Changes between 1.1.1 and 1.1.0
+=====================================
+
+- introduce automatic plugin registration via 'pytest11'
+ entrypoints via setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain test and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
+
+Changes between 1.1.0 and 1.0.2
+=====================================
+
+* adjust and improve docs
+
+* remove py.rest tool and internal namespace - it was
+ never really advertised and can still be used with
+ the old release if needed. If there is interest
+ it could be revived into its own tool i guess.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup which creates option group
+
+* add --report command line option that allows to control showing of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+ at function, class and modules level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches according to expectations
+ for test functions with multiple arguments.
+
+* merge Jarko's fixes, issue #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with >=2.3,
+ add helpers to ease keeping 2 and 3k compatible code
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
+
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
+
+Changes between 1.0.1 and 1.0.2
+=====================================
+
+* fixing packaging issues, triggered by fedora redhat packaging,
+ also added doc, examples and contrib dirs to the tarball.
+
+* added a documentation link to the new django plugin.
+
+Changes between 1.0.0 and 1.0.1
+=====================================
+
+* added a 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* capturing of unicode writes or encoded strings to sys.stdout/err
+ work better, also terminalwriting was adapted and somewhat
+ unified between windows and linux.
+
+* improved documentation layout and content a lot
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
+
+Changes between 1.0.0b9 and 1.0.0
+=====================================
+
+* more terse reporting try to show filesystem path relatively to current dir
+* improve xfail output a bit
+
+Changes between 1.0.0b8 and 1.0.0b9
+=====================================
+
+* cleanly handle and report final teardown of test setup
+
+* fix svn-1.6 compat issue with py.path.svnwc().versioned()
+ (thanks Wouter Vanden Hove)
+
+* setup/teardown or collection problems now show as ERRORs
+ or with big "E"'s in the progress lines. they are reported
+ and counted separately.
+
+* dist-testing: properly handle test items that get locally
+ collected but cannot be collected on the remote side - often
+ due to platform/dependency reasons
+
+* simplified py.test.mark API - see keyword plugin documentation
+
+* integrate better with logging: capturing now by default captures
+ test functions and their immediate setup/teardown in a single stream
+
+* capsys and capfd funcargs now have a readouterr() and a close() method
+ (underlyingly py.io.StdCapture/FD objects are used which grew a
+ readouterr() method as well to return snapshots of captured out/err)
+
+* make assert-reinterpretation work better with comparisons not
+  returning bools (reported with numpy, thanks Maciej Fijalkowski)
+
+* reworked per-test output capturing into the pytest_iocapture.py plugin
+ and thus removed capturing code from config object
+
+* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
+
+
+Changes between 1.0.0b7 and 1.0.0b8
+=====================================
+
+* pytest_unittest-plugin is now enabled by default
+
+* introduced pytest_keyboardinterrupt hook and
+  refined the pytest_sessionfinish hook, added tests.
+
+* workaround a buggy logging module interaction ("closing already closed
+ files"). Thanks to Sridhar Ratnakumar for triggering.
+
+* if plugins use "py.test.importorskip" for importing
+ a dependency only a warning will be issued instead
+ of exiting the testing process.
+
+* many improvements to docs:
+  - refined funcargs doc, use the term "factory" instead of "provider"
+ - added a new talk/tutorial doc page
+ - better download page
+ - better plugin docstrings
+ - added new plugins page and automatic doc generation script
+
+* fixed teardown problem related to partially failing funcarg setups
+ (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
+ always invoked even if the "pytest_runtest_setup" failed.
+
+* tweaked doctest output for docstrings in py modules,
+ thanks Radomir.
+
+Changes between 1.0.0b3 and 1.0.0b7
+=============================================
+
+* renamed py.test.xfail back to py.test.mark.xfail to avoid
+ two ways to decorate for xfail
+
+* re-added py.test.mark decorator for setting keywords on functions
+ (it was actually documented so removing it was not nice)
+
+* remove scope-argument from request.addfinalizer() because
+ request.cached_setup has the scope arg. TOOWTDI.
+
+* perform setup finalization before reporting failures
+
+* apply modified patches from Andreas Kloeckner to allow
+ test functions to have no func_code (#22) and to make
+ "-k" and function keywords work (#20)
+
+* apply patch from Daniel Peolzleithner (issue #23)
+
+* resolve issue #18, multiprocessing.Manager() and
+ redirection clash
+
+* make __name__ == "__channelexec__" for remote_exec code
+
+Changes between 1.0.0b1 and 1.0.0b3
+=============================================
+
+* plugin classes are removed: one now defines
+ hooks directly in conftest.py or global pytest_*.py
+ files.
+
+* added new pytest_namespace(config) hook that allows
+ to inject helpers directly to the py.test.* namespace.
+
+* documented and refined many hooks
+
+* added new style of generative tests via
+ pytest_generate_tests hook that integrates
+ well with function arguments.
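+
+  A minimal sketch (argument name and values are illustrative)::
+
+    def pytest_generate_tests(metafunc):
+        if "numentries" in metafunc.funcargnames:
+            for n in (1, 10, 100):
+                metafunc.addcall(funcargs={"numentries": n})
+
+    def test_positive(numentries):
+        assert numentries > 0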
+
+
+Changes between 0.9.2 and 1.0.0b1
+=============================================
+
+* introduced new "funcarg" setup method,
+ see doc/test/funcarg.txt
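+
+  A funcarg factory is a ``pytest_funcarg__NAME`` function, typically in a
+  conftest.py; a minimal sketch (resource and names are hypothetical)::
+
+    def pytest_funcarg__resource(request):
+        return request.cached_setup(
+            setup=lambda: {"ready": True},
+            teardown=lambda res: res.clear(),
+            scope="module")
+
+    def test_uses_resource(resource):
+        assert resource["ready"]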
+
+* introduced plugin architecture and many
+ new py.test plugins, see
+ doc/test/plugins.txt
+
+* teardown_method is now guaranteed to get
+ called after a test method has run.
+
+* new method: py.test.importorskip(mod, minversion)
+ will either import or call py.test.skip()
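+
+  A typical use at test module level (module name and version are
+  hypothetical)::
+
+    import py
+    docutils = py.test.importorskip("docutils", minversion="0.3")
+
+    def test_uses_docutils():
+        assert docutils is not None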
+
+* completely revised internal py.test architecture
+
+* new py.process.ForkedFunc object allowing to
+  fork execution of a function to a subprocess
+  and get a result back.
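+
+  A rough usage sketch, assuming the waitfinish()/retval result API
+  (treat attribute names as illustrative)::
+
+    import py
+
+    def compute():
+        return 6 * 7
+
+    result = py.process.ForkedFunc(compute).waitfinish()
+    assert result.retval == 42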
+
+XXX lots of things missing here XXX
+
+Changes between 0.9.1 and 0.9.2
+===============================
+
+* refined installation and metadata, created new setup.py,
+ now based on setuptools/ez_setup (thanks to Ralf Schmitt
+ for his support).
+
+* improved the way of making py.* scripts available in
+ windows environments, they are now added to the
+ Scripts directory as ".cmd" files.
+
+* py.path.svnwc.status() now is more complete and
+ uses xml output from the 'svn' command if available
+ (Guido Wesdorp)
+
+* fix for py.path.svn* to work with svn 1.5
+ (Chris Lamb)
+
+* fix path.relto(otherpath) method on windows to
+ use normcase for checking if a path is relative.
+
+* py.test's traceback is better parseable from editors
+ (follows the filenames:LINENO: MSG convention)
+ (thanks to Osmo Salomaa)
+
+* fix to javascript-generation, "py.test --runbrowser"
+ should work more reliably now
+
+* removed previously accidentally added
+ py.test.broken and py.test.notimplemented helpers.
+
+* there now is a py.__version__ attribute
+
+Changes between 0.9.0 and 0.9.1
+===============================
+
+This is a fairly complete list of changes between 0.9 and 0.9.1, which can
+serve as a reference for developers.
+
+* allowing + signs in py.path.svn urls [39106]
+* fixed support for Failed exceptions without excinfo in py.test [39340]
+* added support for killing processes for Windows (as well as platforms that
+ support os.kill) in py.misc.killproc [39655]
+* added setup/teardown for generative tests to py.test [40702]
+* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
+* fixed problem with calling .remove() on wcpaths of non-versioned files in
+ py.path [44248]
+* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
+* fail to run greenlet tests when pypy is available, but without stackless
+ [45294]
+* small fixes in rsession tests [45295]
+* fixed issue with 2.5 type representations in py.test [45483, 45484]
+* made displaying of internal reporting issues atomic in py.test
+ [45518]
+* made the py.lookup script ignore non-existing files [45519]
+* improved exception name creation in py.test [45535]
+* made execnet use fewer threads [merge in 45539]
+* removed the lock required for atomic display of reporting issues in py.test
+ [45545]
+* removed globals from execnet [45541, 45547]
+* refactored cleanup mechanics, setDaemon is now set to 1 so that atexit
+  gets called in 2.5 (py.execnet) [45548]
+* fixed bug in joining threads in py.execnet's servemain [45549]
+* refactored py.test.rsession tests to not rely on exact output format anymore
+ [45646]
+* using repr() on test outcome [45647]
+* added 'Reason' classes for py.test.skip() [45648, 45649]
+* killed some unnecessary sanity check in py.test.collect [45655]
+* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
+ usable by Administrators [45901]
+* added support for locking and non-recursive commits to py.path.svnwc [45994]
+* locking files in py.execnet to prevent CPython from segfaulting [46010]
+* added export() method to py.path.svnurl
+* fixed -d -x in py.test [47277]
+* fixed argument concatenation problem in py.path.svnwc [49423]
+* restore py.test behaviour that it exits with code 1 when there are failures
+ [49974]
+* don't fail on html files that don't have an accompanying .txt file [50606]
+* fixed 'utestconvert.py < input' [50645]
+* small fix for code indentation in py.code.source [50755]
+* fix _docgen.py documentation building [51285]
+* improved checks for source representation of code blocks in py.test [51292]
+* added support for passing authentication to py.path.svn* objects [52000,
+ 52001]
+* removed sorted() call for py.apigen tests in favour of [].sort() to support
+ Python 2.3 [52481]
diff --git a/testing/web-platform/tests/tools/py/LICENSE b/testing/web-platform/tests/tools/py/LICENSE
new file mode 100644
index 000000000..31ecdfb1d
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/LICENSE
@@ -0,0 +1,19 @@
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
diff --git a/testing/web-platform/tests/tools/py/MANIFEST.in b/testing/web-platform/tests/tools/py/MANIFEST.in
new file mode 100644
index 000000000..31fb010b4
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/MANIFEST.in
@@ -0,0 +1,9 @@
+include CHANGELOG
+include AUTHORS
+include README.txt
+include setup.py
+include LICENSE
+include conftest.py
+include tox.ini
+graft doc
+graft testing
diff --git a/testing/web-platform/tests/tools/py/README.txt b/testing/web-platform/tests/tools/py/README.txt
new file mode 100644
index 000000000..e327e9373
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/README.txt
@@ -0,0 +1,21 @@
+.. image:: https://drone.io/bitbucket.org/pytest-dev/py/status.png
+ :target: https://drone.io/bitbucket.org/pytest-dev/py/latest
+.. image:: https://pypip.in/v/py/badge.png
+ :target: https://pypi.python.org/pypi/py
+
+The py lib is a Python development support library featuring
+the following tools and modules:
+
+* py.path: uniform local and svn path objects
+* py.apipkg: explicit API control and lazy-importing
+* py.iniconfig: easy parsing of .ini files
+* py.code: dynamic code generation and introspection
+
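+A short, illustrative example of working with py.path (file names are
+made up)::
+
+    import py
+    tmp = py.path.local.mkdtemp()        # create a temporary directory
+    f = tmp.join("hello.txt").ensure()   # create an (empty) file
+    f.write("hello world")
+    assert f.read() == "hello world"
+    assert f in tmp.listdir()
+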
+NOTE: prior to the 1.4 release this distribution used to
+contain py.test which is now its own package, see http://pytest.org
+
+For questions and more information please visit http://pylib.readthedocs.org
+
+Bugs and issues: http://bitbucket.org/pytest-dev/py/issues/
+
+Authors: Holger Krekel and others, 2004-2015
diff --git a/testing/web-platform/tests/tools/py/bench/localpath.py b/testing/web-platform/tests/tools/py/bench/localpath.py
new file mode 100644
index 000000000..ad4fbd8e2
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/bench/localpath.py
@@ -0,0 +1,75 @@
+
+import py
+import timeit
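+# Micro-benchmarks for py.path.local operations (listdir/join/check):
+# each class defines setup() plus a run() method which the __main__
+# block below calls numiter times and times with time.time().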
+
+class Listdir:
+ numiter = 100000
+ numentries = 100
+
+ def setup(self):
+ tmpdir = py.path.local.make_numbered_dir(self.__class__.__name__)
+ for i in range(self.numentries):
+ tmpdir.join(str(i))
+ self.tmpdir = tmpdir
+
+ def run(self):
+ return self.tmpdir.listdir()
+
+class Listdir_arg(Listdir):
+ numiter = 100000
+ numentries = 100
+
+ def run(self):
+ return self.tmpdir.listdir("47")
+
+class Join_onearg(Listdir):
+ def run(self):
+ self.tmpdir.join("17")
+ self.tmpdir.join("18")
+ self.tmpdir.join("19")
+
+class Join_multi(Listdir):
+ def run(self):
+ self.tmpdir.join("a", "b")
+ self.tmpdir.join("a", "b", "c")
+ self.tmpdir.join("a", "b", "c", "d")
+
+class Check(Listdir):
+ def run(self):
+ self.tmpdir.check()
+ self.tmpdir.check()
+ self.tmpdir.check()
+
+class CheckDir(Listdir):
+ def run(self):
+ self.tmpdir.check(dir=1)
+ self.tmpdir.check(dir=1)
+ assert not self.tmpdir.check(dir=0)
+
+class CheckDir2(Listdir):
+ def run(self):
+ self.tmpdir.stat().isdir()
+ self.tmpdir.stat().isdir()
+ assert self.tmpdir.stat().isdir()
+
+class CheckFile(Listdir):
+ def run(self):
+ self.tmpdir.check(file=1)
+ assert not self.tmpdir.check(file=1)
+ assert self.tmpdir.check(file=0)
+
+if __name__ == "__main__":
+ import time
+ for cls in [Listdir, Listdir_arg,
+ Join_onearg, Join_multi,
+ Check, CheckDir, CheckDir2, CheckFile,]:
+
+ inst = cls()
+ inst.setup()
+ now = time.time()
+ for i in xrange(cls.numiter):
+ inst.run()
+ elapsed = time.time() - now
+ print "%s: %d loops took %.2f seconds, per call %.6f" %(
+ cls.__name__,
+ cls.numiter, elapsed, elapsed / cls.numiter)
diff --git a/testing/web-platform/tests/tools/py/conftest.py b/testing/web-platform/tests/tools/py/conftest.py
new file mode 100644
index 000000000..11c2d4425
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/conftest.py
@@ -0,0 +1,71 @@
+import py
+import sys
+
+pytest_plugins = 'doctest pytester'.split()
+
+collect_ignore = ['build', 'doc/_build']
+
+
+import os, py
+pid = os.getpid()
+
+def pytest_addoption(parser):
+ group = parser.getgroup("pylib", "py lib testing options")
+ group.addoption('--runslowtests',
+ action="store_true", dest="runslowtests", default=False,
+ help=("run slow tests"))
+
+def pytest_funcarg__sshhost(request):
+ val = request.config.getvalue("sshhost")
+ if val:
+ return val
+ py.test.skip("need --sshhost option")
+def pytest_generate_tests(metafunc):
+ multi = getattr(metafunc.function, 'multi', None)
+ if multi is not None:
+ assert len(multi.kwargs) == 1
+ for name, l in multi.kwargs.items():
+ for val in l:
+ metafunc.addcall(funcargs={name: val})
+ elif 'anypython' in metafunc.funcargnames:
+ for name in ('python2.4', 'python2.5', 'python2.6',
+ 'python2.7', 'python3.1', 'pypy-c', 'jython'):
+ metafunc.addcall(id=name, param=name)
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+ 'python2.7': r'C:\Python27\python.exe',
+ 'python2.6': r'C:\Python26\python.exe',
+ 'python2.5': r'C:\Python25\python.exe',
+ 'python2.4': r'C:\Python24\python.exe',
+ 'python3.1': r'C:\Python31\python.exe',
+}
+
+def getexecutable(name, cache={}):
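+    # look up an interpreter on PATH and memoize the result; a found
+    # jython is only kept if its --version output (on stderr) mentions 2.5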
+ try:
+ return cache[name]
+ except KeyError:
+ executable = py.path.local.sysfind(name)
+ if executable:
+ if name == "jython":
+ import subprocess
+ popen = subprocess.Popen([str(executable), "--version"],
+ universal_newlines=True, stderr=subprocess.PIPE)
+ out, err = popen.communicate()
+ if not err or "2.5" not in err:
+ executable = None
+ cache[name] = executable
+ return executable
+
+def pytest_funcarg__anypython(request):
+ name = request.param
+ executable = getexecutable(name)
+ if executable is None:
+ if sys.platform == "win32":
+ executable = winpymap.get(name, None)
+ if executable:
+ executable = py.path.local(executable)
+ if executable.check():
+ return executable
+ py.test.skip("no %s found" % (name,))
+ return executable
diff --git a/testing/web-platform/tests/tools/py/doc/Makefile b/testing/web-platform/tests/tools/py/doc/Makefile
new file mode 100644
index 000000000..0a0e89e01
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/Makefile
@@ -0,0 +1,133 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+install: clean html
+ rsync -avz _build/html/ code:www-pylib/
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/py.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/py.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/py"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/py"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ make -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/testing/web-platform/tests/tools/py/doc/_templates/layout.html b/testing/web-platform/tests/tools/py/doc/_templates/layout.html
new file mode 100644
index 000000000..683863aa4
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/_templates/layout.html
@@ -0,0 +1,18 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script type="text/javascript">
+
+ var _gaq = _gaq || [];
+ _gaq.push(['_setAccount', 'UA-7597274-14']);
+ _gaq.push(['_trackPageview']);
+
+ (function() {
+ var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+ ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+ var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+ })();
+
+</script>
+{% endblock %}
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-0.9.0.txt b/testing/web-platform/tests/tools/py/doc/announce/release-0.9.0.txt
new file mode 100644
index 000000000..071093135
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-0.9.0.txt
@@ -0,0 +1,7 @@
+py lib 1.0.0: XXX
+======================================================================
+
+Welcome to the 1.0.0 py lib release - a library aiming to
+support agile and test-driven python development on various levels.
+
+XXX
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-0.9.2.txt b/testing/web-platform/tests/tools/py/doc/announce/release-0.9.2.txt
new file mode 100644
index 000000000..bc2d2ef29
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-0.9.2.txt
@@ -0,0 +1,27 @@
+py lib 0.9.2: bugfix release
+=============================
+
+Welcome to the 0.9.2 py lib and py.test release -
+mainly fixing Windows issues, providing better
+packaging and integration with setuptools.
+
+Here is a quick summary of what the py lib provides:
+
+* py.test: cross-project testing tool with many advanced features
+* py.execnet: ad-hoc code distribution to SSH, Socket and local sub processes
+* py.magic.greenlet: micro-threads on standard CPython ("stackless-light")
+* py.path: path abstractions over local and subversion files
+* rich documentation of py's exported API
+* tested against Linux, Win32, OSX, works on python 2.3-2.6
+
+See here for more information:
+
+Pypi pages: http://pypi.python.org/pypi/py/
+
+Download/Install: http://codespeak.net/py/0.9.2/download.html
+
+Documentation/API: http://codespeak.net/py/0.9.2/index.html
+
+best and have fun,
+
+holger krekel
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.0.0.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.0.0.txt
new file mode 100644
index 000000000..7024255a1
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.0.0.txt
@@ -0,0 +1,63 @@
+
+pylib 1.0.0 released: testing-with-python innovations continue
+--------------------------------------------------------------------
+
+Took a few betas but finally I uploaded a `1.0.0 py lib release`_,
+featuring the mature and powerful py.test tool and "execnet-style"
+*elastic* distributed programming. With the new release, there are
+many new advanced automated testing features - here is a quick summary:
+
+* funcargs_ - pythonic zero-boilerplate fixtures for Python test functions:
+
+ - totally separates test code, test configuration and test setup
+ - ideal for integration and functional tests
+ - allows for flexible and natural test parametrization schemes
+
+* new `plugin architecture`_, allowing easy-to-write project-specific and cross-project single-file plugins. The most notable new external plugin is `oejskit`_ which naturally enables **running and reporting of javascript-unittests in real-life browsers**.
+
+* many new features done in easy-to-improve `default plugins`_, highlights:
+
+ * xfail: mark tests as "expected to fail" and report separately.
+ * pastebin: automatically send tracebacks to pocoo paste service
+ * capture: flexibly capture stdout/stderr of subprocesses, per-test ...
+ * monkeypatch: safely monkeypatch modules/classes from within tests
+ * unittest: run and integrate traditional unittest.py tests
+ * figleaf: generate html coverage reports with the figleaf module
+ * resultlog: generate buildbot-friendly reporting output
+ * ...
+
+* `distributed testing`_ and `elastic distributed execution`_:
+
+ - new unified "TX" URL scheme for specifying remote processes
+ - new distribution modes "--dist=each" and "--dist=load"
+ - new sync/async ways to handle 1:N communication
+ - improved documentation
+
+The py lib continues to offer most of the functionality used by
+the testing tool in `independent namespaces`_.
+
+Some non-test related code, notably greenlets/co-routines and
+api-generation now live as their own projects which simplifies the
+installation procedure because no C-Extensions are required anymore.
+
+The whole package should work well with Linux, Win32 and OSX, on Python
+2.3, 2.4, 2.5 and 2.6. (Expect Python3 compatibility soon!)
+
+For more info, see the py.test and py lib documentation:
+
+ http://pytest.org
+
+ http://pylib.org
+
+have fun,
+holger
+
+.. _`independent namespaces`: http://pylib.org
+.. _`funcargs`: http://codespeak.net/py/dist/test/funcargs.html
+.. _`plugin architecture`: http://codespeak.net/py/dist/test/extend.html
+.. _`default plugins`: http://codespeak.net/py/dist/test/plugin/index.html
+.. _`distributed testing`: http://codespeak.net/py/dist/test/dist.html
+.. _`elastic distributed execution`: http://codespeak.net/py/dist/execnet.html
+.. _`1.0.0 py lib release`: http://pypi.python.org/pypi/py
+.. _`oejskit`: http://codespeak.net/py/dist/test/plugin/oejskit.html
+
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.0.1.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.0.1.txt
new file mode 100644
index 000000000..0c9f8760b
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.0.1.txt
@@ -0,0 +1,48 @@
+1.0.1: improved reporting, nose/unittest.py support, bug fixes
+-----------------------------------------------------------------------
+
+This is a bugfix release of pylib/py.test also coming with:
+
+* improved documentation, improved navigation
+* test failure reporting improvements
+* support for directly running existing nose/unittest.py style tests
+
+visit here for more info, including quickstart and tutorials:
+
+ http://pytest.org and http://pylib.org
+
+
+Changelog 1.0.0 to 1.0.1
+------------------------
+
+* added a default 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* improved documentation, better navigation: see http://pytest.org
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* unicode fixes: capturing and unicode writes to sys.stdout
+  (through e.g. a print statement) now work within tests;
+  they are encoded as "utf8" by default, also terminal writing
+  was adapted and somewhat unified between Windows and Linux
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.0.2.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.0.2.txt
new file mode 100644
index 000000000..235461953
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.0.2.txt
@@ -0,0 +1,5 @@
+1.0.2: packaging fixes
+-----------------------------------------------------------------------
+
+This release purely fixes packaging issues.
+
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.1.0.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.1.0.txt
new file mode 100644
index 000000000..0441c3215
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.1.0.txt
@@ -0,0 +1,115 @@
+py.test/pylib 1.1.0: Python3, Jython, advanced skipping, cleanups ...
+--------------------------------------------------------------------------------
+
+Features:
+
+* compatible to Python3 (single py2/py3 source), `easy to install`_
+* conditional skipping_: skip/xfail based on platform/dependencies
+* generalized marking_: mark tests on a whole-class or whole-module basis
+
+Fixes:
+
+* code reduction and "de-magification" (e.g. 23 KLOC -> 11 KLOC)
+* distributed testing requires the now separately released execnet_ package
+* funcarg-setup/caching, "same-name" test modules now cause an explicit error
+* de-cluttered reporting options, --report for skipped/xfail details
+
+Compatibilities
+
+1.1.0 should allow running test code that already worked well with 1.0.2
+plus some more due to improved unittest/nose compatibility.
+
+More information: http://pytest.org
+
+thanks and have fun,
+
+holger (http://twitter.com/hpk42)
+
+.. _execnet: http://codespeak.net/execnet
+.. _`easy to install`: ../install.html
+.. _marking: ../test/plugin/mark.html
+.. _skipping: ../test/plugin/skipping.html
+
+
+Changelog 1.0.2 -> 1.1.0
+-----------------------------------------------------------------------
+
+* remove py.rest tool and internal namespace - it was
+ never really advertised and can still be used with
+ the old release if needed. If there is interest
+  it could be revived into its own tool, I guess.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup, which creates an option group
+
+* add --report command line option that allows to control showing of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+ at function, class and modules level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches according to expectations
+ for test functions with multiple arguments.
+
+* merge Jarko's fixes, issue #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with >=2.3,
+ add helpers to ease keeping 2 and 3k compatible code
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
+
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.1.1.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.1.1.txt
new file mode 100644
index 000000000..83e6a1fd8
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.1.1.txt
@@ -0,0 +1,48 @@
+py.test/pylib 1.1.1: bugfix release, setuptools plugin registration
+--------------------------------------------------------------------------------
+
+This is a compatibility fixing release of pylib/py.test to work
+better with previous 1.0.x test code bases. It also contains fixes
+and changes to work with `execnet>=1.0.0`_ to provide distributed
+testing and looponfailing testing modes. py-1.1.1 also introduces
+a new mechanism for registering plugins via setuptools.
+
+What is pylib/py.test?
+-----------------------
+
+py.test is an advanced automated testing tool working with
+Python2, Python3 and Jython versions on all major operating
+systems. It has an extensive plugin architecture and can run many
+existing common Python test suites without modification. Moreover,
+it offers some unique features not found in other
+testing tools. See http://pytest.org for more info.
+
+The pylib also contains a localpath and svnpath implementation
+and some developer-oriented command line tools. See
+http://pylib.org for more info.
+
+thanks to all who helped and gave feedback,
+have fun,
+
+holger (http://twitter.com/hpk42)
+
+.. _`execnet>=1.0.0`: http://codespeak.net/execnet
+
+Changes between 1.1.1 and 1.1.0
+=====================================
+
+- introduce automatic plugin registration via 'pytest11'
+ entrypoints via setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain test and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.2.0.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.2.0.txt
new file mode 100644
index 000000000..4f6a56144
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.2.0.txt
@@ -0,0 +1,116 @@
+py.test/pylib 1.2.0: junitxml, standalone test scripts, pluginization
+--------------------------------------------------------------------------------
+
+py.test is an advanced automated testing tool working with
+Python2, Python3 and Jython versions on all major operating
+systems. It has a simple plugin architecture and can run many
+existing common Python test suites without modification. It offers
+some unique features not found in other testing tools.
+See http://pytest.org for more info.
+
+py.test 1.2.0 brings many bug fixes and interesting new abilities:
+
+* --junitxml=path will create an XML file for use with CI processing
+* --genscript=path creates a standalone py.test-equivalent test-script
+* --ignore=path prevents collection of anything below that path
+* --confcutdir=path only lookup conftest.py test configs below that path
+* a 'pytest_report_header' hook to add info to the terminal report header
+* a 'pytestconfig' function argument gives direct access to option values
+* 'pytest_generate_tests' can now be put into a class as well
+* on CPython py.test additionally installs as "py.test-VERSION", on
+ Jython as py.test-jython and on PyPy as py.test-pypy-XYZ
+
+Apart from many bug fixes 1.2.0 also has better pluginization:
+Distributed testing and looponfailing testing now live in the
+separately installable 'pytest-xdist' plugin. The same is true for
+'pytest-figleaf' for doing coverage reporting. Those two plugins
+  can serve well now as blueprints for doing your own.
+
+thanks to all who helped and gave feedback,
+have fun,
+
+holger krekel, January 2010
+
+Changes between 1.2.0 and 1.1.1
+=====================================
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+ which will not need any libraries installed. thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent specified path from collection.
+ Can be specified multiple times.
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
+
+- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
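+
+  For example, in a conftest.py (the returned text is illustrative)::
+
+    def pytest_report_header(config):
+        return "example project -- extra info for the report header"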
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+ to a test within a test collection directly. This might eventually
+ evolve as a full substitute to "-k" specifications.
+
+- streamlined plugin loading: order is now as documented in
+ customize.html: setuptools, ENV, commandline, conftest.
+  also setuptools entry point names are turned to canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+ will now prevent further hooks to be called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.2.1.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.2.1.txt
new file mode 100644
index 000000000..5bf8ba22d
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.2.1.txt
@@ -0,0 +1,66 @@
+py.test/pylib 1.2.1: little fixes and improvements
+--------------------------------------------------------------------------------
+
+py.test is an advanced automated testing tool working with
+Python2, Python3 and Jython versions on all major operating
+systems. It has a simple plugin architecture and can run many
+existing common Python test suites without modification. It offers
+some unique features not found in other testing tools.
+See http://pytest.org for more info.
+
+py.test 1.2.1 brings bug fixes and some new options and abilities triggered
+by user feedback:
+
+* --funcargs [testpath] will show available builtin- and project funcargs.
+* display a short and concise traceback if funcarg lookup fails.
+* early-load "conftest.py" files in non-dot first-level sub directories.
+* --tb=line will print a single line for each failing test (issue67)
+* py.cleanup has a number of new options, cleans up setup.py related files
+* fix issue78: always call python-level teardown functions even if the
+ according setup failed.
+
+For more detailed information see the changelog below.
+
+cheers and have fun,
+
+holger
+
+
+Changes between 1.2.1 and 1.2.0
+=====================================
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+ allows to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+ according setup failed. This includes refinements for calling setup_module/class functions
+ which will now only be called once instead of the previous behaviour where they'd be called
+ multiple times if they raise an exception (including a Skipped exception). Any exception
+  will be recorded and associated with all tests in the corresponding module/class scope.
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.3.0.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.0.txt
new file mode 100644
index 000000000..cf97db036
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.0.txt
@@ -0,0 +1,580 @@
+py.test/pylib 1.3.0: new options, per-plugin hooks, fixes ...
+===========================================================================
+
+The 1.3.0 release introduces new options, bug fixes and improved compatibility
+with Python3 and Jython-2.5.1 on Windows. If you already use py-1.2 chances
+are you can use py-1.3.0. See the below CHANGELOG for more details and
+http://pylib.org/install.html for installation instructions.
+
+py.test is an advanced automated testing tool working with Python2,
+Python3, Jython and PyPy versions on all major operating systems. It
+offers a no-boilerplate testing approach and has inspired other testing
+tools and enhancements in the standard Python library for more than five
+years. It has a simple and extensive plugin architecture, configurable
+reporting and provides unique ways to make it fit to your testing
+process and needs.
+
+See http://pytest.org for more info.
+
+cheers and have fun,
+
+holger krekel
+
+Changes between 1.2.1 and 1.3.0
+==================================================
+
+- deprecate --report option in favour of a new shorter and easier to
+ remember -r option: it takes a string argument consisting of any
+ combination of 'xfsX' characters. They relate to the single chars
+ you see during the dotted progress printing and will print an extra line
+ per test at the end of the test run. This extra line indicates the exact
+ position or test ID that you directly paste to the py.test cmdline in order
+ to re-run a particular test.
+
+- allow external plugins to register new hooks via the new
+ pytest_addhooks(pluginmanager) hook. The new release of
+ the pytest-xdist plugin for distributed and looponfailing
+ testing requires this feature.
+
+- add a new pytest_ignore_collect(path, config) hook to allow projects and
+ plugins to define exclusion behaviour for their directory structure -
+ for example you may define in a conftest.py this method::
+
+ def pytest_ignore_collect(path):
+ return path.check(link=1)
+
+ to prevent even collection of any tests in symlinked dirs.
+
+- new pytest_pycollect_makemodule(path, parent) hook for
+ allowing customization of the Module collection object for a
+ matching test module.
+
+- extend and refine xfail mechanism::
+
+ @py.test.mark.xfail(run=False) do not run the decorated test
+ @py.test.mark.xfail(reason="...") prints the reason string in xfail summaries
+
+  specifying ``--runxfail`` on the command line ignores xfail markers to show
+ you the underlying traceback.
+
+- expose (previously internal) commonly useful methods:
+  py.io.get_terminal_width() -> return terminal width
+ py.io.ansi_print(...) -> print colored/bold text on linux/win32
+ py.io.saferepr(obj) -> return limited representation string
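+
+  For instance (illustrative)::
+
+    import py
+    columns = py.io.get_terminal_width()        # e.g. 80
+    text = py.io.saferepr(list(range(10000)))   # bounded-size repr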
+
+- expose test outcome related exceptions as py.test.skip.Exception,
+ py.test.raises.Exception etc., useful mostly for plugins
+ doing special outcome interpretation/tweaking
+
+- (issue85) fix junitxml plugin to handle tests with non-ascii output
+
+- fix/refine python3 compatibility (thanks Benjamin Peterson)
+
+- fixes for making the jython/win32 combination work, note however:
+ jython2.5.1/win32 does not provide a command line launcher, see
+ http://bugs.jython.org/issue1491 . See pylib install documentation
+ for how to work around.
+
+- fixes for handling of unicode exception values and unprintable objects
+
+- (issue87) fix unboundlocal error in assertionold code
+
+- (issue86) improve documentation for looponfailing
+
+- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
+
+- ship distribute_setup.py version 0.6.10
+
+- added links to the new capturelog and coverage plugins
+
+
+Changes between 1.2.1 and 1.2.0
+=====================================
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+ allows to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+ according setup failed. This includes refinements for calling setup_module/class functions
+ which will now only be called once instead of the previous behaviour where they'd be called
+ multiple times if they raise an exception (including a Skipped exception). Any exception
+  will be recorded and associated with all tests in the corresponding module/class scope.
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
+
+Changes between 1.2 and 1.1.1
+=====================================
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+ which will not need any libraries installed. thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent specified path from collection.
+ Can be specified multiple times.
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
+
+- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+ to a test within a test collection directly. This might eventually
+ evolve as a full substitute to "-k" specifications.
+
+- streamlined plugin loading: order is now as documented in
+ customize.html: setuptools, ENV, commandline, conftest.
+  also setuptools entry point names are turned to canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+ will now prevent further hooks to be called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
+
+Changes between 1.1.1 and 1.1.0
+=====================================
+
+- introduce automatic plugin registration via 'pytest11'
+ entrypoints via setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain test and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
+
+Changes between 1.1.0 and 1.0.2
+=====================================
+
+* adjust and improve docs
+
+* remove py.rest tool and internal namespace - it was
+ never really advertised and can still be used with
+ the old release if needed. If there is interest
+  it could be revived into its own tool, I guess.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup, which creates an option group
+
+* add --report command line option that allows to control showing of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+ at function, class and modules level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches according to expectations
+ for test functions with multiple arguments.
+
+* merge Jarko's fixes, issue #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with >=2.3,
+ add helpers to ease keeping 2 and 3k compatible code
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
+
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
+
+Changes between 1.0.1 and 1.0.2
+=====================================
+
+* fixing packaging issues, triggered by fedora redhat packaging,
+ also added doc, examples and contrib dirs to the tarball.
+
+* added a documentation link to the new django plugin.
+
+Changes between 1.0.0 and 1.0.1
+=====================================
+
+* added a 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* capturing of unicode writes or encoded strings to sys.stdout/err
+  works better; terminal writing was also adapted and somewhat
+  unified between Windows and Linux.
+
+* improved documentation layout and content a lot
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
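+
+  For example (variable name, value and separator are made up)::
+
+    import os
+
+    def test_prepend_search_path(monkeypatch):
+        monkeypatch.setenv("MY_SEARCH_PATH", "/extra/dir", prepend=":")
+        assert os.environ["MY_SEARCH_PATH"].startswith("/extra/dir")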
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
+
+Changes between 1.0.0b9 and 1.0.0
+=====================================
+
+* more terse reporting: try to show filesystem paths relative to the current dir
+* improve xfail output a bit
+
+Changes between 1.0.0b8 and 1.0.0b9
+=====================================
+
+* cleanly handle and report final teardown of test setup
+
+* fix svn-1.6 compat issue with py.path.svnwc().versioned()
+ (thanks Wouter Vanden Hove)
+
+* setup/teardown or collection problems now show as ERRORs
+ or with big "E"'s in the progress lines. they are reported
+ and counted separately.
+
+* dist-testing: properly handle test items that get locally
+ collected but cannot be collected on the remote side - often
+ due to platform/dependency reasons
+
+* simplified py.test.mark API - see keyword plugin documentation
+
+* integrate better with logging: capturing now by default captures
+ test functions and their immediate setup/teardown in a single stream
+
+* capsys and capfd funcargs now have a readouterr() and a close() method
+ (underlyingly py.io.StdCapture/FD objects are used which grew a
+ readouterr() method as well to return snapshots of captured out/err)
+
+* make assert-reinterpretation work better with comparisons not
+  returning bools (reported with numpy, thanks Maciej Fijalkowski)
+
+* reworked per-test output capturing into the pytest_iocapture.py plugin
+ and thus removed capturing code from config object
+
+* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
+
+
+Changes between 1.0.0b7 and 1.0.0b8
+=====================================
+
+* pytest_unittest-plugin is now enabled by default
+
+* introduced pytest_keyboardinterrupt hook and
+  refined the pytest_sessionfinish hook, added tests.
+
+* workaround a buggy logging module interaction ("closing already closed
+ files"). Thanks to Sridhar Ratnakumar for triggering.
+
+* if plugins use "py.test.importorskip" for importing
+ a dependency only a warning will be issued instead
+ of exiting the testing process.
+
+* many improvements to docs:
+  - refined funcargs doc, using the term "factory" instead of "provider"
+ - added a new talk/tutorial doc page
+ - better download page
+ - better plugin docstrings
+ - added new plugins page and automatic doc generation script
+
+* fixed teardown problem related to partially failing funcarg setups
+ (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
+ always invoked even if the "pytest_runtest_setup" failed.
+
+* tweaked doctest output for docstrings in py modules,
+ thanks Radomir.
+
+Changes between 1.0.0b3 and 1.0.0b7
+=============================================
+
+* renamed py.test.xfail back to py.test.mark.xfail to avoid
+ two ways to decorate for xfail
+
+* re-added py.test.mark decorator for setting keywords on functions
+ (it was actually documented so removing it was not nice)
+
+* remove scope-argument from request.addfinalizer() because
+ request.cached_setup has the scope arg. TOOWTDI.
+
+* perform setup finalization before reporting failures
+
+* apply modified patches from Andreas Kloeckner to allow
+ test functions to have no func_code (#22) and to make
+ "-k" and function keywords work (#20)
+
+* apply patch from Daniel Peolzleithner (issue #23)
+
+* resolve issue #18, multiprocessing.Manager() and
+ redirection clash
+
+* make __name__ == "__channelexec__" for remote_exec code
+
+Changes between 1.0.0b1 and 1.0.0b3
+=============================================
+
+* plugin classes are removed: one now defines
+ hooks directly in conftest.py or global pytest_*.py
+ files.
+
+* added new pytest_namespace(config) hook that allows
+ to inject helpers directly to the py.test.* namespace.
+
+* documented and refined many hooks
+
+* added new style of generative tests via
+ pytest_generate_tests hook that integrates
+ well with function arguments.
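+
+  A rough sketch of such a hook (all names are invented)::
+
+    def pytest_generate_tests(metafunc):
+        if "numstr" in metafunc.funcargnames:
+            for value in ["1", "42"]:
+                metafunc.addcall(funcargs={"numstr": value})
+
+    def test_str_to_int(numstr):
+        assert int(numstr) > 0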
+
+
+Changes between 0.9.2 and 1.0.0b1
+=============================================
+
+* introduced new "funcarg" setup method,
+ see doc/test/funcarg.txt
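+
+  A minimal sketch (the factory and argument names are invented)::
+
+    def pytest_funcarg__answer(request):
+        return 42
+
+    def test_answer(answer):
+        assert answer == 42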
+
+* introduced plugin architecture and many
+ new py.test plugins, see
+ doc/test/plugins.txt
+
+* teardown_method is now guaranteed to get
+ called after a test method has run.
+
+* new method: py.test.importorskip(mod, minversion)
+ will either import or call py.test.skip()
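+
+  For example (the module name and minimum version are only illustrative)::
+
+    import py
+    docutils = py.test.importorskip("docutils", minversion="0.3")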
+
+* completely revised internal py.test architecture
+
+* new py.process.ForkedFunc object allowing one to
+  fork execution of a function in a subprocess
+  and get a result back.
+
+XXX lots of things missing here XXX
+
+Changes between 0.9.1 and 0.9.2
+===============================
+
+* refined installation and metadata, created new setup.py,
+ now based on setuptools/ez_setup (thanks to Ralf Schmitt
+ for his support).
+
+* improved the way of making py.* scripts available in
+ windows environments, they are now added to the
+ Scripts directory as ".cmd" files.
+
+* py.path.svnwc.status() now is more complete and
+ uses xml output from the 'svn' command if available
+ (Guido Wesdorp)
+
+* fix for py.path.svn* to work with svn 1.5
+ (Chris Lamb)
+
+* fix path.relto(otherpath) method on windows to
+ use normcase for checking if a path is relative.
+
+* py.test's traceback is better parseable from editors
+ (follows the filenames:LINENO: MSG convention)
+ (thanks to Osmo Salomaa)
+
+* fix to javascript-generation, "py.test --runbrowser"
+ should work more reliably now
+
+* removed previously accidentally added
+ py.test.broken and py.test.notimplemented helpers.
+
+* there now is a py.__version__ attribute
+
+Changes between 0.9.0 and 0.9.1
+===============================
+
+This is a fairly complete list of changes between 0.9 and 0.9.1, which can
+serve as a reference for developers.
+
+* allowing + signs in py.path.svn urls [39106]
+* fixed support for Failed exceptions without excinfo in py.test [39340]
+* added support for killing processes for Windows (as well as platforms that
+ support os.kill) in py.misc.killproc [39655]
+* added setup/teardown for generative tests to py.test [40702]
+* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
+* fixed problem with calling .remove() on wcpaths of non-versioned files in
+ py.path [44248]
+* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
+* fail to run greenlet tests when pypy is available, but without stackless
+ [45294]
+* small fixes in rsession tests [45295]
+* fixed issue with 2.5 type representations in py.test [45483, 45484]
+* made internal reporting-issue displaying atomic in py.test
+  [45518]
+* made the py.lookup script ignore non-existing files [45519]
+* improved exception name creation in py.test [45535]
+* made that less threads are used in execnet [merge in 45539]
+* removed the lock required for atomic reporting-issue displaying in py.test
+  [45545]
+* removed globals from execnet [45541, 45547]
+* refactored cleanup mechanics, set setDaemon to 1 so that atexit
+  gets called in 2.5 (py.execnet) [45548]
+* fixed bug in joining threads in py.execnet's servemain [45549]
+* refactored py.test.rsession tests to not rely on exact output format anymore
+ [45646]
+* using repr() on test outcome [45647]
+* added 'Reason' classes for py.test.skip() [45648, 45649]
+* killed some unnecessary sanity check in py.test.collect [45655]
+* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
+ usable by Administrators [45901]
+* added support for locking and non-recursive commits to py.path.svnwc [45994]
+* locking files in py.execnet to prevent CPython from segfaulting [46010]
+* added export() method to py.path.svnurl
+* fixed -d -x in py.test [47277]
+* fixed argument concatenation problem in py.path.svnwc [49423]
+* restore py.test behaviour that it exits with code 1 when there are failures
+ [49974]
+* don't fail on html files that don't have an accompanying .txt file [50606]
+* fixed 'utestconvert.py < input' [50645]
+* small fix for code indentation in py.code.source [50755]
+* fix _docgen.py documentation building [51285]
+* improved checks for source representation of code blocks in py.test [51292]
+* added support for passing authentication to py.path.svn* objects [52000,
+ 52001]
+* removed sorted() call for py.apigen tests in favour of [].sort() to support
+ Python 2.3 [52481]
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.3.1.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.1.txt
new file mode 100644
index 000000000..471de408a
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.1.txt
@@ -0,0 +1,104 @@
+py.test/pylib 1.3.1: new py.test.xfail, --maxfail, better reporting
+===========================================================================
+
+The pylib/py.test 1.3.1 release brings:
+
+- the new imperative ``py.test.xfail()`` helper in order to have a test or
+ setup function result in an "expected failure"
+- a new option ``--maxfail=NUM`` to stop the test run after some failures
+- markers/decorators are now applicable to test classes (>=Python2.6)
+- improved reporting, shorter tracebacks in several cases
+- some simplified internals, more compatibility with Jython and PyPy
+- bug fixes and various refinements
+
+See the CHANGELOG entry below for more details and
+http://pylib.org/install.html for installation instructions.
+
+If you used older versions of py.test you should be able to upgrade
+to 1.3.1 without changes to your test source code.
+
+py.test is an automated testing tool working with Python2,
+Python3, Jython and PyPy versions on all major operating systems. It
+offers a no-boilerplate testing approach and has inspired other testing
+tools and enhancements in the standard Python library for more than five
+years. It has a simple and extensive plugin architecture, configurable
+reporting and provides unique ways to make it fit to your testing
+process and needs.
+
+See http://pytest.org for more info.
+
+cheers and have fun,
+
+holger krekel
+
+Changes between 1.3.0 and 1.3.1
+==================================================
+
+New features
+++++++++++++++++++
+
+- issue91: introduce new py.test.xfail(reason) helper
+ to imperatively mark a test as expected to fail. Can
+ be used from within setup and test functions. This is
+ useful especially for parametrized tests when certain
+ configurations are expected-to-fail. In this case the
+ declarative approach with the @py.test.mark.xfail cannot
+ be used as it would mark all configurations as xfail.
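+
+  A minimal sketch of the imperative style (the platform check is just an
+  assumed example)::
+
+    import sys
+    import py
+
+    def test_division():
+        if sys.platform == "win32":
+            py.test.xfail("expected to fail on win32")
+        assert 4 / 2 == 2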
+
+- issue102: introduce new --maxfail=NUM option to stop
+ test runs after NUM failures. This is a generalization
+ of the '-x' or '--exitfirst' option which is now equivalent
+ to '--maxfail=1'. Both '-x' and '--maxfail' will
+  now also print a line near the end indicating the interruption.
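+
+  For example (the test directory is arbitrary)::
+
+    py.test --maxfail=3 testing/    # stop after the third failure
+    py.test -x testing/             # equivalent to --maxfail=1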
+
+- issue89: allow py.test.mark decorators to be used on classes
+ (class decorators were introduced with python2.6) and
+  also allow multiple markers to be applied at class/module level
+ by specifying a list.
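+
+  A rough sketch (the marker names are made up)::
+
+    import py
+
+    @py.test.mark.webtest
+    class TestClass:
+        def test_send(self):
+            assert True
+
+    # several markers at module level, specified as a list
+    pytestmark = [py.test.mark.slow, py.test.mark.webtest]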
+
+- improve and refine letter reporting in the progress bar:
+ . pass
+ f failed test
+ s skipped tests (reminder: use for dependency/platform mismatch only)
+ x xfailed test (test that was expected to fail)
+ X xpassed test (test that was expected to fail but passed)
+
+ You can use any combination of 'fsxX' with the '-r' extended
+ reporting option. The xfail/xpass results will show up as
+ skipped tests in the junitxml output - which also fixes
+ issue99.
+
+- make py.test.cmdline.main() return the exitstatus instead of raising
+ SystemExit and also allow it to be called multiple times. This of
+  course requires that your application and tests are properly torn
+ down and don't have global state.
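+
+  A minimal sketch of calling it programmatically (the argument list is
+  illustrative)::
+
+    import py
+    exitstatus = py.test.cmdline.main(["-x", "tests/"])
+    # 0 means all collected tests passed; no SystemExit is raised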
+
+Fixes / Maintenance
+++++++++++++++++++++++
+
+- improved traceback presentation:
+ - improved and unified reporting for "--tb=short" option
+  - Errors during test module imports are much shorter (using --tb=short style)
+ - raises shows shorter more relevant tracebacks
+ - --fulltrace now more systematically makes traces longer / inhibits cutting
+
+- improve support for raises and other dynamically compiled code by
+ manipulating python's linecache.cache instead of the previous
+ rather hacky way of creating custom code objects. This makes
+  it seamlessly work on Jython and PyPy where it previously didn't.
+
+- fix issue96: make capturing more resilient against Control-C
+ interruptions (involved somewhat substantial refactoring
+ to the underlying capturing functionality to avoid race
+ conditions).
+
+- fix chaining of conditional skipif/xfail decorators - so it works now
+ as expected to use multiple @py.test.mark.skipif(condition) decorators,
+  including specific reporting of which of the conditions led to skipping.
+
+- fix issue95: late-import zlib so that it's not required
+ for general py.test startup.
+
+- fix issue94: make reporting more robust against bogus source code
+ (and internally be more careful when presenting unexpected byte sequences)
+
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.3.2.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.2.txt
new file mode 100644
index 000000000..599dfbed7
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.2.txt
@@ -0,0 +1,720 @@
+py.test/pylib 1.3.2: API and reporting refinements, many fixes
+===========================================================================
+
+The pylib/py.test 1.3.2 release brings many bug fixes and some new
+features. It was refined for and tested against the recently released
+Python2.7 and remains compatible with the usual armada of interpreters
+(Python2.4 through to Python3.1.2, Jython and PyPy). Note that for using
+distributed testing features you'll need to upgrade to the jointly released
+pytest-xdist-1.4 because of some internal refactorings.
+
+See http://pytest.org for general documentation and below for
+a detailed CHANGELOG.
+
+cheers & particular thanks to Benjamin Peterson, Ronny Pfannschmidt
+and all issue and patch contributors,
+
+holger krekel
+
+Changes between 1.3.1 and 1.3.2
+==================================================
+
+New features
+++++++++++++++++++
+
+- fix issue103: introduce py.test.raises as context manager, examples::
+
+ with py.test.raises(ZeroDivisionError):
+ x = 0
+ 1 / x
+
+ with py.test.raises(RuntimeError) as excinfo:
+ call_something()
+
+ # you may do extra checks on excinfo.value|type|traceback here
+
+ (thanks Ronny Pfannschmidt)
+
+- Funcarg factories can now dynamically apply a marker to a
+ test invocation. This is for example useful if a factory
+ provides parameters to a test which are expected-to-fail::
+
+ def pytest_funcarg__arg(request):
+ request.applymarker(py.test.mark.xfail(reason="flaky config"))
+ ...
+
+ def test_function(arg):
+ ...
+
+- improved error reporting on collection and import errors. This makes
+ use of a more general mechanism, namely that for custom test item/collect
+ nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can
+ override it to return a string error representation of your choice
+ which is going to be reported as a (red) string.
+
+- introduce '--junitprefix=STR' option to prepend a prefix
+ to all reports in the junitxml file.
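+
+  For example (report file name and prefix are arbitrary)::
+
+    py.test --junitxml=report.xml --junitprefix=nightly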
+
+Bug fixes / Maintenance
+++++++++++++++++++++++++++
+
+- make tests and the ``pytest_recwarn`` plugin in particular fully compatible
+  with Python2.7 (if you use the ``recwarn`` funcarg, warnings will be enabled so that
+ you can properly check for their existence in a cross-python manner).
+- refine --pdb: ignore xfailed tests, unify its TB-reporting and
+ don't display failures again at the end.
+- fix assertion interpretation with the ** operator (thanks Benjamin Peterson)
+- fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson)
+- fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous)
+- fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny)
+- fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson)
+- fix py.code.compile(source) to generate unique filenames
+- fix assertion re-interp problems on PyPy, by deferring code
+ compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot)
+- fix py.path.local.pyimport() to work with directories
+- streamline py.path.local.mkdtemp implementation and usage
+- don't print empty lines when showing junitxml-filename
+- add optional boolean ignore_errors parameter to py.path.local.remove
+- fix terminal writing on win32/python2.4
+- py.process.cmdexec() now tries harder to return properly encoded unicode objects
+ on all python versions
+- install plain py.test/py.which scripts also for Jython, this helps to
+ get canonical script paths in virtualenv situations
+- make path.bestrelpath(path) return ".", note that when calling
+ X.bestrelpath the assumption is that X is a directory.
+- make initial conftest discovery ignore "--" prefixed arguments
+- fix resultlog plugin when used in a multicpu/multihost xdist situation
+ (thanks Jakub Gustak)
+- perform distributed testing related reporting in the xdist-plugin
+ rather than having dist-related code in the generic py.test
+ distribution
+- fix homedir detection on Windows
+- ship distribute_setup.py version 0.6.13
+
+Changes between 1.3.0 and 1.3.1
+==================================================
+
+New features
+++++++++++++++++++
+
+- issue91: introduce new py.test.xfail(reason) helper
+ to imperatively mark a test as expected to fail. Can
+ be used from within setup and test functions. This is
+ useful especially for parametrized tests when certain
+ configurations are expected-to-fail. In this case the
+ declarative approach with the @py.test.mark.xfail cannot
+ be used as it would mark all configurations as xfail.
+
+- issue102: introduce new --maxfail=NUM option to stop
+ test runs after NUM failures. This is a generalization
+ of the '-x' or '--exitfirst' option which is now equivalent
+ to '--maxfail=1'. Both '-x' and '--maxfail' will
+  now also print a line near the end indicating the interruption.
+
+- issue89: allow py.test.mark decorators to be used on classes
+ (class decorators were introduced with python2.6) and
+  also allow multiple markers to be applied at class/module level
+ by specifying a list.
+
+- improve and refine letter reporting in the progress bar:
+ . pass
+ f failed test
+ s skipped tests (reminder: use for dependency/platform mismatch only)
+ x xfailed test (test that was expected to fail)
+ X xpassed test (test that was expected to fail but passed)
+
+ You can use any combination of 'fsxX' with the '-r' extended
+ reporting option. The xfail/xpass results will show up as
+ skipped tests in the junitxml output - which also fixes
+ issue99.
+
+- make py.test.cmdline.main() return the exitstatus instead of raising
+ SystemExit and also allow it to be called multiple times. This of
+  course requires that your application and tests are properly torn
+ down and don't have global state.
+
+Fixes / Maintenance
+++++++++++++++++++++++
+
+- improved traceback presentation:
+ - improved and unified reporting for "--tb=short" option
+  - Errors during test module imports are much shorter (using --tb=short style)
+ - raises shows shorter more relevant tracebacks
+ - --fulltrace now more systematically makes traces longer / inhibits cutting
+
+- improve support for raises and other dynamically compiled code by
+ manipulating python's linecache.cache instead of the previous
+ rather hacky way of creating custom code objects. This makes
+  it seamlessly work on Jython and PyPy where it previously didn't.
+
+- fix issue96: make capturing more resilient against Control-C
+ interruptions (involved somewhat substantial refactoring
+ to the underlying capturing functionality to avoid race
+ conditions).
+
+- fix chaining of conditional skipif/xfail decorators - so it works now
+ as expected to use multiple @py.test.mark.skipif(condition) decorators,
+  including specific reporting of which of the conditions led to skipping.
+
+- fix issue95: late-import zlib so that it's not required
+ for general py.test startup.
+
+- fix issue94: make reporting more robust against bogus source code
+ (and internally be more careful when presenting unexpected byte sequences)
+
+
+Changes between 1.2.1 and 1.3.0
+==================================================
+
+- deprecate --report option in favour of a new shorter and easier to
+ remember -r option: it takes a string argument consisting of any
+ combination of 'xfsX' characters. They relate to the single chars
+ you see during the dotted progress printing and will print an extra line
+ per test at the end of the test run. This extra line indicates the exact
+ position or test ID that you directly paste to the py.test cmdline in order
+ to re-run a particular test.
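+
+  For example (the test directory is arbitrary)::
+
+    py.test -rxs testing/   # extra summary lines for xfailed and skipped tests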
+
+- allow external plugins to register new hooks via the new
+ pytest_addhooks(pluginmanager) hook. The new release of
+ the pytest-xdist plugin for distributed and looponfailing
+ testing requires this feature.
+
+- add a new pytest_ignore_collect(path, config) hook to allow projects and
+ plugins to define exclusion behaviour for their directory structure -
+ for example you may define in a conftest.py this method::
+
+ def pytest_ignore_collect(path):
+ return path.check(link=1)
+
+ to prevent even a collection try of any tests in symlinked dirs.
+
+- new pytest_pycollect_makemodule(path, parent) hook for
+ allowing customization of the Module collection object for a
+ matching test module.
+
+- extend and refine xfail mechanism:
+ ``@py.test.mark.xfail(run=False)`` do not run the decorated test
+ ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
+  specifying ``--runxfail`` on the command line virtually ignores xfail markers
+
+- expose (previously internal) commonly useful methods:
+  py.io.get_terminal_width() -> return terminal width
+ py.io.ansi_print(...) -> print colored/bold text on linux/win32
+ py.io.saferepr(obj) -> return limited representation string
+
+- expose test outcome related exceptions as py.test.skip.Exception,
+ py.test.raises.Exception etc., useful mostly for plugins
+ doing special outcome interpretation/tweaking
+
+- (issue85) fix junitxml plugin to handle tests with non-ascii output
+
+- fix/refine python3 compatibility (thanks Benjamin Peterson)
+
+- fixes for making the jython/win32 combination work, note however:
+ jython2.5.1/win32 does not provide a command line launcher, see
+ http://bugs.jython.org/issue1491 . See pylib install documentation
+ for how to work around.
+
+- fixes for handling of unicode exception values and unprintable objects
+
+- (issue87) fix unboundlocal error in assertionold code
+
+- (issue86) improve documentation for looponfailing
+
+- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
+
+- ship distribute_setup.py version 0.6.10
+
+- added links to the new capturelog and coverage plugins
+
+
+Changes between 1.2.1 and 1.2.0
+=====================================
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+  allows you to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+  corresponding setup failed. This includes refinements for calling setup_module/class functions
+ which will now only be called once instead of the previous behaviour where they'd be called
+ multiple times if they raise an exception (including a Skipped exception). Any exception
+  will be recorded and associated with all tests in the corresponding module/class scope.
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
+
+Changes between 1.2 and 1.1.1
+=====================================
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+  which will not need any libraries installed. Thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent specified path from collection.
+ Can be specified multiple times.
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
+
+- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+ to a test within a test collection directly. This might eventually
+ evolve as a full substitute to "-k" specifications.
+
+- streamlined plugin loading: order is now as documented in
+ customize.html: setuptools, ENV, commandline, conftest.
+  also setuptools entry point names are turned to canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+  will now prevent further hooks from being called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
+
+Changes between 1.1.1 and 1.1.0
+=====================================
+
+- introduce automatic plugin registration via 'pytest11'
+ entrypoints via setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain tests and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
+
+Changes between 1.1.0 and 1.0.2
+=====================================
+
+* adjust and improve docs
+
+* remove py.rest tool and internal namespace - it was
+ never really advertised and can still be used with
+ the old release if needed. If there is interest
+  it could be revived into its own tool, I guess.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup which creates option group
+
+* add --report command line option that allows to control showing of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+ at function, class and modules level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches according to expectations
+ for test functions with multiple arguments.
+
+* merge Jarko's fixes for issues #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with Python >= 2.3,
+  add helpers to ease keeping code compatible with Python 2 and 3
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
+
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
+
+Changes between 1.0.1 and 1.0.2
+=====================================
+
+* fixing packaging issues, triggered by fedora redhat packaging,
+ also added doc, examples and contrib dirs to the tarball.
+
+* added a documentation link to the new django plugin.
+
+Changes between 1.0.0 and 1.0.1
+=====================================
+
+* added a 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* capturing of unicode writes or encoded strings to sys.stdout/err
+ work better, also terminalwriting was adapted and somewhat
+ unified between windows and linux.
+
+* improved documentation layout and content a lot
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
+
+Changes between 1.0.0b9 and 1.0.0
+=====================================
+
+* more terse reporting: try to show filesystem paths relative to the current dir
+* improve xfail output a bit
+
+Changes between 1.0.0b8 and 1.0.0b9
+=====================================
+
+* cleanly handle and report final teardown of test setup
+
+* fix svn-1.6 compat issue with py.path.svnwc().versioned()
+ (thanks Wouter Vanden Hove)
+
+* setup/teardown or collection problems now show as ERRORs
+ or with big "E"'s in the progress lines. they are reported
+ and counted separately.
+
+* dist-testing: properly handle test items that get locally
+ collected but cannot be collected on the remote side - often
+ due to platform/dependency reasons
+
+* simplified py.test.mark API - see keyword plugin documentation
+
+* integrate better with logging: capturing now by default captures
+ test functions and their immediate setup/teardown in a single stream
+
+* capsys and capfd funcargs now have a readouterr() and a close() method
+ (underlyingly py.io.StdCapture/FD objects are used which grew a
+ readouterr() method as well to return snapshots of captured out/err)
+
+* make assert-reinterpretation work better with comparisons not
+  returning bools (reported with numpy, thanks Maciej Fijalkowski)
+
+* reworked per-test output capturing into the pytest_iocapture.py plugin
+ and thus removed capturing code from config object
+
+* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
+
+
+Changes between 1.0.0b7 and 1.0.0b8
+=====================================
+
+* pytest_unittest-plugin is now enabled by default
+
+* introduced pytest_keyboardinterrupt hook and
+  refined the pytest_sessionfinish hook, added tests.
+
+* workaround a buggy logging module interaction ("closing already closed
+ files"). Thanks to Sridhar Ratnakumar for triggering.
+
+* if plugins use "py.test.importorskip" for importing
+ a dependency only a warning will be issued instead
+ of exiting the testing process.
+
+* many improvements to docs:
+  - refined funcargs doc, using the term "factory" instead of "provider"
+ - added a new talk/tutorial doc page
+ - better download page
+ - better plugin docstrings
+ - added new plugins page and automatic doc generation script
+
+* fixed teardown problem related to partially failing funcarg setups
+ (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
+ always invoked even if the "pytest_runtest_setup" failed.
+
+* tweaked doctest output for docstrings in py modules,
+ thanks Radomir.
+
+Changes between 1.0.0b3 and 1.0.0b7
+=============================================
+
+* renamed py.test.xfail back to py.test.mark.xfail to avoid
+ two ways to decorate for xfail
+
+* re-added py.test.mark decorator for setting keywords on functions
+ (it was actually documented so removing it was not nice)
+
+* remove scope-argument from request.addfinalizer() because
+ request.cached_setup has the scope arg. TOOWTDI.
+
+* perform setup finalization before reporting failures
+
+* apply modified patches from Andreas Kloeckner to allow
+ test functions to have no func_code (#22) and to make
+ "-k" and function keywords work (#20)
+
+* apply patch from Daniel Peolzleithner (issue #23)
+
+* resolve issue #18, multiprocessing.Manager() and
+ redirection clash
+
+* make __name__ == "__channelexec__" for remote_exec code
+
+Changes between 1.0.0b1 and 1.0.0b3
+=============================================
+
+* plugin classes are removed: one now defines
+ hooks directly in conftest.py or global pytest_*.py
+ files.
+
+* added new pytest_namespace(config) hook that allows
+ to inject helpers directly to the py.test.* namespace.
+
+* documented and refined many hooks
+
+* added new style of generative tests via
+ pytest_generate_tests hook that integrates
+ well with function arguments.
+
+
+Changes between 0.9.2 and 1.0.0b1
+=============================================
+
+* introduced new "funcarg" setup method,
+ see doc/test/funcarg.txt
+
+* introduced plugin architecture and many
+ new py.test plugins, see
+ doc/test/plugins.txt
+
+* teardown_method is now guaranteed to get
+ called after a test method has run.
+
+* new method: py.test.importorskip(mod, minversion)
+ will either import or call py.test.skip()
+
+* completely revised internal py.test architecture
+
+* new py.process.ForkedFunc object allowing one to
+  fork execution of a function in a subprocess
+  and get a result back.
+
+XXX lots of things missing here XXX
+
+Changes between 0.9.1 and 0.9.2
+===============================
+
+* refined installation and metadata, created new setup.py,
+ now based on setuptools/ez_setup (thanks to Ralf Schmitt
+ for his support).
+
+* improved the way of making py.* scripts available in
+ windows environments, they are now added to the
+ Scripts directory as ".cmd" files.
+
+* py.path.svnwc.status() now is more complete and
+ uses xml output from the 'svn' command if available
+ (Guido Wesdorp)
+
+* fix for py.path.svn* to work with svn 1.5
+ (Chris Lamb)
+
+* fix path.relto(otherpath) method on windows to
+ use normcase for checking if a path is relative.
+
+* py.test's traceback is better parseable from editors
+ (follows the filenames:LINENO: MSG convention)
+ (thanks to Osmo Salomaa)
+
+* fix to javascript-generation, "py.test --runbrowser"
+ should work more reliably now
+
+* removed previously accidentally added
+ py.test.broken and py.test.notimplemented helpers.
+
+* there now is a py.__version__ attribute
+
+Changes between 0.9.0 and 0.9.1
+===============================
+
+This is a fairly complete list of changes between 0.9 and 0.9.1, which can
+serve as a reference for developers.
+
+* allowing + signs in py.path.svn urls [39106]
+* fixed support for Failed exceptions without excinfo in py.test [39340]
+* added support for killing processes for Windows (as well as platforms that
+ support os.kill) in py.misc.killproc [39655]
+* added setup/teardown for generative tests to py.test [40702]
+* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
+* fixed problem with calling .remove() on wcpaths of non-versioned files in
+ py.path [44248]
+* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
+* fail to run greenlet tests when pypy is available, but without stackless
+ [45294]
+* small fixes in rsession tests [45295]
+* fixed issue with 2.5 type representations in py.test [45483, 45484]
+* made internal reporting-issue displaying atomic in py.test
+  [45518]
+* made the py.lookup script ignore non-existing files [45519]
+* improved exception name creation in py.test [45535]
+* made that less threads are used in execnet [merge in 45539]
+* removed the lock required for atomic reporting-issue displaying in py.test
+  [45545]
+* removed globals from execnet [45541, 45547]
+* refactored cleanup mechanics, set setDaemon to 1 so that atexit
+  gets called in 2.5 (py.execnet) [45548]
+* fixed bug in joining threads in py.execnet's servemain [45549]
+* refactored py.test.rsession tests to not rely on exact output format anymore
+ [45646]
+* using repr() on test outcome [45647]
+* added 'Reason' classes for py.test.skip() [45648, 45649]
+* killed some unnecessary sanity check in py.test.collect [45655]
+* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
+ usable by Administrators [45901]
+* added support for locking and non-recursive commits to py.path.svnwc [45994]
+* locking files in py.execnet to prevent CPython from segfaulting [46010]
+* added export() method to py.path.svnurl
+* fixed -d -x in py.test [47277]
+* fixed argument concatenation problem in py.path.svnwc [49423]
+* restore py.test behaviour that it exits with code 1 when there are failures
+ [49974]
+* don't fail on html files that don't have an accompanying .txt file [50606]
+* fixed 'utestconvert.py < input' [50645]
+* small fix for code indentation in py.code.source [50755]
+* fix _docgen.py documentation building [51285]
+* improved checks for source representation of code blocks in py.test [51292]
+* added support for passing authentication to py.path.svn* objects [52000,
+ 52001]
+* removed sorted() call for py.apigen tests in favour of [].sort() to support
+ Python 2.3 [52481]
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.3.3.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.3.txt
new file mode 100644
index 000000000..c62cb8590
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.3.txt
@@ -0,0 +1,26 @@
+py.test/pylib 1.3.3: windows and other fixes
+===========================================================================
+
+pylib/py.test 1.3.3 is a minor bugfix release featuring some improvements
+and fixes. See changelog_ for full history.
+
+have fun,
+holger krekel
+
+.. _changelog: ../changelog.html
+
+Changes between 1.3.2 and 1.3.3
+==================================================
+
+- fix issue113: assertion representation problem with triple-quoted strings
+ (and possibly other cases)
+- make conftest loading detect that a conftest file with the same
+  content was already loaded, which avoids surprises in nested directory structures
+ which can be produced e.g. by Hudson. It probably removes the need to use
+ --confcutdir in most cases.
+- fix terminal coloring for win32
+ (thanks Michael Foord for reporting)
+- fix weirdness: make terminal width detection work on stdout instead of stdin
+ (thanks Armin Ronacher for reporting)
+- remove trailing whitespace in all py/text distribution files
+
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.3.4.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.4.txt
new file mode 100644
index 000000000..c156c8bdb
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.3.4.txt
@@ -0,0 +1,22 @@
+py.test/pylib 1.3.4: fixes and new native traceback option
+===========================================================================
+
+pylib/py.test 1.3.4 is a minor maintenance release mostly containing bug fixes
+and a new "--tb=native" traceback option to show "normal" Python standard
+tracebacks instead of the py.test enhanced tracebacks. See below for more
+change info and http://pytest.org for more general information on features
+and configuration of the testing tool.
+
+Thanks to the issue reporters and generally to Ronny Pfannschmidt for help.
+
+cheers,
+holger krekel
+
+Changes between 1.3.3 and 1.3.4
+==================================================
+
+- fix issue111: improve install documentation for windows
+- fix issue119: fix custom collectability of __init__.py as a module
+- fix issue116: --doctestmodules work with __init__.py files as well
+- fix issue115: unify internal exception passthrough/catching/GeneratorExit
+- fix issue118: new --tb=native for presenting cpython-standard exceptions
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.4.0.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.4.0.txt
new file mode 100644
index 000000000..6f9a7714d
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.4.0.txt
@@ -0,0 +1,47 @@
+
+.. _`release-1.4.0`:
+
+py-1.4.0: cross-python lib for path, code, io, ... manipulations
+===========================================================================
+
+"py" is a small library comprising APIs for filesystem and svn path
+manipulations, dynamic code construction and introspection, a Py2/Py3
+compatibility namespace ("py.builtin"), IO capturing, terminal colored printing
+(on windows and linux), ini-file parsing and a lazy import mechanism.
+It runs unmodified on all Python interpreters compatible with Python2.4 up
+until Python 3.2. The general goal with "py" is to provide stable APIs
+for some common tasks that are continuously tested against many Python
+interpreters and thus also to help transition. Here are some docs:
+
+ http://pylib.org
+
+NOTE: The prior py-1.3.X versions contained "py.test" which now comes
+as its own separate "pytest" distribution and was just released
+as "pytest-2.0.0", see here for the revamped docs:
+
+ http://pytest.org
+
+And "py.cleanup|py.lookup|py.countloc" etc. helpers are now part of
+the pycmd distribution, see http://pypi.python.org/pypi/pycmd
+
+This makes "py-1.4.0" a simple library which does not install
+any command line utilities anymore.
+
+cheers,
+holger
+
+Changes between 1.3.4 and 1.4.0
+-------------------------------------
+
+- py.test was moved to a separate "pytest" package. What remains is
+ a stub hook which will proxy ``import py.test`` to ``pytest``.
+- all command line tools ("py.cleanup/lookup/countloc/..." moved
+ to "pycmd" package)
+- removed the old and deprecated "py.magic" namespace
+- use apipkg-1.1 and make py.apipkg.initpkg|ApiModule available
+- add py.iniconfig module for brain-dead easy ini-config file parsing
+- introduce py.builtin.any()
+- path objects have a .dirname attribute now (equivalent to
+ os.path.dirname(path))
+- path.visit() accepts breadthfirst (bf) and sort options
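+
+  For example (the directory is arbitrary)::
+
+    import os
+    import py
+
+    p = py.path.local(".")
+    assert p.dirname == os.path.dirname(str(p))
+    for child in p.visit(fil="*.py", bf=True, sort=True):
+        print(child)
+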
+- remove deprecated py.compat namespace
diff --git a/testing/web-platform/tests/tools/py/doc/announce/release-1.4.1.txt b/testing/web-platform/tests/tools/py/doc/announce/release-1.4.1.txt
new file mode 100644
index 000000000..a5aa76b14
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/release-1.4.1.txt
@@ -0,0 +1,47 @@
+
+.. _`release-1.4.1`:
+
+py-1.4.1: cross-python lib for fs path, code, io, ... manipulations
+===========================================================================
+
+This is a bug fix release of the "py" lib, see below for detailed changes.
+The py lib is a small library comprising APIs for filesystem and svn path
+manipulations, dynamic code construction and introspection, a Py2/Py3
+compatibility namespace ("py.builtin"), IO capturing, terminal colored printing
+(on windows and linux), ini-file parsing and a lazy import mechanism.
+It runs unmodified on all Python interpreters compatible with Python2.4 up
+until Python 3.2, PyPy and Jython. The general goal with "py" is to
+provide stable APIs for some common tasks that are continuously tested
+against many Python interpreters and thus also to help transition. Here
+are some docs:
+
+ http://pylib.org
+
+NOTE: The prior py-1.3.X versions contained "py.test" which since py-1.4.0
+comes as its own separate "pytest" distribution, see:
+
+ http://pytest.org
+
+Also, the "py.cleanup|py.lookup|py.countloc" helpers are now part of
+the pycmd distribution, see http://pypi.python.org/pypi/pycmd
+
+
+Changes between 1.4.0 and 1.4.1
+==================================================
+
+- fix issue1 - py.error.* classes to be pickleable
+
+- fix issue2 - on windows32 use PATHEXT as the list of potential
+  extensions to find binaries with py.path.local.sysfind(commandname)
+
+- fix (pytest-) issue10 and refine assertion reinterpretation
+ to avoid breaking if the __nonzero__ of an object fails
+
+- fix (pytest-) issue17 where python3 does not like star-imports,
+ leading to misrepresentation of import-errors in test modules
+
+- fix ``py.error.*`` attribute access on PyPy
+
+- allow path.samefile(arg) to succeed when arg is a relative filename
+
+- fix (pytest-) issue20 path.samefile(relpath) works as expected now
diff --git a/testing/web-platform/tests/tools/py/doc/announce/releases.txt b/testing/web-platform/tests/tools/py/doc/announce/releases.txt
new file mode 100644
index 000000000..309c29bac
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/announce/releases.txt
@@ -0,0 +1,16 @@
+=============
+Release notes
+=============
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   release-1.1.0
+   release-1.0.2
+   release-1.0.1
+   release-1.0.0
+   release-0.9.2
+   release-0.9.0
diff --git a/testing/web-platform/tests/tools/py/doc/changelog.txt b/testing/web-platform/tests/tools/py/doc/changelog.txt
new file mode 100644
index 000000000..237daca35
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/changelog.txt
@@ -0,0 +1,3 @@
+.. _`changelog`:
+
+.. include:: ../CHANGELOG
diff --git a/testing/web-platform/tests/tools/py/doc/code.txt b/testing/web-platform/tests/tools/py/doc/code.txt
new file mode 100644
index 000000000..bdd8691da
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/code.txt
@@ -0,0 +1,150 @@
+================================================================================
+py.code: higher level python code and introspection objects
+================================================================================
+
+``py.code`` provides higher level APIs and objects for Code, Frame, Traceback,
+ExceptionInfo and source code construction. The ``py.code`` library
+tries to simplify accessing the code objects as well as creating them.
+There is a small set of interfaces a user needs to deal with, all nicely
+bundled together, and with a rich set of 'Pythonic' functionality.
+
+Contents of the library
+=======================
+
+Every object in the ``py.code`` library wraps a Python object related to
+code: the ``py.code.Code`` class wraps code objects, ``py.code.Source``
+source snippets, ``py.code.Traceback`` exception tracebacks,
+``py.code.Frame`` frame
+objects (as found in e.g. tracebacks) and ``py.code.ExceptionInfo`` the
+tuple provided by sys.exc_info() (containing exception and traceback
+information when an exception occurs). Also in the library is a helper function
+``py.code.compile()`` that provides the same functionality as Python's
+built-in 'compile()' function, but returns a wrapped code object.
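+
+A short doctest-style sketch of the compile helper, assuming it accepts the
+same ``mode`` argument as the builtin (the expression is arbitrary)::
+
+    >>> import py
+    >>> co = py.code.compile("2 + 2", mode="eval")
+    >>> eval(co)
+    4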
+
+The wrappers
+============
+
+``py.code.Code``
+-------------------
+
+Code objects are instantiated with a code object or a callable as argument,
+and provide functionality to compare themselves with other Code objects, get to
+the source file or its contents, create new Code objects from scratch, etc.
+
+A quick example::
+
+ >>> import py
+ >>> c = py.code.Code(py.path.local.read)
+ >>> c.path.basename
+ 'common.py'
+ >>> isinstance(c.source(), py.code.Source)
+ True
+ >>> str(c.source()).split('\n')[0]
+ "def read(self, mode='r'):"
+
+.. autoclass:: py.code.Code
+ :members:
+ :inherited-members:
+
+
+``py.code.Source``
+---------------------
+
+Source objects wrap snippets of Python source code, providing a simple yet
+powerful interface to read, deindent, slice, compare, compile and manipulate
+them, things that are not so easy in core Python.
+
+Example::
+
+ >>> s = py.code.Source("""\
+ ... def foo():
+ ... print "foo"
+ ... """)
+ >>> str(s).startswith('def') # automatic de-indentation!
+ True
+ >>> s.isparseable()
+ True
+ >>> sub = s.getstatement(1) # get the statement starting at line 1
+ >>> str(sub).strip() # XXX why is the strip() required?!?
+ 'print "foo"'
+
+.. autoclass:: py.code.Source
+ :members:
+
+
+``py.code.Traceback``
+------------------------
+
+Tracebacks are usually not very easy to examine: you need to access certain
+somewhat hidden attributes of the traceback's items (resulting in expressions
+such as 'fname = tb.tb_next.tb_frame.f_code.co_filename'). The Traceback
+interface (and its TracebackItem children) tries to improve this.
+
+Example::
+
+ >>> import sys
+ >>> try:
+ ... py.path.local(100) # illegal argument
+ ... except:
+ ... exc, e, tb = sys.exc_info()
+ >>> t = py.code.Traceback(tb)
+ >>> first = t[1] # get the second entry (first is in this doc)
+ >>> first.path.basename # second is in py/path/local.py
+ 'local.py'
+ >>> isinstance(first.statement, py.code.Source)
+ True
+ >>> str(first.statement).strip().startswith('raise ValueError')
+ True
+
+.. autoclass:: py.code.Traceback
+ :members:
+
+``py.code.Frame``
+--------------------
+
+Frame wrappers are used in ``py.code.Traceback`` items, and will usually not
+directly be instantiated. They provide some nice methods to evaluate code
+'inside' the frame (using the frame's local variables), get to the underlying
+code (frames have a code attribute that points to a ``py.code.Code`` object)
+and examine the arguments.
+
+Example (using the 'first' TracebackItem instance created above)::
+
+ >>> frame = first.frame
+ >>> isinstance(frame.code, py.code.Code)
+ True
+ >>> isinstance(frame.eval('self'), py.path.local)
+ True
+ >>> [namevalue[0] for namevalue in frame.getargs()]
+ ['cls', 'path']
+
+.. autoclass:: py.code.Frame
+ :members:
+
+``py.code.ExceptionInfo``
+----------------------------
+
+A wrapper around the tuple returned by sys.exc_info() (it will call sys.exc_info()
+itself if the tuple is not provided as an argument), providing some handy
+attributes to easily access the traceback and exception string.
+
+Example::
+
+ >>> import sys
+ >>> try:
+ ... foobar()
+ ... except:
+ ... excinfo = py.code.ExceptionInfo()
+ >>> excinfo.typename
+ 'NameError'
+ >>> isinstance(excinfo.traceback, py.code.Traceback)
+ True
+ >>> excinfo.exconly()
+ "NameError: name 'foobar' is not defined"
+
+.. autoclass:: py.code.ExceptionInfo
+ :members:
+
diff --git a/testing/web-platform/tests/tools/py/doc/conf.py b/testing/web-platform/tests/tools/py/doc/conf.py
new file mode 100644
index 000000000..de4cbf8a4
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/conf.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+#
+# py documentation build configuration file, created by
+# sphinx-quickstart on Thu Oct 21 08:30:10 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.txt'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'py'
+copyright = u'2010, holger krekel et. al.'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+# The full version, including alpha/beta/rc tags.
+import py
+release = py.__version__
+version = ".".join(release.split(".")[:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'py'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'py.tex', u'py Documentation',
+ u'holger krekel et. al.', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'py', u'py Documentation',
+ [u'holger krekel et. al.'], 1)
+]
+
+autodoc_member_order = "bysource"
+autodoc_default_flags = ["inherited-members"]
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = u'py'
+epub_author = u'holger krekel et. al.'
+epub_publisher = u'holger krekel et. al.'
+epub_copyright = u'2010, holger krekel et. al.'
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files shat should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/testing/web-platform/tests/tools/py/doc/download.html b/testing/web-platform/tests/tools/py/doc/download.html
new file mode 100644
index 000000000..5f4c46640
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/download.html
@@ -0,0 +1,18 @@
+<html>
+ <head>
+ <meta http-equiv="refresh" content=" 1 ; URL=install.html" />
+ </head>
+
+ <body>
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-7597274-3");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+</body>
+</html>
+
diff --git a/testing/web-platform/tests/tools/py/doc/example/genhtml.py b/testing/web-platform/tests/tools/py/doc/example/genhtml.py
new file mode 100644
index 000000000..b5c8f525b
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/example/genhtml.py
@@ -0,0 +1,13 @@
+from py.xml import html
+
+paras = "First Para", "Second para"
+
+doc = html.html(
+ html.head(
+ html.meta(name="Content-Type", value="text/html; charset=latin1")),
+ html.body(
+ [html.p(p) for p in paras]))
+
+print unicode(doc).encode('latin1')
+
+
diff --git a/testing/web-platform/tests/tools/py/doc/example/genhtmlcss.py b/testing/web-platform/tests/tools/py/doc/example/genhtmlcss.py
new file mode 100644
index 000000000..3e6d0af54
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/example/genhtmlcss.py
@@ -0,0 +1,23 @@
+import py
+html = py.xml.html
+
+class my(html):
+ "a custom style"
+ class body(html.body):
+ style = html.Style(font_size = "120%")
+
+ class h2(html.h2):
+ style = html.Style(background = "grey")
+
+ class p(html.p):
+ style = html.Style(font_weight="bold")
+
+doc = my.html(
+ my.head(),
+ my.body(
+ my.h2("hello world"),
+ my.p("bold as bold can")
+ )
+)
+
+print doc.unicode(indent=2)
diff --git a/testing/web-platform/tests/tools/py/doc/example/genxml.py b/testing/web-platform/tests/tools/py/doc/example/genxml.py
new file mode 100644
index 000000000..5f754e889
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/example/genxml.py
@@ -0,0 +1,17 @@
+
+import py
+class ns(py.xml.Namespace):
+ pass
+
+doc = ns.books(
+ ns.book(
+ ns.author("May Day"),
+ ns.title("python for java programmers"),),
+ ns.book(
+ ns.author("why", class_="somecssclass"),
+ ns.title("Java for Python programmers"),),
+ publisher="N.N",
+ )
+print doc.unicode(indent=2).encode('utf8')
+
+
diff --git a/testing/web-platform/tests/tools/py/doc/faq.txt b/testing/web-platform/tests/tools/py/doc/faq.txt
new file mode 100644
index 000000000..52cb4b3fb
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/faq.txt
@@ -0,0 +1,172 @@
+==================================
+Frequently Asked Questions
+==================================
+
+.. contents::
+ :local:
+ :depth: 2
+
+
+On naming, nosetests, licensing and magic
+===========================================
+
+Why the ``py`` naming? Why not ``pytest``?
+----------------------------------------------------
+
+This is mostly for historic reasons - the aim is
+to get away from the somewhat questionable 'py' name
+at some point. These days (2010) the 'py' library
+consists almost completely of APIs that are used
+by the ``py.test`` tool. There are also some
+other uses, e.g. the ``py.path.local()`` and
+other path implementations. So it requires some
+work to factor them out and make the shift.
+
+Why the ``py.test`` naming?
+------------------------------------
+
+Because of TAB-completion under Bash/shells: if you hit
+``py.<TAB>`` you'll get a list of available development
+tools that all share the ``py.`` prefix. Another motivation
+was to unify the package name ("py.test") and the tool filename.
+
+What's py.test's relation to ``nosetests``?
+---------------------------------------------
+
+py.test and nose_ share a basic philosophy when it comes
+to running Python tests. In fact,
+with py.test-1.1.0 it is even easier to run many test suites
+that currently work with ``nosetests``. nose_ was created
+as a clone of ``py.test`` when py.test was in the ``0.8`` release
+cycle, so some of the newer features_ introduced with py.test-1.0
+and py.test-1.1 have no counterpart in nose_.
+
+.. _nose: http://somethingaboutorange.com/mrl/projects/nose/0.11.1/
+.. _features: test/features.html
+.. _apipkg: http://pypi.python.org/pypi/apipkg
+
+
+What's this "magic" with py.test?
+----------------------------------------
+
+These are the issues where people have used the term "magic" in the past:
+
+* `py/__init__.py`_ uses the apipkg_ mechanism for lazy-importing
+  and full control over what API you get when you ``import py``.
+
+* when an ``assert`` statement fails, py.test re-interprets the expression
+  to show the intermediate values. If your expression
+  has side effects the intermediate values may not be the same, obfuscating
+  the initial error (this is also explained at the command line if it happens).
+  ``py.test --no-assert`` turns off assert re-interpretation.
+  Sidenote: it is good practice to avoid asserts with side effects.
+
+
+.. _`py namespaces`: index.html
+.. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py
+
+Where does my ``py.test`` come/import from?
+----------------------------------------------
+
+You can issue::
+
+ py.test --version
+
+which tells you both version and import location of the tool.
+
+
+function arguments, parametrized tests and setup
+====================================================
+
+.. _funcargs: test/funcargs.html
+
+Is using funcarg- versus xUnit-based setup a style question?
+---------------------------------------------------------------
+
+It depends. For simple applications or for people experienced
+with nose_ or unittest-style test setup, using `xUnit style setup`_
+makes some sense. For larger test suites, parametrized testing
+or the setup of complex test resources using funcargs_ is recommended.
+Moreover, funcargs are ideal for writing advanced test support
+code (e.g. the monkeypatch_, tmpdir_ or capture_ funcargs)
+because the support code can register setup/teardown functions
+in a managed class/module/function scope.
+
+.. _monkeypatch: test/plugin/monkeypatch.html
+.. _tmpdir: test/plugin/tmpdir.html
+.. _capture: test/plugin/capture.html
+.. _`xUnit style setup`: test/xunit_setup.html
+.. _`pytest_nose`: test/plugin/nose.html
+
+.. _`why pytest_pyfuncarg__ methods?`:
+
+Why the ``pytest_funcarg__*`` name for funcarg factories?
+---------------------------------------------------------------
+
+When experimenting with funcargs an explicit registration mechanism
+was considered. But lacking a good use case for this indirection and
+flexibility we decided to go for `Convention over Configuration`_ and
+to let you specify the factory directly. Besides removing the need
+for an indirection, it allows you to "grep" for ``pytest_funcarg__MYARG``
+and safely find all factory functions for the ``MYARG`` function
+argument. It helps to deal with the de-coupling of function
+argument usage and creation.
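+
+For illustration, here is a minimal sketch of this convention; the
+``myresource`` name and the ``Resource`` class are made up for the
+example, and the ``request`` object is the funcarg request that py.test
+passes to every such factory::
+
+    # conftest.py
+    class Resource(object):
+        pass
+
+    def pytest_funcarg__myresource(request):
+        # called for each test function that has a "myresource" argument
+        return Resource()
+
+    # test_something.py
+    def test_uses_resource(myresource):
+        assert isinstance(myresource, Resource)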
+
+.. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration
+
+Can I yield multiple values from a factory function?
+-----------------------------------------------------
+
+There are two conceptual reasons why yielding from a factory function
+is not possible:
+
+* Calling factories for obtaining test function arguments
+ is part of setting up and running a test. At that
+ point it is not possible to add new test calls to
+ the test collection anymore.
+
+* If multiple factories yielded values there would
+ be no natural place to determine the combination
+ policy - in real-world examples some combinations
+ often should not run.
+
+Use the `pytest_generate_tests`_ hook to solve both issues
+and implement the `parametrization scheme of your choice`_.
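+
+As a rough sketch, using the ``metafunc.addcall()`` API of this py.test
+generation (the ``myarg`` name is made up for the example)::
+
+    # conftest.py or test module
+    def pytest_generate_tests(metafunc):
+        if "myarg" in metafunc.funcargnames:
+            for value in (1, 2, 3):
+                metafunc.addcall(funcargs=dict(myarg=value))
+
+    def test_with_myarg(myarg):
+        assert myarg in (1, 2, 3)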
+
+.. _`pytest_generate_tests`: test/funcargs.html#parametrizing-tests
+.. _`parametrization scheme of your choice`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/
+
+
+py.test interaction with other packages
+===============================================
+
+Issues with py.test, multiprocess and setuptools?
+------------------------------------------------------------
+
+On Windows the multiprocess package will instantiate sub processes
+by pickling and thus implicitly re-import a lot of local modules.
+Unfortunately, setuptools-0.6.11 does not protect its generated command
+line script with ``if __name__=='__main__'``. This leads to infinite
+recursion when running a test that instantiates Processes.
+There are these workarounds:
+
+* `install Distribute`_ as a drop-in replacement for setuptools
+ and install py.test
+
+* `directly use a checkout`_ which avoids all setuptools/Distribute
+ installation
+
+If those options are not available to you, you may also manually
+fix the script that is created by setuptools by inserting an
+``if __name__ == '__main__'`` guard. Or you can create a "pytest.py"
+script with this content and invoke it with your python interpreter::
+
+ import py
+ if __name__ == '__main__':
+ py.cmdline.pytest()
+
+.. _`directly use a checkout`: install.html#directly-use-a-checkout
+
+.. _`install distribute`: http://pypi.python.org/pypi/distribute#installation-instructions
+
+
diff --git a/testing/web-platform/tests/tools/py/doc/img/pylib.png b/testing/web-platform/tests/tools/py/doc/img/pylib.png
new file mode 100644
index 000000000..2e10d4388
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/img/pylib.png
Binary files differ
diff --git a/testing/web-platform/tests/tools/py/doc/index.txt b/testing/web-platform/tests/tools/py/doc/index.txt
new file mode 100644
index 000000000..7eb5c6390
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/index.txt
@@ -0,0 +1,43 @@
+.. py documentation master file, created by
+ sphinx-quickstart on Thu Oct 21 08:30:10 2010.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to py's documentation!
+=================================
+
+see :ref:`CHANGELOG <changelog>` for latest changes.
+
+.. note::
+
+ Since version 1.4, the testing tool "py.test" is part of its own `pytest distribution`_.
+
+.. _`pytest distribution`: http://pytest.org
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   install
+   path
+   code
+   io
+   log
+   xml
+   misc
+
+.. toctree::
+   :hidden:
+   :glob:
+
+ announce/release-2.0.0
+ changelog
+ announce/*
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
+
diff --git a/testing/web-platform/tests/tools/py/doc/install.txt b/testing/web-platform/tests/tools/py/doc/install.txt
new file mode 100644
index 000000000..d0e981def
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/install.txt
@@ -0,0 +1,88 @@
+
+.. _`py`:
+.. _`index page`: http://pypi.python.org/pypi/py/
+
+installation info in a nutshell
+===================================================
+
+**PyPI name**: py_
+
+**Pythons**: CPython 2.6, 2.7, 3.3, 3.4, PyPy-2.3
+
+**Operating systems**: Linux, Windows, OSX, Unix
+
+**Requirements**: setuptools_ or Distribute_
+
+**Installers**: ``easy_install`` and ``pip``
+
+**hg repository**: https://bitbucket.org/hpk42/py
+
+easy install or pip ``py``
+-----------------------------
+
+Both `Distribute`_ and setuptools_ provide the ``easy_install``
+installation tool; with it you can type into a command line window::
+
+ easy_install -U py
+
+to install the latest release of the py lib. The ``-U`` switch
+will trigger an upgrade if you already have an older version installed.
+
+.. note::
+
+ As of version 1.4 py does not contain py.test anymore - you
+ need to install the new `pytest`_ distribution.
+
+.. _pytest: http://pytest.org
+
+Working from version control or a tarball
+-----------------------------------------------
+
+To follow development or start experiments, checkout the
+complete code and documentation source with mercurial_::
+
+ hg clone https://bitbucket.org/hpk42/py
+
+Development takes place on the 'trunk' branch.
+
+You can also go to the python package index and
+download and unpack a TAR file::
+
+ http://pypi.python.org/pypi/py/
+
+activating a checkout with setuptools
+--------------------------------------------
+
+With a working `Distribute`_ or setuptools_ installation you can type::
+
+ python setup.py develop
+
+in order to work inline with the tools and the lib of your checkout.
+
+.. _`no-setuptools`:
+
+.. _`directly use a checkout`:
+
+.. _`setuptools`: http://pypi.python.org/pypi/setuptools
+
+
+Mailing list and issue tracker
+--------------------------------------
+
+- `py-dev developers list`_ and `commit mailing list`_.
+
+- #pylib on irc.freenode.net IRC channel for random questions.
+
+- `bitbucket issue tracker`_: use this issue tracker to report
+  bugs or request features.
+
+.. _`bitbucket issue tracker`: http://bitbucket.org/hpk42/py/issues/
+
+.. _codespeak: http://codespeak.net/
+.. _`py-dev`:
+.. _`development mailing list`:
+.. _`py-dev developers list`: http://codespeak.net/mailman/listinfo/py-dev
+.. _`py-svn`:
+.. _`commit mailing list`: http://codespeak.net/mailman/listinfo/py-svn
+
+.. include:: links.inc
diff --git a/testing/web-platform/tests/tools/py/doc/io.txt b/testing/web-platform/tests/tools/py/doc/io.txt
new file mode 100644
index 000000000..c11308a6d
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/io.txt
@@ -0,0 +1,59 @@
+=======
+py.io
+=======
+
+
+The 'py' lib provides helper classes for capturing IO during
+execution of a program.
+
+IO Capturing examples
+===============================================
+
+``py.io.StdCapture``
+---------------------------
+
+Basic Example::
+
+ >>> import py
+ >>> capture = py.io.StdCapture()
+ >>> print "hello"
+ >>> out,err = capture.reset()
+ >>> out.strip() == "hello"
+ True
+
+For calling functions you may use a shortcut::
+
+ >>> import py
+ >>> def f(): print "hello"
+ >>> res, out, err = py.io.StdCapture.call(f)
+ >>> out.strip() == "hello"
+ True
+
+``py.io.StdCaptureFD``
+---------------------------
+
+If you also want to capture writes to the stdout/stderr
+file descriptors, you may invoke::
+
+ >>> import py, sys
+ >>> capture = py.io.StdCaptureFD(out=False, in_=False)
+ >>> sys.stderr.write("world")
+ >>> out,err = capture.reset()
+ >>> err
+ 'world'
+
+py.io object reference
+============================
+
+.. autoclass:: py.io.StdCaptureFD
+ :members:
+ :inherited-members:
+
+.. autoclass:: py.io.StdCapture
+ :members:
+ :inherited-members:
+
+.. autoclass:: py.io.TerminalWriter
+ :members:
+ :inherited-members:
+
diff --git a/testing/web-platform/tests/tools/py/doc/links.inc b/testing/web-platform/tests/tools/py/doc/links.inc
new file mode 100644
index 000000000..9bcfe5cf8
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/links.inc
@@ -0,0 +1,16 @@
+
+.. _`skipping plugin`: plugin/skipping.html
+.. _`funcargs mechanism`: funcargs.html
+.. _`doctest.py`: http://docs.python.org/library/doctest.html
+.. _`xUnit style setup`: xunit_setup.html
+.. _`pytest_nose`: plugin/nose.html
+.. _`reStructured Text`: http://docutils.sourceforge.net
+.. _`Python debugger`: http://docs.python.org/lib/module-pdb.html
+.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
+.. _pytest: http://pypi.python.org/pypi/pytest
+.. _mercurial: http://mercurial.selenic.com/wiki/
+.. _`setuptools`: http://pypi.python.org/pypi/setuptools
+.. _`distribute`: http://pypi.python.org/pypi/distribute
+.. _`pip`: http://pypi.python.org/pypi/pip
+.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
+.. _hudson: http://hudson-ci.org/
diff --git a/testing/web-platform/tests/tools/py/doc/log.txt b/testing/web-platform/tests/tools/py/doc/log.txt
new file mode 100644
index 000000000..ca60fcac2
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/log.txt
@@ -0,0 +1,208 @@
+.. role:: code(literal)
+.. role:: file(literal)
+
+.. XXX figure out how the code literals should be dealt with in sphinx. There is probably something builtin.
+
+========================================
+py.log documentation and musings
+========================================
+
+
+Foreword
+========
+
+This document is an attempt to briefly state the actual specification of the
+:code:`py.log` module. It was written by Francois Pinard and also contains
+some ideas for enhancing the py.log facilities.
+
+NOTE that :code:`py.log` is subject to refactoring; it may change with
+the next release.
+
+This document is meant to trigger or facilitate discussions. It shamelessly
+steals from the `Agile Testing`__ comments, and from other sources as well,
+without really trying to sort them out.
+
+__ http://agiletesting.blogspot.com/2005/06/keyword-based-logging-with-py-library.html
+
+
+Logging organisation
+====================
+
+The :code:`py.log` module aims at a niche comparable to that of the
+`logging module`__ found within the standard Python distribution, yet
+with much simpler paradigms for configuration and usage.
+
+__ http://www.python.org/doc/2.4.2/lib/module-logging.html
+
+Holger Krekel, the main :code:`py` library developer, introduced
+the idea of keyword-based logging and the idea of logging *producers* and
+*consumers*. A log producer is an object used by the application code
+to send messages to various log consumers. When you create a log
+producer, you define a set of keywords that are then used to both route
+the logging messages to consumers, and to prefix those messages.
+
+In fact, each log producer has a few keywords associated with it for
+identification purposes. These keywords form a tuple of strings, and
+may be used to later retrieve a particular log producer.
+
+A log producer may (or may not) be associated with a log consumer, meant
+to handle log messages in particular ways. The log consumers can be
+``STDOUT``, ``STDERR``, log files, syslog, the Windows Event Log, user
+defined functions, etc. (Yet, logging to syslog or to the Windows Event
+Log is only future plans for now). A log producer has never more than
+one consumer at a given time, but it is possible to dynamically switch
+a producer to use another consumer. On the other hand, a single log
+consumer may be associated with many producers.
+
+Note that creating and associating a producer and a consumer is done
+automatically when not otherwise overridden, so using :code:`py` logging
+is quite comfortable even in the smallest programs. More typically,
+the application programmer will likely design a hierarchy of producers,
+and will select keywords appropriately for marking the hierarchy tree.
+If a node of the hierarchical tree of producers has to be divided in
+sub-trees, all producers in the sub-trees share, as a common prefix, the
+keywords of the node being divided. In other words, we go further down
+in the hierarchy of producers merely by adding keywords.
+
+Using the py.log library
+================================
+
+To use the :code:`py.log` library, the user must import it into a Python
+application, create at least one log producer and one log consumer, have
+producers and consumers associated, and finally call the log producers
+as needed, giving them log messages.
+
+Importing
+---------
+
+Once the :code:`py` library is installed on your system, a mere::
+
+ import py
+
+holds enough magic for lazily importing the various facilities of the
+:code:`py` library when they are first needed. This is really how
+:code:`py.log` is made available to the application. For example, after
+the above ``import py``, one may directly write ``py.log.Producer(...)``
+and everything should work fine, the user does not have to worry about
+specifically importing more modules.
+
+Creating a producer
+-------------------
+
+There are three ways for creating a log producer instance:
+
+ + As soon as ``py.log`` is first evaluated within an application
+ program, a default log producer is created, and made available under
+ the name ``py.log.default``. The keyword ``default`` is associated
+ with that producer.
+
+ + The ``py.log.Producer()`` constructor may be explicitly called
+ for creating a new instance of a log producer. That constructor
+ accepts, as an argument, the keywords that should be associated with
+ that producer. Keywords may be given either as a tuple of keyword
+ strings, or as a single space-separated string of keywords.
+
+ + Whenever an attribute is *taken* out of a log producer instance,
+ for the first time that attribute is taken, a new log producer is
+ created. The keywords associated with that new producer are those
+ of the initial producer instance, to which is appended the name of
+ the attribute being taken.
+
+The last point is especially useful, as it allows using log producers
+without further declarations, merely creating them *on-the-fly*.
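+
+As a small illustration of these three ways (the ``"myapp"`` keyword is
+made up for the example)::
+
+    import py
+
+    log = py.log.default               # the default producer
+    applog = py.log.Producer("myapp")  # explicit constructor call
+    sublog = applog.database           # attribute access creates a new
+                                       # producer with keywords "myapp database"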
+
+Creating a consumer
+-------------------
+
+There are many ways of creating or denoting a log consumer (a small
+sketch follows after this list):
+
+ + A default consumer exists within the ``py.log`` facilities, which
+ has the effect of writing log messages on the Python standard output
+ stream. That consumer is associated at the very top of the producer
+ hierarchy, and as such, is called whenever no other consumer is
+ found.
+
+ + The notation ``py.log.STDOUT`` accesses a log consumer which writes
+ log messages on the Python standard output stream.
+
+ + The notation ``py.log.STDERR`` accesses a log consumer which writes
+ log messages on the Python standard error stream.
+
+ + The ``py.log.File()`` constructor accepts, as argument, either a file
+ already opened in write mode or any similar file-like object, and
+ creates a log consumer able to write log messages onto that file.
+
+ + The ``py.log.Path()`` constructor accepts a file name for its first
+ argument, and creates a log consumer able to write log messages into
+ that file. The constructor call accepts a few keyword parameters:
+
+ + ``append``, which is ``False`` by default, may be used for
+ opening the file in append mode instead of write mode.
+
+      + ``delayed_create``, which is ``False`` by default, may be used
+        for opening the file at the latest possible time. Consequently,
+        the file will not be created if it did not already exist and no
+        log message ever gets written to it.
+
+ + ``buffering``, which is 1 by default, is used when opening the
+ file. Buffering can be turned off by specifying a 0 value. The
+ buffer size may also be selected through this argument.
+
+ + Any user defined function may be used for a log consumer. Such a
+ function should accept a single argument, which is the message to
+ write, and do whatever is deemed appropriate by the programmer.
+ When the need arises, this may be an especially useful and flexible
+ feature.
+
+ + The special value ``None`` means no consumer at all. This acts just
+ like if there was a consumer which would silently discard all log
+ messages sent to it.
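+
+A small sketch of the consumer kinds described above (the file name is
+made up for the example)::
+
+    import py
+
+    py.log.STDOUT                        # writes to the standard output stream
+    py.log.STDERR                        # writes to the standard error stream
+    py.log.File(open("out.log", "w"))    # wraps an already opened file object
+    py.log.Path("out.log", append=True)  # opens the named file itself
+
+    def my_consumer(msg):                # any one-argument callable works, too
+        print str(msg)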
+
+Associating producers and consumers
+-----------------------------------
+
+Each log producer may have at most one log consumer associated with
+it. A log producer gets associated with a log consumer through a
+``py.log.setconsumer()`` call. That function accepts two arguments,
+the first identifying a producer (a tuple of keyword strings or a single
+space-separated string of keywords), the second specifying the precise
+consumer to use for that producer. Until this function is called for a
+producer, that producer does not have any explicit consumer associated
+with it.
+
+Now, the hierarchy of log producers establishes which consumer gets used
+whenever a producer has no explicit consumer. When a log producer
+has no consumer explicitly associated with it, it dynamically and
+recursively inherits the consumer of its parent node, that is, the node
+one step closer to the root of the hierarchy. In other words, the
+rightmost keywords of that producer are dropped until another producer
+is found which has an explicit consumer. A nice side effect is that,
+by explicitly associating a consumer with a producer, all consumer-less
+producers which appear under that producer in the hierarchy tree
+automatically *inherit* that consumer.
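+
+A minimal sketch of associating producers with consumers (the keywords
+and the file name are made up for the example)::
+
+    import py
+
+    log = py.log.Producer("myapp db")
+    py.log.setconsumer("myapp", py.log.STDERR)             # whole "myapp" tree
+    py.log.setconsumer("myapp db", py.log.Path("db.log"))  # override one branch
+
+    log("connected")   # goes to db.log; other "myapp ..." producers
+                       # still write to stderr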
+
+Writing log messages
+--------------------
+
+All log producer instances are also callable, and log messages are
+generated by calling them. Each call to a producer object
+produces the text for one log entry, which, in turn, is sent to the log
+consumer for that producer.
+
+The log entry displays, after a prefix identifying the log producer
+being used, all arguments given in the call, converted to strings and
+space-separated. (This is meant by design to be fairly similar to what
+the ``print`` statement does in Python). The prefix itself is made up
+of a colon-separated list of keywords associated with the producer, the
+whole being set within square brackets.
+
+Note that the consumer is responsible for adding the newline at the end
+of the log entry. That final newline is not part of the text for the
+log entry.
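+
+A short sketch of what this looks like in practice (the keywords are
+made up for the example)::
+
+    import py
+
+    log = py.log.Producer("app subsystem")
+    log("hello", 42)
+    # the consumer receives one entry along the lines of:
+    #   [app:subsystem] hello 42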
+
+.. Other details
+.. -------------
+.. XXX: fill in details
+.. + Should speak about pickle-ability of :code:`py.log`.
+..
+.. + What is :code:`log.get` (in :file:`logger.py`)?
diff --git a/testing/web-platform/tests/tools/py/doc/misc.txt b/testing/web-platform/tests/tools/py/doc/misc.txt
new file mode 100644
index 000000000..8c3c0b3f7
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/misc.txt
@@ -0,0 +1,93 @@
+====================================
+Miscellaneous features of the py lib
+====================================
+
+Mapping the standard python library into py
+===========================================
+
+The ``py.std`` object allows lazy access to
+standard library modules. For example, to get to the print-exception
+functionality of the standard library you can write::
+
+ py.std.traceback.print_exc()
+
+without having to do anything other than the usual ``import py``
+at the beginning. You can access any other top-level standard
+library module this way. This means that you will only trigger
+imports of modules that are actually needed. Note that no attempt
+is made to import submodules.
+
+Support for interaction with system utilities/binaries
+======================================================
+
+Currently, the py lib offers two ways to interact with
+system executables. ``py.process.cmdexec()`` invokes
+the shell in order to execute a string. The other,
+the ``sysexec()`` method of ``py.path.local``, lets you
+directly execute a binary.
+
+Both approaches will raise an exception in case of a return
+code other than 0, and otherwise return the stdout output
+of the child process.
+
+The shell based approach
+------------------------
+
+You can execute a command via your system shell
+by doing something like::
+
+ out = py.process.cmdexec('ls -v')
+
+However, the ``cmdexec`` approach has a few shortcomings:
+
+- it relies on the underlying system shell
+- it necessitates shell-escaping for expressing arguments
+- it does not easily let you "fix" the binary you want to run
+- it only allows executing executables from the local
+  filesystem
+
+.. _sysexec:
+
+local paths have ``sysexec``
+----------------------------
+
+In order to synchronously execute an executable file you
+can use ``sysexec``::
+
+ binsvn.sysexec('ls', 'http://codespeak.net/svn')
+
+where ``binsvn`` is a path that points to the ``svn`` commandline
+binary. Note that this function does not offer any shell-escaping
+so you have to pass in already separated arguments.
+
+finding an executable local path
+--------------------------------
+
+Finding an executable differs considerably across platforms.
+Currently, the ``PATH`` environment variable based search on
+unix platforms is supported::
+
+ py.path.local.sysfind('svn')
+
+which returns the first path whose ``basename`` matches ``svn``.
+In principle, ``sysfind`` deploys platform-specific algorithms
+to perform the search. On Windows, for example, it may look
+at the registry (XXX).
+
+To make the story complete, you can pass in a second ``checker``
+argument that is called for each executable found.  For example, if
+you have multiple binaries available you may want to select the
+right version::
+
+    def mysvn(p):
+        """ check that the given svn binary has version 1.1. """
+        # sysexec (see above) returns the stdout output of the child process
+        line = p.sysexec('--version').splitlines()[0]
+        if 'version 1.1' in line:
+            return p
+    binsvn = py.path.local.sysfind('svn', checker=mysvn)
+
+
+Cross-Python Version compatibility helpers
+=============================================
+
+The ``py.builtin`` namespace provides a number of helpers that make it
+easier to write Python code that runs on multiple Python interpreters,
+mainly Python 2 and Python 3. Type ``help(py.builtin)`` at a Python
+prompt for the selection of builtins.
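+
+A small sketch, assuming the ``print_`` and ``_totext`` helpers provided
+by the ``py.builtin`` namespace (check ``help(py.builtin)`` for the
+authoritative selection)::
+
+    import py
+
+    # behaves like the Python 3 print() function on both Python 2 and 3
+    py.builtin.print_("hello", "world")
+
+    # create a text (unicode) object in a 2/3-compatible way
+    s = py.builtin._totext("some text")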
diff --git a/testing/web-platform/tests/tools/py/doc/path.txt b/testing/web-platform/tests/tools/py/doc/path.txt
new file mode 100644
index 000000000..837c1d192
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/path.txt
@@ -0,0 +1,260 @@
+=======
+py.path
+=======
+
+The 'py' lib provides a uniform high-level API to deal with filesystems
+and filesystem-like interfaces: ``py.path``.  It aims to offer a central
+object for fs-like object trees (reading from and writing to files, adding
+files/directories, examining the types and structure, etc.), and provides
+a number of implementations of this API out of the box.
+
+py.path.local - local file system path
+===============================================
+
+.. _`local`:
+
+basic interactive example
+-------------------------------------
+
+The first and most obvious of the implementations is a wrapper around a local
+filesystem. It's just a bit nicer in usage than the regular Python APIs, and
+of course all the functionality is bundled together rather than spread over a
+number of modules.
+
+Example usage, here we use the ``py.test.ensuretemp()`` function to create
+a ``py.path.local`` object for us (which wraps a directory):
+
+.. sourcecode:: pycon
+
+ >>> import py
+ >>> temppath = py.test.ensuretemp('py.path_documentation')
+ >>> foopath = temppath.join('foo') # get child 'foo' (lazily)
+ >>> foopath.check() # check if child 'foo' exists
+ False
+ >>> foopath.write('bar') # write some data to it
+ >>> foopath.check()
+ True
+ >>> foopath.read()
+ 'bar'
+ >>> foofile = foopath.open() # return a 'real' file object
+ >>> foofile.read(1)
+ 'b'
+
+reference documentation
+---------------------------------
+
+.. autoclass:: py._path.local.LocalPath
+ :members:
+ :inherited-members:
+
+``py.path.svnurl`` and ``py.path.svnwc``
+==================================================
+
+Two other ``py.path`` implementations that the py lib provides wrap the
+popular `Subversion`_ revision control system: the first (called 'svnurl')
+by interfacing with a remote server, the second by wrapping a local checkout.
+Both allow you to access relatively advanced features such as metadata and
+versioning, and both do so in a more user-friendly manner than other
+existing solutions.
+
+Some example usage of ``py.path.svnurl``:
+
+.. sourcecode:: pycon
+
+ .. >>> import py
+ .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
+ >>> url = py.path.svnurl('http://codespeak.net/svn/py')
+ >>> info = url.info()
+ >>> info.kind
+ 'dir'
+ >>> firstentry = url.log()[-1]
+ >>> import time
+ >>> time.strftime('%Y-%m-%d', time.gmtime(firstentry.date))
+ '2004-10-02'
+
+Example usage of ``py.path.svnwc``:
+
+.. sourcecode:: pycon
+
+ .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
+ >>> temp = py.test.ensuretemp('py.path_documentation')
+ >>> wc = py.path.svnwc(temp.join('svnwc'))
+ >>> wc.checkout('http://codespeak.net/svn/py/dist/py/path/local')
+ >>> wc.join('local.py').check()
+ True
+
+.. _`Subversion`: http://subversion.tigris.org/
+
+svn path related API reference
+-----------------------------------------
+
+.. autoclass:: py._path.svnwc.SvnWCCommandPath
+ :members:
+ :inherited-members:
+
+.. autoclass:: py._path.svnurl.SvnCommandPath
+ :members:
+ :inherited-members:
+
+.. autoclass:: py._path.svnwc.SvnAuth
+ :members:
+ :inherited-members:
+
+Common vs. specific API, Examples
+========================================
+
+All Path objects support a common set of operations, suitable
+for many use cases and allowing you to transparently switch the
+path object within an application (e.g. from "local" to "svnwc").
+The common set includes functions such as `path.read()` to read all data
+from a file, `path.write()` to write data, `path.listdir()` to get a list
+of directory entries, `path.check()` to check if a node exists
+and is of a particular type, `path.join()` to get
+to a (grand)child, `path.visit()` to recursively walk through a node's
+children, etc. Only things that are not common on 'normal' filesystems (yet),
+such as handling metadata (e.g. the Subversion "properties") require
+using specific APIs.
+
+A quick 'cookbook' of small examples that will be useful 'in real life',
+which also presents parts of the 'common' API, and shows some non-common
+methods:
+
+Searching `.txt` files
+--------------------------------
+
+Search for a particular string inside all files with a .txt extension in a
+specific directory.
+
+.. sourcecode:: pycon
+
+ >>> dirpath = temppath.ensure('testdir', dir=True)
+ >>> dirpath.join('textfile1.txt').write('foo bar baz')
+ >>> dirpath.join('textfile2.txt').write('frob bar spam eggs')
+ >>> subdir = dirpath.ensure('subdir', dir=True)
+ >>> subdir.join('textfile1.txt').write('foo baz')
+ >>> subdir.join('textfile2.txt').write('spam eggs spam foo bar spam')
+ >>> results = []
+ >>> for fpath in dirpath.visit('*.txt'):
+ ... if 'bar' in fpath.read():
+ ... results.append(fpath.basename)
+ >>> results.sort()
+ >>> results
+ ['textfile1.txt', 'textfile2.txt', 'textfile2.txt']
+
+Working with Paths
+----------------------------
+
+This example shows the ``py.path`` features for dealing with
+filesystem paths. Note that the filesystem is never touched;
+all operations are performed on a string level (so the paths
+don't have to exist, either):
+
+.. sourcecode:: pycon
+
+ >>> p1 = py.path.local('/foo/bar')
+ >>> p2 = p1.join('baz/qux')
+ >>> p2 == py.path.local('/foo/bar/baz/qux')
+ True
+ >>> sep = py.path.local.sep
+ >>> p2.relto(p1).replace(sep, '/') # os-specific path sep in the string
+ 'baz/qux'
+ >>> p2.bestrelpath(p1).replace(sep, '/')
+ '../..'
+ >>> p2.join(p2.bestrelpath(p1)) == p1
+ True
+ >>> p3 = p1 / 'baz/qux' # the / operator allows joining, too
+ >>> p2 == p3
+ True
+ >>> p4 = p1 + ".py"
+ >>> p4.basename == "bar.py"
+ True
+ >>> p4.ext == ".py"
+ True
+ >>> p4.purebasename == "bar"
+ True
+
+This should be possible on every implementation of ``py.path``, so
+regardless of whether the implementation wraps a UNIX filesystem, a Windows
+one, or a database or object tree, these functions should be available (each
+with their own notion of path separators and dealing with conversions, etc.).
+
+Checking path types
+-------------------------------
+
+Now we will show a bit about the powerful 'check()' method on paths, which
+allows you to check whether a file exists, what type it is, etc.:
+
+.. sourcecode:: pycon
+
+ >>> file1 = temppath.join('file1')
+ >>> file1.check() # does it exist?
+ False
+ >>> file1 = file1.ensure(file=True) # 'touch' the file
+ >>> file1.check()
+ True
+ >>> file1.check(dir=True) # is it a dir?
+ False
+ >>> file1.check(file=True) # or a file?
+ True
+ >>> file1.check(ext='.txt') # check the extension
+ False
+ >>> textfile = temppath.ensure('text.txt', file=True)
+ >>> textfile.check(ext='.txt')
+ True
+ >>> file1.check(basename='file1') # we can use all the path's properties here
+ True
+
+Setting svn-properties
+--------------------------------
+
+As an example of 'uncommon' methods, we'll show how to read and write
+properties in a ``py.path.svnwc`` instance:
+
+.. sourcecode:: pycon
+
+ .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
+ >>> wc.propget('foo')
+ ''
+ >>> wc.propset('foo', 'bar')
+ >>> wc.propget('foo')
+ 'bar'
+ >>> len(wc.status().prop_modified) # our own props
+ 1
+ >>> msg = wc.revert() # roll back our changes
+ >>> len(wc.status().prop_modified)
+ 0
+
+SVN authentication
+----------------------------
+
+Some uncommon functionality can also be provided as extensions, such as SVN
+authentication:
+
+.. sourcecode:: pycon
+
+ .. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
+ >>> auth = py.path.SvnAuth('anonymous', 'user', cache_auth=False,
+ ... interactive=False)
+ >>> wc.auth = auth
+ >>> wc.update() # this should work
+ >>> path = wc.ensure('thisshouldnotexist.txt')
+ >>> try:
+ ... path.commit('testing')
+ ... except py.process.cmdexec.Error, e:
+ ... pass
+ >>> 'authorization failed' in str(e)
+ True
+
+Known problems / limitations
+===================================
+
+* The SVN path objects require the "svn" command line tool;
+  there is currently no support for python bindings.
+  Parsing the svn output can lead to problems, particularly
+  if you have a non-English locale setting.
+
+* While the path objects basically work on Windows,
+  no attention has been paid yet to making unicode paths
+  work or to dealing with the famous "8.3" filename issues.
+
+
diff --git a/testing/web-platform/tests/tools/py/doc/style.css b/testing/web-platform/tests/tools/py/doc/style.css
new file mode 100644
index 000000000..1faf762c7
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/style.css
@@ -0,0 +1,1044 @@
+body,body.editor,body.body {
+ font: 110% "Times New Roman", Arial, Verdana, Helvetica, serif;
+ background: White;
+ color: Black;
+}
+
+a, a.reference {
+ text-decoration: none;
+}
+a[href]:hover { text-decoration: underline; }
+
+img {
+ border: none;
+ vertical-align: middle;
+}
+
+p, div.text {
+ text-align: left;
+ line-height: 1.5em;
+ margin: 0.5em 0em 0em 0em;
+}
+
+
+
+p a:active {
+ color: Red;
+ background-color: transparent;
+}
+
+p img {
+ border: 0;
+ margin: 0;
+}
+
+img.inlinephoto {
+ padding: 0;
+ padding-right: 1em;
+ padding-top: 0.7em;
+ float: left;
+}
+
+hr {
+ clear: both;
+ height: 1px;
+ color: #8CACBB;
+ background-color: transparent;
+}
+
+
+ul {
+ line-height: 1.5em;
+ /*list-style-image: url("bullet.gif"); */
+ margin-left: 1.5em;
+ padding:0;
+}
+
+ol {
+ line-height: 1.5em;
+ margin-left: 1.5em;
+ padding:0;
+}
+
+ul a, ol a {
+ text-decoration: underline;
+}
+
+dl {
+}
+
+dd {
+ line-height: 1.5em;
+ margin-bottom: 1em;
+}
+
+blockquote {
+ font-family: Times, "Times New Roman", serif;
+ font-style: italic;
+ font-size: 120%;
+}
+
+code {
+ color: Black;
+ /*background-color: #dee7ec;*/
+ /*background-color: #cccccc;*/
+}
+
+pre {
+ padding: 1em;
+ border: 1px dotted #8cacbb;
+ color: Black;
+ /*
+ background-color: #dee7ec;
+ background-color: #cccccc;
+ background-color: #dee7ec;
+ */
+ overflow: auto;
+}
+
+
+.netscape4 {
+ display: none;
+}
+
+/* main page styles */
+
+/*a[href]:hover { color: black; text-decoration: underline; }
+a[href]:link { color: black; text-decoration: underline; }
+a[href] { color: black; text-decoration: underline; }
+*/
+
+span.menu_selected {
+ color: black;
+ text-decoration: none;
+ padding-right: 0.3em;
+ background-color: #cccccc;
+}
+
+
+a.menu {
+ /*color: #3ba6ec; */
+ font: 120% Verdana, Helvetica, Arial, sans-serif;
+ text-decoration: none;
+ padding-right: 0.3em;
+}
+
+a.menu[href]:visited, a.menu[href]:link{
+ /*color: #3ba6ec; */
+ text-decoration: none;
+}
+
+a.menu[href]:hover {
+ /*color: black;*/
+}
+
+div#pagetitle{
+ /*border-spacing: 20px;*/
+ font: 160% Verdana, Helvetica, Arial, sans-serif;
+ color: #3ba6ec;
+ vertical-align: middle;
+    left: 80px;
+ padding-bottom: 0.3em;
+}
+
+a.wikicurrent {
+ font: 100% Verdana, Helvetica, Arial, sans-serif;
+ color: #3ba6ec;
+ vertical-align: middle;
+}
+
+
+table.body {
+ border: 0;
+ /*padding: 0;
+ border-spacing: 0px;
+ border-collapse: separate;
+ */
+}
+
+td.page-header-left {
+ padding: 5px;
+ /*border-bottom: 1px solid #444444;*/
+}
+
+td.page-header-top {
+ padding: 0;
+
+ /*border-bottom: 1px solid #444444;*/
+}
+
+td.sidebar {
+ padding: 1 0 0 1;
+}
+
+td.sidebar p.classblock {
+ padding: 0 5 0 5;
+ margin: 1 1 1 1;
+ border: 1px solid #444444;
+ background-color: #eeeeee;
+}
+
+td.sidebar p.userblock {
+ padding: 0 5 0 5;
+ margin: 1 1 1 1;
+ border: 1px solid #444444;
+ background-color: #eeeeff;
+}
+
+td.content {
+ padding: 1 5 1 5;
+ vertical-align: top;
+ width: 100%;
+}
+
+p.ok-message {
+ background-color: #22bb22;
+ padding: 5 5 5 5;
+ color: white;
+ font-weight: bold;
+}
+p.error-message {
+ background-color: #bb2222;
+ padding: 5 5 5 5;
+ color: white;
+ font-weight: bold;
+}
+
+p:first-child {
+ margin: 0 ;
+ padding: 0;
+}
+
+/* style for forms */
+table.form {
+ padding: 2;
+ border-spacing: 0px;
+ border-collapse: separate;
+}
+
+table.form th {
+ color: #333388;
+ text-align: right;
+ vertical-align: top;
+ font-weight: normal;
+}
+table.form th.header {
+ font-weight: bold;
+ background-color: #eeeeff;
+ text-align: left;
+}
+
+table.form th.required {
+ font-weight: bold;
+}
+
+table.form td {
+ color: #333333;
+ empty-cells: show;
+ vertical-align: top;
+}
+
+table.form td.optional {
+ font-weight: bold;
+ font-style: italic;
+}
+
+table.form td.html {
+ color: #777777;
+}
+
+/* style for lists */
+table.list {
+ border-spacing: 0px;
+ border-collapse: separate;
+ vertical-align: top;
+ padding-top: 0;
+ width: 100%;
+}
+
+table.list th {
+ padding: 0 4 0 4;
+ color: #404070;
+ background-color: #eeeeff;
+ border-right: 1px solid #404070;
+ border-top: 1px solid #404070;
+ border-bottom: 1px solid #404070;
+ vertical-align: top;
+ empty-cells: show;
+}
+table.list th a[href]:hover { color: #404070 }
+table.list th a[href]:link { color: #404070 }
+table.list th a[href] { color: #404070 }
+table.list th.group {
+ background-color: #f4f4ff;
+ text-align: center;
+ font-size: 120%;
+}
+
+table.list td {
+ padding: 0 4 0 4;
+ border: 0 2 0 2;
+ border-right: 1px solid #404070;
+ color: #404070;
+ background-color: white;
+ vertical-align: top;
+ empty-cells: show;
+}
+
+table.list tr.normal td {
+ background-color: white;
+ white-space: nowrap;
+}
+
+table.list tr.alt td {
+ background-color: #efefef;
+ white-space: nowrap;
+}
+
+table.list td:first-child {
+ border-left: 1px solid #404070;
+ border-right: 1px solid #404070;
+}
+
+table.list th:first-child {
+ border-left: 1px solid #404070;
+ border-right: 1px solid #404070;
+}
+
+table.list tr.navigation th {
+ text-align: right;
+}
+table.list tr.navigation th:first-child {
+ border-right: none;
+ text-align: left;
+}
+
+
+/* style for message displays */
+table.messages {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.messages th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+}
+
+table.messages th {
+ font-weight: bold;
+ color: black;
+ text-align: left;
+ border-bottom: 1px solid #afafaf;
+}
+
+table.messages td {
+ font-family: monospace;
+ background-color: #efefef;
+ border-bottom: 1px solid #afafaf;
+ color: black;
+ empty-cells: show;
+ border-right: 1px solid #afafaf;
+ vertical-align: top;
+ padding: 2 5 2 5;
+}
+
+table.messages td:first-child {
+ border-left: 1px solid #afafaf;
+ border-right: 1px solid #afafaf;
+}
+
+/* style for file displays */
+table.files {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.files th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+}
+
+table.files th {
+ border-bottom: 1px solid #afafaf;
+ font-weight: bold;
+ text-align: left;
+}
+
+table.files td {
+ font-family: monospace;
+ empty-cells: show;
+}
+
+/* style for history displays */
+table.history {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.history th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+ font-size: 100%;
+}
+
+table.history th {
+ border-bottom: 1px solid #afafaf;
+ font-weight: bold;
+ text-align: left;
+ font-size: 90%;
+}
+
+table.history td {
+ font-size: 90%;
+ vertical-align: top;
+ empty-cells: show;
+}
+
+
+/* style for class list */
+table.classlist {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.classlist th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+}
+
+table.classlist th {
+ font-weight: bold;
+ text-align: left;
+}
+
+
+/* style for class help display */
+table.classhelp {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.classhelp th {
+ font-weight: bold;
+ text-align: left;
+ color: #707040;
+}
+
+table.classhelp td {
+ padding: 2 2 2 2;
+ border: 1px solid black;
+ text-align: left;
+ vertical-align: top;
+ empty-cells: show;
+}
+
+
+/* style for "other" displays */
+table.otherinfo {
+ border-spacing: 0px;
+ border-collapse: separate;
+ width: 100%;
+}
+
+table.otherinfo th.header{
+ padding-top: 10px;
+ border-bottom: 1px solid gray;
+ font-weight: bold;
+ background-color: white;
+ color: #707040;
+}
+
+table.otherinfo th {
+ border-bottom: 1px solid #afafaf;
+ font-weight: bold;
+ text-align: left;
+}
+
+input {
+ border: 1px solid #8cacbb;
+ color: Black;
+ background-color: white;
+ vertical-align: middle;
+ margin-bottom: 1px; /* IE bug fix */
+ padding: 0.1em;
+}
+
+select {
+ border: 1px solid #8cacbb;
+ color: Black;
+ background-color: white;
+ vertical-align: middle;
+ margin-bottom: 1px; /* IE bug fix */
+ padding: 0.1em;
+}
+
+
+a.nonexistent {
+ color: #FF2222;
+}
+a.nonexistent:visited {
+ color: #FF2222;
+}
+a.external {
+ color: #AA6600;
+}
+
+/*
+dl,ul,ol {
+ margin-top: 1pt;
+}
+tt,pre {
+ font-family: Lucida Console,Courier New,Courier,monotype;
+ font-size: 12pt;
+}
+pre.code {
+ margin-top: 8pt;
+ margin-bottom: 8pt;
+ background-color: #FFFFEE;
+ white-space:pre;
+ border-style:solid;
+ border-width:1pt;
+ border-color:#999999;
+ color:#111111;
+ padding:5px;
+ width:100%;
+}
+*/
+div.diffold {
+ background-color: #FFFF80;
+ border-style:none;
+ border-width:thin;
+ width:100%;
+}
+div.diffnew {
+ background-color: #80FF80;
+ border-style:none;
+ border-width:thin;
+ width:100%;
+}
+div.message {
+ margin-top: 6pt;
+ background-color: #E8FFE8;
+ border-style:solid;
+ border-width:1pt;
+ border-color:#999999;
+ color:#440000;
+ padding:5px;
+ width:100%;
+}
+strong.highlight {
+ background-color: #FFBBBB;
+/* as usual, NetScape fucks up with innocent CSS
+ border-color: #FFAAAA;
+ border-style: solid;
+ border-width: 1pt;
+*/
+}
+
+table.navibar {
+ background-color: #C8C8C8;
+ border-spacing: 3px;
+}
+td.navibar {
+ background-color: #E8E8E8;
+ vertical-align: top;
+ text-align: right;
+ padding: 0px;
+}
+
+a#versioninfo {
+ color: blue;
+}
+
+div#pagename {
+ font-size: 140%;
+ color: blue;
+ text-align: center;
+ font-weight: bold;
+ background-color: white;
+ padding: 0 ;
+}
+
+a.wikiaction, input.wikiaction {
+ color: black;
+ text-decoration: None;
+ text-align: center;
+ color: black;
+ /*border: 1px solid #3ba6ec; */
+ margin: 4px;
+ padding: 5;
+ padding-bottom: 0;
+ white-space: nowrap;
+}
+
+a.wikiaction[href]:hover {
+ color: black;
+ text-decoration: none;
+ /*background-color: #dddddd; */
+}
+
+
+div.legenditem {
+ padding-top: 0.5em;
+ padding-left: 0.3em;
+}
+
+span.wikitoken {
+ background-color: #eeeeee;
+}
+
+
+div#contentspace h1:first-child, div.heading:first-child {
+ padding-top: 0;
+ margin-top: 0;
+}
+div#contentspace h2:first-child {
+ padding-top: 0;
+ margin-top: 0;
+}
+
+/* heading and paragraph text */
+
+div.heading, h1 {
+ font-family: Verdana, Helvetica, Arial, sans-serif;
+ background-color: #58b3ef;
+ background-color: #FFFFFF;
+ /*color: #4893cf;*/
+ color: black;
+ padding-top: 1.0em;
+ padding-bottom:0.2em;
+ text-align: left;
+ margin-top: 0em;
+ /*margin-bottom:8pt;*/
+ font-weight: bold;
+ font-size: 115%;
+ border-bottom: 1px solid #8CACBB;
+}
+
+h2 {
+ border-bottom: 1px dotted #8CACBB;
+}
+
+
+h1, h2, h3, h4, h5, h6 {
+ color: Black;
+ clear: left;
+ font: 100% Verdana, Helvetica, Arial, sans-serif;
+ margin: 0;
+ padding-left: 0em;
+ padding-top: 1em;
+ padding-bottom: 0.2em;
+ /*border-bottom: 1px solid #8CACBB;*/
+}
+/* h1,h2 { padding-top: 0; }*/
+
+
+h1 { font-size: 145%; }
+h2 { font-size: 115%; }
+h3 { font-size: 105%; }
+h4 { font-size: 100%; }
+h5 { font-size: 100%; }
+
+h1 a { text-decoration: None;}
+
+div.exception {
+ background-color: #bb2222;
+ padding: 5 5 5 5;
+ color: white;
+ font-weight: bold;
+}
+pre.exception {
+ font-size: 110%;
+ padding: 1em;
+ border: 1px solid #8cacbb;
+ color: Black;
+ background-color: #dee7ec;
+ background-color: #cccccc;
+}
+
+/* defines for navgiation bar (documentation) */
+
+
+div.direntry {
+ padding-top: 0.3em;
+ padding-bottom: 0.3em;
+ margin-right: 1em;
+ font-weight: bold;
+ background-color: #dee7ec;
+ font-size: 110%;
+}
+
+div.fileentry {
+ font-family: Verdana, Helvetica, Arial, sans-serif;
+ padding-bottom: 0.3em;
+ white-space: nowrap;
+ line-height: 150%;
+}
+
+a.fileentry {
+ white-space: nowrap;
+}
+
+
+span.left {
+ text-align: left;
+}
+span.right {
+ text-align: right;
+}
+
+div.navbar {
+ /*margin: 0;*/
+ font-size: 80% /*smaller*/;
+ font-weight: bold;
+ text-align: left;
+ /* position: fixed; */
+ top: 100pt;
+ left: 0pt; /* auto; */
+ width: 120pt;
+ /* right: auto;
+ right: 0pt; 2em; */
+}
+
+
+div.history a {
+ /* font-size: 70%; */
+}
+
+div.wikiactiontitle {
+ font-weight: bold;
+}
+
+/* REST defines */
+
+div.document {
+ margin: 0;
+}
+
+h1.title {
+ margin: 0;
+ margin-bottom: 0.5em;
+}
+
+td.toplist {
+ vertical-align: top;
+}
+
+img#pyimg {
+ float: left;
+ padding-bottom: 1em;
+}
+
+div#navspace {
+ position: absolute;
+ font-size: 100%;
+ width: 150px;
+ overflow: hidden; /* scroll; */
+}
+
+
+div#errorline {
+ position: relative;
+ top: 5px;
+ float: right;
+}
+
+div#contentspace {
+ position: absolute;
+ /* font: 120% "Times New Roman", serif;*/
+ font: 110% Verdana, Helvetica, Arial, sans-serif;
+ left: 170px;
+ margin-right: 5px;
+}
+
+div#menubar {
+/* width: 400px; */
+ float: left;
+}
+
+/* for the documentation page */
+div#title{
+
+ font-size: 110%;
+ color: black;
+
+
+ /*background-color: #dee7ec;
+ #padding: 5pt;
+ #padding-bottom: 1em;
+ #color: black;
+ border-width: 1pt;
+ border-style: solid;*/
+
+}
+
+div#docnavlist {
+ /*background-color: #dee7ec; */
+ padding: 5pt;
+ padding-bottom: 2em;
+ color: black;
+ border-width: 1pt;
+ /*border-style: solid;*/
+}
+
+
+/* text markup */
+
+div.listtitle {
+ color: Black;
+ clear: left;
+ font: 120% Verdana, Helvetica, Arial, sans-serif;
+ margin: 0;
+ padding-left: 0em;
+ padding-top: 0em;
+ padding-bottom: 0.2em;
+ margin-right: 0.5em;
+ border-bottom: 1px solid #8CACBB;
+}
+
+div.actionbox h3 {
+ padding-top: 0;
+ padding-right: 0.5em;
+ padding-left: 0.5em;
+ background-color: #fabf00;
+ text-align: center;
+ border: 1px solid black; /* 8cacbb; */
+}
+
+div.actionbox a {
+ display: block;
+ padding-bottom: 0.5em;
+ padding-top: 0.5em;
+ margin-left: 0.5em;
+}
+
+div.actionbox a.history {
+ display: block;
+ padding-bottom: 0.5em;
+ padding-top: 0.5em;
+ margin-left: 0.5em;
+ font-size: 90%;
+}
+
+div.actionbox {
+ margin-bottom: 2em;
+ padding-bottom: 1em;
+ overflow: hidden; /* scroll; */
+}
+
+/* taken from docutils (oh dear, a bit senseless) */
+ol.simple, ul.simple {
+ margin-bottom: 1em }
+
+ol.arabic {
+ list-style: decimal }
+
+ol.loweralpha {
+ list-style: lower-alpha }
+
+ol.upperalpha {
+ list-style: upper-alpha }
+
+ol.lowerroman {
+ list-style: lower-roman }
+
+ol.upperroman {
+ list-style: upper-roman }
+
+
+/*
+:Author: David Goodger
+:Contact: goodger@users.sourceforge.net
+:date: $Date: 2003/01/22 22:26:48 $
+:version: $Revision: 1.29 $
+:copyright: This stylesheet has been placed in the public domain.
+
+Default cascading style sheet for the HTML output of Docutils.
+*/
+/*
+.first {
+ margin-top: 0 }
+
+.last {
+ margin-bottom: 0 }
+
+a.toc-backref {
+ text-decoration: none ;
+ color: black }
+
+dd {
+ margin-bottom: 0.5em }
+
+div.abstract {
+ margin: 2em 5em }
+
+div.abstract p.topic-title {
+ font-weight: bold ;
+ text-align: center }
+
+div.attention, div.caution, div.danger, div.error, div.hint,
+div.important, div.note, div.tip, div.warning {
+ margin: 2em ;
+ border: medium outset ;
+ padding: 1em }
+
+div.attention p.admonition-title, div.caution p.admonition-title,
+div.danger p.admonition-title, div.error p.admonition-title,
+div.warning p.admonition-title {
+ color: red ;
+ font-weight: bold ;
+ font-family: sans-serif }
+
+div.hint p.admonition-title, div.important p.admonition-title,
+div.note p.admonition-title, div.tip p.admonition-title {
+ font-weight: bold ;
+ font-family: sans-serif }
+
+div.dedication {
+ margin: 2em 5em ;
+ text-align: center ;
+ font-style: italic }
+
+div.dedication p.topic-title {
+ font-weight: bold ;
+ font-style: normal }
+
+div.figure {
+ margin-left: 2em }
+
+div.footer, div.header {
+ font-size: smaller }
+
+div.system-messages {
+ margin: 5em }
+
+div.system-messages h1 {
+ color: red }
+
+div.system-message {
+ border: medium outset ;
+ padding: 1em }
+
+div.system-message p.system-message-title {
+ color: red ;
+ font-weight: bold }
+
+div.topic {
+ margin: 2em }
+
+h1.title {
+ text-align: center }
+
+h2.subtitle {
+ text-align: center }
+
+hr {
+ width: 75% }
+
+p.caption {
+ font-style: italic }
+
+p.credits {
+ font-style: italic ;
+ font-size: smaller }
+
+p.label {
+ white-space: nowrap }
+
+p.topic-title {
+ font-weight: bold }
+
+pre.address {
+ margin-bottom: 0 ;
+ margin-top: 0 ;
+ font-family: serif ;
+ font-size: 100% }
+
+pre.line-block {
+ font-family: serif ;
+ font-size: 100% }
+
+pre.literal-block, pre.doctest-block {
+ margin-left: 2em ;
+ margin-right: 2em ;
+ background-color: #eeeeee }
+
+span.classifier {
+ font-family: sans-serif ;
+ font-style: oblique }
+
+span.classifier-delimiter {
+ font-family: sans-serif ;
+ font-weight: bold }
+
+span.interpreted {
+ font-family: sans-serif }
+
+span.option {
+ white-space: nowrap }
+
+span.option-argument {
+ font-style: italic }
+
+span.pre {
+ white-space: pre }
+
+span.problematic {
+ color: red }
+
+table {
+ margin-top: 0.5em ;
+ margin-bottom: 0.5em }
+
+table.citation {
+ border-left: solid thin gray ;
+ padding-left: 0.5ex }
+
+table.docinfo {
+ margin: 2em 4em }
+
+table.footnote {
+ border-left: solid thin black ;
+ padding-left: 0.5ex }
+
+td, th {
+ padding-left: 0.5em ;
+ padding-right: 0.5em ;
+ vertical-align: top }
+
+th.docinfo-name, th.field-name {
+ font-weight: bold ;
+ text-align: left ;
+ white-space: nowrap }
+
+h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
+ font-size: 100% }
+
+tt {
+ background-color: #eeeeee }
+
+ul.auto-toc {
+ list-style-type: none }
+*/
+
+div.section {
+ margin-top: 1.0em ;
+}
diff --git a/testing/web-platform/tests/tools/py/doc/xml.txt b/testing/web-platform/tests/tools/py/doc/xml.txt
new file mode 100644
index 000000000..1022de6e9
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/doc/xml.txt
@@ -0,0 +1,164 @@
+====================================================
+py.xml: simple pythonic xml/html file generation
+====================================================
+
+Motivation
+==========
+
+There is a plethora of frameworks and libraries for generating
+xml and html trees. However, many of them are large, have a
+steep learning curve and are often hard to debug, not to
+mention that they are frameworks to begin with.
+
+.. _xist: http://www.livinglogic.de/Python/xist/index.html
+
+a pythonic object model, please
+================================
+
+The py lib offers a pythonic way to generate xml/html, based on
+ideas from xist_ which `uses python class objects`_ to build
+xml trees. However, xist_'s implementation is somewhat heavy
+because it has additional goals like transformations and
+supporting many namespaces, but its basic idea is very simple.
+
+.. _`uses python class objects`: http://www.livinglogic.de/Python/xist/Howto.html
+
+generating arbitrary xml structures
+-----------------------------------
+
+With ``py.xml.Namespace`` you have the basis
+to generate custom xml-fragments on the fly::
+
+ class ns(py.xml.Namespace):
+ "my custom xml namespace"
+ doc = ns.books(
+ ns.book(
+ ns.author("May Day"),
+ ns.title("python for java programmers"),),
+ ns.book(
+ ns.author("why"),
+ ns.title("Java for Python programmers"),),
+ publisher="N.N",
+ )
+ print doc.unicode(indent=2).encode('utf8')
+
+will give you this representation::
+
+ <books publisher="N.N">
+ <book>
+ <author>May Day</author>
+ <title>python for java programmers</title></book>
+ <book>
+ <author>why</author>
+ <title>Java for Python programmers</title></book></books>
+
+In a sentence: positional arguments are child-tags and
+keyword-arguments are attributes.
+
+On a side note, you'll see that the unicode serializer
+supports a nice indentation style which keeps your generated
+html readable, basically by emulating python's whitespace
+significance: closing tags are put rightmost, where they are
+almost invisible at first glance :-)
+
+basic example for generating html
+---------------------------------
+
+Consider this example::
+
+ from py.xml import html # html namespace
+
+ paras = "First Para", "Second para"
+
+ doc = html.html(
+ html.head(
+ html.meta(name="Content-Type", value="text/html; charset=latin1")),
+ html.body(
+ [html.p(p) for p in paras]))
+
+ print unicode(doc).encode('latin1')
+
+Again, tags are objects which contain tags and have attributes.
+More exactly, Tags inherit from the list type and thus can be
+manipulated as list objects. They additionally support a default
+way to represent themselves as a serialized unicode object.
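+
+Because Tags can be manipulated as list objects, you can also
+grow a document incrementally. A minimal sketch, reusing ``html``
+and ``paras`` from the example above and assuming list-style
+``append`` behaves as described::
+
+    body = html.body()
+    for p in paras:
+        body.append(html.p(p))
+    doc = html.html(html.head(), body)
+    print unicode(doc).encode('latin1')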
+
+If you happen to look at the py.xml implementation you'll
+note that the tag/namespace implementation consumes some 50 lines
+with another 50 lines for the unicode serialization code.
+
+CSS-styling your html Tags
+--------------------------
+
+One aspect where many of the huge python xml/html generation
+frameworks utterly fail is a clean and convenient integration
+of CSS styling. Often, developers are left alone with keeping
+CSS style definitions in sync with some style files
+represented as strings (often in a separate .css file). Not
+only is this hard to debug but the missing abstractions make
+it hard to modify the styling of your tags or to choose custom
+style representations (inline, html.head or external). Add
+browsers' usual tolerance of messiness and errors in style
+references, and welcome to hell, also known as the domain of
+developing web applications :-)
+
+By contrast, consider this CSS styling example::
+
+ class my(html):
+ "my initial custom style"
+ class body(html.body):
+ style = html.Style(font_size = "120%")
+
+ class h2(html.h2):
+ style = html.Style(background = "grey")
+
+ class p(html.p):
+ style = html.Style(font_weight="bold")
+
+ doc = my.html(
+ my.head(),
+ my.body(
+ my.h2("hello world"),
+ my.p("bold as bold can")
+ )
+ )
+
+ print doc.unicode(indent=2)
+
+This will give you a small and mean, self-contained
+representation by default::
+
+ <html>
+ <head/>
+ <body style="font-size: 120%">
+ <h2 style="background: grey">hello world</h2>
+ <p style="font-weight: bold">bold as bold can</p></body></html>
+
+Most importantly, note that the inline-styling is just an
+implementation detail of the unicode serialization code.
+You can easily modify the serialization to put your styling into the
+``html.head`` or in a separate file and autogenerate CSS-class
+names or ids.
+
+Hey, you could even write tests that check you are using
+correct styles suitable for specific browser requirements. Did I
+mention that the ability to easily write tests for your generated
+html and its serialization could help to develop _stable_ user
+interfaces?
+
+More to come ...
+----------------
+
+For now, I don't think we should strive to offer much more
+than the above. However, it is probably not hard to offer
+*partial serialization* to allow generating maybe hundreds of
+complex html documents per second. Basically we would allow
+putting callables both as Tag content and as values of
+attributes. A slightly more advanced serialization would then
+produce a list of unicode objects intermingled with callables.
+At HTTP request time the callables would get called to
+complete the (probably request-specific) serialization of
+your Tags. Hmm, it's probably harder to explain this than to
+actually code it :-)
+
+.. _`py.test`: test/index.html
diff --git a/testing/web-platform/tests/tools/py/py/__init__.py b/testing/web-platform/tests/tools/py/py/__init__.py
new file mode 100644
index 000000000..bdb9aa218
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/__init__.py
@@ -0,0 +1,150 @@
+"""
+py.test and pylib: rapid testing and development utils
+
+this module uses apipkg.py for lazy-loading sub modules
+and classes. The initpkg-dictionary below specifies
+name->value mappings where value can be another namespace
+dictionary or an import path.
+
+(c) Holger Krekel and others, 2004-2014
+"""
+__version__ = '1.4.31'
+
+from py import _apipkg
+
+# so that py.error.* instances are picklable
+import sys
+sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error')
+
+_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={
+ # access to all standard lib modules
+ 'std': '._std:std',
+ # access to all posix errno's as classes
+ 'error': '._error:error',
+
+ '_pydir' : '.__metainfo:pydir',
+ 'version': 'py:__version__', # backward compatibility
+
+ # pytest-2.0 has a flat namespace, we use alias modules
+ # to keep old references compatible
+ 'test' : 'pytest',
+ 'test.collect' : 'pytest',
+ 'test.cmdline' : 'pytest',
+
+ # hook into the top-level standard library
+ 'process' : {
+ '__doc__' : '._process:__doc__',
+ 'cmdexec' : '._process.cmdexec:cmdexec',
+ 'kill' : '._process.killproc:kill',
+ 'ForkedFunc' : '._process.forkedfunc:ForkedFunc',
+ },
+
+ 'apipkg' : {
+ 'initpkg' : '._apipkg:initpkg',
+ 'ApiModule' : '._apipkg:ApiModule',
+ },
+
+ 'iniconfig' : {
+ 'IniConfig' : '._iniconfig:IniConfig',
+ 'ParseError' : '._iniconfig:ParseError',
+ },
+
+ 'path' : {
+ '__doc__' : '._path:__doc__',
+ 'svnwc' : '._path.svnwc:SvnWCCommandPath',
+ 'svnurl' : '._path.svnurl:SvnCommandPath',
+ 'local' : '._path.local:LocalPath',
+ 'SvnAuth' : '._path.svnwc:SvnAuth',
+ },
+
+ # python inspection/code-generation API
+ 'code' : {
+ '__doc__' : '._code:__doc__',
+ 'compile' : '._code.source:compile_',
+ 'Source' : '._code.source:Source',
+ 'Code' : '._code.code:Code',
+ 'Frame' : '._code.code:Frame',
+ 'ExceptionInfo' : '._code.code:ExceptionInfo',
+ 'Traceback' : '._code.code:Traceback',
+ 'getfslineno' : '._code.source:getfslineno',
+ 'getrawcode' : '._code.code:getrawcode',
+ 'patch_builtins' : '._code.code:patch_builtins',
+ 'unpatch_builtins' : '._code.code:unpatch_builtins',
+ '_AssertionError' : '._code.assertion:AssertionError',
+ '_reinterpret_old' : '._code.assertion:reinterpret_old',
+ '_reinterpret' : '._code.assertion:reinterpret',
+ '_reprcompare' : '._code.assertion:_reprcompare',
+ '_format_explanation' : '._code.assertion:_format_explanation',
+ },
+
+ # backports and additions of builtins
+ 'builtin' : {
+ '__doc__' : '._builtin:__doc__',
+ 'enumerate' : '._builtin:enumerate',
+ 'reversed' : '._builtin:reversed',
+ 'sorted' : '._builtin:sorted',
+ 'any' : '._builtin:any',
+ 'all' : '._builtin:all',
+ 'set' : '._builtin:set',
+ 'frozenset' : '._builtin:frozenset',
+ 'BaseException' : '._builtin:BaseException',
+ 'GeneratorExit' : '._builtin:GeneratorExit',
+ '_sysex' : '._builtin:_sysex',
+ 'print_' : '._builtin:print_',
+ '_reraise' : '._builtin:_reraise',
+ '_tryimport' : '._builtin:_tryimport',
+ 'exec_' : '._builtin:exec_',
+ '_basestring' : '._builtin:_basestring',
+ '_totext' : '._builtin:_totext',
+ '_isbytes' : '._builtin:_isbytes',
+ '_istext' : '._builtin:_istext',
+ '_getimself' : '._builtin:_getimself',
+ '_getfuncdict' : '._builtin:_getfuncdict',
+ '_getcode' : '._builtin:_getcode',
+ 'builtins' : '._builtin:builtins',
+ 'execfile' : '._builtin:execfile',
+ 'callable' : '._builtin:callable',
+ 'bytes' : '._builtin:bytes',
+ 'text' : '._builtin:text',
+ },
+
+ # input-output helping
+ 'io' : {
+ '__doc__' : '._io:__doc__',
+ 'dupfile' : '._io.capture:dupfile',
+ 'TextIO' : '._io.capture:TextIO',
+ 'BytesIO' : '._io.capture:BytesIO',
+ 'FDCapture' : '._io.capture:FDCapture',
+ 'StdCapture' : '._io.capture:StdCapture',
+ 'StdCaptureFD' : '._io.capture:StdCaptureFD',
+ 'TerminalWriter' : '._io.terminalwriter:TerminalWriter',
+ 'ansi_print' : '._io.terminalwriter:ansi_print',
+ 'get_terminal_width' : '._io.terminalwriter:get_terminal_width',
+ 'saferepr' : '._io.saferepr:saferepr',
+ },
+
+ # small and mean xml/html generation
+ 'xml' : {
+ '__doc__' : '._xmlgen:__doc__',
+ 'html' : '._xmlgen:html',
+ 'Tag' : '._xmlgen:Tag',
+ 'raw' : '._xmlgen:raw',
+ 'Namespace' : '._xmlgen:Namespace',
+ 'escape' : '._xmlgen:escape',
+ },
+
+ 'log' : {
+ # logging API ('producers' and 'consumers' connected via keywords)
+ '__doc__' : '._log:__doc__',
+ '_apiwarn' : '._log.warning:_apiwarn',
+ 'Producer' : '._log.log:Producer',
+ 'setconsumer' : '._log.log:setconsumer',
+ '_setstate' : '._log.log:setstate',
+ '_getstate' : '._log.log:getstate',
+ 'Path' : '._log.log:Path',
+ 'STDOUT' : '._log.log:STDOUT',
+ 'STDERR' : '._log.log:STDERR',
+ 'Syslog' : '._log.log:Syslog',
+ },
+
+})
diff --git a/testing/web-platform/tests/tools/py/py/__metainfo.py b/testing/web-platform/tests/tools/py/py/__metainfo.py
new file mode 100644
index 000000000..12581eb7a
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/__metainfo.py
@@ -0,0 +1,2 @@
+import py
+pydir = py.path.local(py.__file__).dirpath()
diff --git a/testing/web-platform/tests/tools/py/py/_apipkg.py b/testing/web-platform/tests/tools/py/py/_apipkg.py
new file mode 100644
index 000000000..a73b8f6d0
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_apipkg.py
@@ -0,0 +1,181 @@
+"""
+apipkg: control the exported namespace of a python package.
+
+see http://pypi.python.org/pypi/apipkg
+
+(c) holger krekel, 2009 - MIT license
+"""
+import os
+import sys
+from types import ModuleType
+
+__version__ = '1.3.dev'
+
+def _py_abspath(path):
+ """
+ special version of abspath
+ that will leave paths from jython jars alone
+ """
+ if path.startswith('__pyclasspath__'):
+
+ return path
+ else:
+ return os.path.abspath(path)
+
+def initpkg(pkgname, exportdefs, attr=dict()):
+ """ initialize given package from the export definitions. """
+ oldmod = sys.modules.get(pkgname)
+ d = {}
+ f = getattr(oldmod, '__file__', None)
+ if f:
+ f = _py_abspath(f)
+ d['__file__'] = f
+ if hasattr(oldmod, '__version__'):
+ d['__version__'] = oldmod.__version__
+ if hasattr(oldmod, '__loader__'):
+ d['__loader__'] = oldmod.__loader__
+ if hasattr(oldmod, '__path__'):
+ d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
+ if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
+ d['__doc__'] = oldmod.__doc__
+ d.update(attr)
+ if hasattr(oldmod, "__dict__"):
+ oldmod.__dict__.update(d)
+ mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
+ sys.modules[pkgname] = mod
+
+def importobj(modpath, attrname):
+ module = __import__(modpath, None, None, ['__doc__'])
+ if not attrname:
+ return module
+
+ retval = module
+ names = attrname.split(".")
+ for x in names:
+ retval = getattr(retval, x)
+ return retval
+
+class ApiModule(ModuleType):
+ def __docget(self):
+ try:
+ return self.__doc
+ except AttributeError:
+ if '__doc__' in self.__map__:
+ return self.__makeattr('__doc__')
+ def __docset(self, value):
+ self.__doc = value
+ __doc__ = property(__docget, __docset)
+
+ def __init__(self, name, importspec, implprefix=None, attr=None):
+ self.__name__ = name
+ self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
+ self.__map__ = {}
+ self.__implprefix__ = implprefix or name
+ if attr:
+ for name, val in attr.items():
+ # print "setting", self.__name__, name, val
+ setattr(self, name, val)
+ for name, importspec in importspec.items():
+ if isinstance(importspec, dict):
+ subname = '%s.%s' % (self.__name__, name)
+ apimod = ApiModule(subname, importspec, implprefix)
+ sys.modules[subname] = apimod
+ setattr(self, name, apimod)
+ else:
+ parts = importspec.split(':')
+ modpath = parts.pop(0)
+ attrname = parts and parts[0] or ""
+ if modpath[0] == '.':
+ modpath = implprefix + modpath
+
+ if not attrname:
+ subname = '%s.%s' % (self.__name__, name)
+ apimod = AliasModule(subname, modpath)
+ sys.modules[subname] = apimod
+ if '.' not in name:
+ setattr(self, name, apimod)
+ else:
+ self.__map__[name] = (modpath, attrname)
+
+ def __repr__(self):
+ l = []
+ if hasattr(self, '__version__'):
+ l.append("version=" + repr(self.__version__))
+ if hasattr(self, '__file__'):
+ l.append('from ' + repr(self.__file__))
+ if l:
+ return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
+ return '<ApiModule %r>' % (self.__name__,)
+
+ def __makeattr(self, name):
+ """lazily compute value for name or raise AttributeError if unknown."""
+ # print "makeattr", self.__name__, name
+ target = None
+ if '__onfirstaccess__' in self.__map__:
+ target = self.__map__.pop('__onfirstaccess__')
+ importobj(*target)()
+ try:
+ modpath, attrname = self.__map__[name]
+ except KeyError:
+ if target is not None and name != '__onfirstaccess__':
+ # retry, onfirstaccess might have set attrs
+ return getattr(self, name)
+ raise AttributeError(name)
+ else:
+ result = importobj(modpath, attrname)
+ setattr(self, name, result)
+ try:
+ del self.__map__[name]
+ except KeyError:
+ pass # in a recursive-import situation a double-del can happen
+ return result
+
+ __getattr__ = __makeattr
+
+ def __dict__(self):
+ # force all the content of the module to be loaded when __dict__ is read
+ dictdescr = ModuleType.__dict__['__dict__']
+ dict = dictdescr.__get__(self)
+ if dict is not None:
+ hasattr(self, 'some')
+ for name in self.__all__:
+ try:
+ self.__makeattr(name)
+ except AttributeError:
+ pass
+ return dict
+ __dict__ = property(__dict__)
+
+
+def AliasModule(modname, modpath, attrname=None):
+ mod = []
+
+ def getmod():
+ if not mod:
+ x = importobj(modpath, None)
+ if attrname is not None:
+ x = getattr(x, attrname)
+ mod.append(x)
+ return mod[0]
+
+ class AliasModule(ModuleType):
+
+ def __repr__(self):
+ x = modpath
+ if attrname:
+ x += "." + attrname
+ return '<AliasModule %r for %r>' % (modname, x)
+
+ def __getattribute__(self, name):
+ try:
+ return getattr(getmod(), name)
+ except ImportError:
+ return None
+
+ def __setattr__(self, name, value):
+ setattr(getmod(), name, value)
+
+ def __delattr__(self, name):
+ delattr(getmod(), name)
+
+ return AliasModule(str(modname))
diff --git a/testing/web-platform/tests/tools/py/py/_builtin.py b/testing/web-platform/tests/tools/py/py/_builtin.py
new file mode 100644
index 000000000..52ee9d79c
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_builtin.py
@@ -0,0 +1,248 @@
+import sys
+
+try:
+ reversed = reversed
+except NameError:
+ def reversed(sequence):
+ """reversed(sequence) -> reverse iterator over values of the sequence
+
+ Return a reverse iterator
+ """
+ if hasattr(sequence, '__reversed__'):
+ return sequence.__reversed__()
+ if not hasattr(sequence, '__getitem__'):
+ raise TypeError("argument to reversed() must be a sequence")
+ return reversed_iterator(sequence)
+
+ class reversed_iterator(object):
+
+ def __init__(self, seq):
+ self.seq = seq
+ self.remaining = len(seq)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ i = self.remaining
+ if i > 0:
+ i -= 1
+ item = self.seq[i]
+ self.remaining = i
+ return item
+ raise StopIteration
+
+ def __length_hint__(self):
+ return self.remaining
+
+try:
+ any = any
+except NameError:
+ def any(iterable):
+ for x in iterable:
+ if x:
+ return True
+ return False
+
+try:
+ all = all
+except NameError:
+ def all(iterable):
+ for x in iterable:
+ if not x:
+ return False
+ return True
+
+try:
+ sorted = sorted
+except NameError:
+ builtin_cmp = cmp # need to use cmp as keyword arg
+
+ def sorted(iterable, cmp=None, key=None, reverse=0):
+ use_cmp = None
+ if key is not None:
+ if cmp is None:
+ def use_cmp(x, y):
+ return builtin_cmp(x[0], y[0])
+ else:
+ def use_cmp(x, y):
+ return cmp(x[0], y[0])
+ l = [(key(element), element) for element in iterable]
+ else:
+ if cmp is not None:
+ use_cmp = cmp
+ l = list(iterable)
+ if use_cmp is not None:
+ l.sort(use_cmp)
+ else:
+ l.sort()
+ if reverse:
+ l.reverse()
+ if key is not None:
+ return [element for (_, element) in l]
+ return l
+
+try:
+ set, frozenset = set, frozenset
+except NameError:
+ from sets import set, frozenset
+
+# pass through
+enumerate = enumerate
+
+try:
+ BaseException = BaseException
+except NameError:
+ BaseException = Exception
+
+try:
+ GeneratorExit = GeneratorExit
+except NameError:
+ class GeneratorExit(Exception):
+ """ This exception is never raised, it is there to make it possible to
+ write code compatible with CPython 2.5 even in lower CPython
+ versions."""
+ pass
+ GeneratorExit.__module__ = 'exceptions'
+
+_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return hasattr(obj, "__call__")
+
+if sys.version_info >= (3, 0):
+ exec ("print_ = print ; exec_=exec")
+ import builtins
+
+ # some backward compatibility helpers
+ _basestring = str
+ def _totext(obj, encoding=None, errors=None):
+ if isinstance(obj, bytes):
+ if errors is None:
+ obj = obj.decode(encoding)
+ else:
+ obj = obj.decode(encoding, errors)
+ elif not isinstance(obj, str):
+ obj = str(obj)
+ return obj
+
+ def _isbytes(x):
+ return isinstance(x, bytes)
+ def _istext(x):
+ return isinstance(x, str)
+
+ text = str
+ bytes = bytes
+
+
+ def _getimself(function):
+ return getattr(function, '__self__', None)
+
+ def _getfuncdict(function):
+ return getattr(function, "__dict__", None)
+
+ def _getcode(function):
+ return getattr(function, "__code__", None)
+
+ def execfile(fn, globs=None, locs=None):
+ if globs is None:
+ back = sys._getframe(1)
+ globs = back.f_globals
+ locs = back.f_locals
+ del back
+ elif locs is None:
+ locs = globs
+ fp = open(fn, "r")
+ try:
+ source = fp.read()
+ finally:
+ fp.close()
+ co = compile(source, fn, "exec", dont_inherit=True)
+ exec_(co, globs, locs)
+
+else:
+ import __builtin__ as builtins
+ _totext = unicode
+ _basestring = basestring
+ text = unicode
+ bytes = str
+ execfile = execfile
+ callable = callable
+ def _isbytes(x):
+ return isinstance(x, str)
+ def _istext(x):
+ return isinstance(x, unicode)
+
+ def _getimself(function):
+ return getattr(function, 'im_self', None)
+
+ def _getfuncdict(function):
+ return getattr(function, "__dict__", None)
+
+ def _getcode(function):
+ try:
+ return getattr(function, "__code__")
+ except AttributeError:
+ return getattr(function, "func_code", None)
+
+ def print_(*args, **kwargs):
+ """ minimal backport of py3k print statement. """
+ sep = ' '
+ if 'sep' in kwargs:
+ sep = kwargs.pop('sep')
+ end = '\n'
+ if 'end' in kwargs:
+ end = kwargs.pop('end')
+ file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
+ if kwargs:
+ args = ", ".join([str(x) for x in kwargs])
+ raise TypeError("invalid keyword arguments: %s" % args)
+ at_start = True
+ for x in args:
+ if not at_start:
+ file.write(sep)
+ file.write(str(x))
+ at_start = False
+ file.write(end)
+
+ def exec_(obj, globals=None, locals=None):
+ """ minimal backport of py3k exec statement. """
+ __tracebackhide__ = True
+ if globals is None:
+ frame = sys._getframe(1)
+ globals = frame.f_globals
+ if locals is None:
+ locals = frame.f_locals
+ elif locals is None:
+ locals = globals
+ exec2(obj, globals, locals)
+
+if sys.version_info >= (3, 0):
+ def _reraise(cls, val, tb):
+ __tracebackhide__ = True
+ assert hasattr(val, '__traceback__')
+ raise cls.with_traceback(val, tb)
+else:
+ exec ("""
+def _reraise(cls, val, tb):
+ __tracebackhide__ = True
+ raise cls, val, tb
+def exec2(obj, globals, locals):
+ __tracebackhide__ = True
+ exec obj in globals, locals
+""")
+
+def _tryimport(*names):
+ """ return the first successfully imported module. """
+ assert names
+ for name in names:
+ try:
+ __import__(name)
+ except ImportError:
+ excinfo = sys.exc_info()
+ else:
+ return sys.modules[name]
+ _reraise(*excinfo)
diff --git a/testing/web-platform/tests/tools/py/py/_code/__init__.py b/testing/web-platform/tests/tools/py/py/_code/__init__.py
new file mode 100644
index 000000000..f15acf851
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_code/__init__.py
@@ -0,0 +1 @@
+""" python inspection/code generation API """
diff --git a/testing/web-platform/tests/tools/py/py/_code/_assertionnew.py b/testing/web-platform/tests/tools/py/py/_code/_assertionnew.py
new file mode 100644
index 000000000..afb1b31ff
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_code/_assertionnew.py
@@ -0,0 +1,339 @@
+"""
+Find intermediate evaluation results in assert statements through builtin AST.
+This should replace _assertionold.py eventually.
+"""
+
+import sys
+import ast
+
+import py
+from py._code.assertion import _format_explanation, BuiltinAssertionError
+
+
+if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+ # See http://bugs.jython.org/issue1497
+ _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+ "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+ "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+ "List", "Tuple")
+ _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+ "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+ "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+ "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+ _expr_nodes = set(getattr(ast, name) for name in _exprs)
+ _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+ def _is_ast_expr(node):
+ return node.__class__ in _expr_nodes
+ def _is_ast_stmt(node):
+ return node.__class__ in _stmt_nodes
+else:
+ def _is_ast_expr(node):
+ return isinstance(node, ast.expr)
+ def _is_ast_stmt(node):
+ return isinstance(node, ast.stmt)
+
+
+class Failure(Exception):
+ """Error found while interpreting AST."""
+
+ def __init__(self, explanation=""):
+ self.cause = sys.exc_info()
+ self.explanation = explanation
+
+
+def interpret(source, frame, should_fail=False):
+ mod = ast.parse(source)
+ visitor = DebugInterpreter(frame)
+ try:
+ visitor.visit(mod)
+ except Failure:
+ failure = sys.exc_info()[1]
+ return getfailure(failure)
+ if should_fail:
+ return ("(assertion failed, but when it was re-run for "
+ "printing intermediate values, it did not fail. Suggestions: "
+ "compute assert expression before the assert or use --no-assert)")
+
+def run(offending_line, frame=None):
+ if frame is None:
+ frame = py.code.Frame(sys._getframe(1))
+ return interpret(offending_line, frame)
+
+def getfailure(failure):
+ explanation = _format_explanation(failure.explanation)
+ value = failure.cause[1]
+ if str(value):
+ lines = explanation.splitlines()
+ if not lines:
+ lines.append("")
+ lines[0] += " << %s" % (value,)
+ explanation = "\n".join(lines)
+ text = "%s: %s" % (failure.cause[0].__name__, explanation)
+ if text.startswith("AssertionError: assert "):
+ text = text[16:]
+ return text
+
+
+operator_map = {
+ ast.BitOr : "|",
+ ast.BitXor : "^",
+ ast.BitAnd : "&",
+ ast.LShift : "<<",
+ ast.RShift : ">>",
+ ast.Add : "+",
+ ast.Sub : "-",
+ ast.Mult : "*",
+ ast.Div : "/",
+ ast.FloorDiv : "//",
+ ast.Mod : "%",
+ ast.Eq : "==",
+ ast.NotEq : "!=",
+ ast.Lt : "<",
+ ast.LtE : "<=",
+ ast.Gt : ">",
+ ast.GtE : ">=",
+ ast.Pow : "**",
+ ast.Is : "is",
+ ast.IsNot : "is not",
+ ast.In : "in",
+ ast.NotIn : "not in"
+}
+
+unary_map = {
+ ast.Not : "not %s",
+ ast.Invert : "~%s",
+ ast.USub : "-%s",
+ ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+ """Interpret AST nodes to gleam useful debugging information. """
+
+ def __init__(self, frame):
+ self.frame = frame
+
+ def generic_visit(self, node):
+ # Fallback when we don't have a special implementation.
+ if _is_ast_expr(node):
+ mod = ast.Expression(node)
+ co = self._compile(mod)
+ try:
+ result = self.frame.eval(co)
+ except Exception:
+ raise Failure()
+ explanation = self.frame.repr(result)
+ return explanation, result
+ elif _is_ast_stmt(node):
+ mod = ast.Module([node])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co)
+ except Exception:
+ raise Failure()
+ return None, None
+ else:
+ raise AssertionError("can't handle %s" %(node,))
+
+ def _compile(self, source, mode="eval"):
+ return compile(source, "<assertion interpretation>", mode)
+
+ def visit_Expr(self, expr):
+ return self.visit(expr.value)
+
+ def visit_Module(self, mod):
+ for stmt in mod.body:
+ self.visit(stmt)
+
+ def visit_Name(self, name):
+ explanation, result = self.generic_visit(name)
+ # See if the name is local.
+ source = "%r in locals() is not globals()" % (name.id,)
+ co = self._compile(source)
+ try:
+ local = self.frame.eval(co)
+ except Exception:
+ # have to assume it isn't
+ local = False
+ if not local:
+ return name.id, result
+ return explanation, result
+
+ def visit_Compare(self, comp):
+ left = comp.left
+ left_explanation, left_result = self.visit(left)
+ for op, next_op in zip(comp.ops, comp.comparators):
+ next_explanation, next_result = self.visit(next_op)
+ op_symbol = operator_map[op.__class__]
+ explanation = "%s %s %s" % (left_explanation, op_symbol,
+ next_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=next_result)
+ except Exception:
+ raise Failure(explanation)
+ try:
+ if not result:
+ break
+ except KeyboardInterrupt:
+ raise
+ except:
+ break
+ left_explanation, left_result = next_explanation, next_result
+
+ rcomp = py.code._reprcompare
+ if rcomp:
+ res = rcomp(op_symbol, left_result, next_result)
+ if res:
+ explanation = res
+ return explanation, result
+
+ def visit_BoolOp(self, boolop):
+ is_or = isinstance(boolop.op, ast.Or)
+ explanations = []
+ for operand in boolop.values:
+ explanation, result = self.visit(operand)
+ explanations.append(explanation)
+ if result == is_or:
+ break
+ name = is_or and " or " or " and "
+ explanation = "(" + name.join(explanations) + ")"
+ return explanation, result
+
+ def visit_UnaryOp(self, unary):
+ pattern = unary_map[unary.op.__class__]
+ operand_explanation, operand_result = self.visit(unary.operand)
+ explanation = pattern % (operand_explanation,)
+ co = self._compile(pattern % ("__exprinfo_expr",))
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=operand_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_BinOp(self, binop):
+ left_explanation, left_result = self.visit(binop.left)
+ right_explanation, right_result = self.visit(binop.right)
+ symbol = operator_map[binop.op.__class__]
+ explanation = "(%s %s %s)" % (left_explanation, symbol,
+ right_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=right_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_Call(self, call):
+ func_explanation, func = self.visit(call.func)
+ arg_explanations = []
+ ns = {"__exprinfo_func" : func}
+ arguments = []
+ for arg in call.args:
+ arg_explanation, arg_result = self.visit(arg)
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ ns[arg_name] = arg_result
+ arguments.append(arg_name)
+ arg_explanations.append(arg_explanation)
+ for keyword in call.keywords:
+ arg_explanation, arg_result = self.visit(keyword.value)
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ ns[arg_name] = arg_result
+ keyword_source = "%s=%%s" % (keyword.arg)
+ arguments.append(keyword_source % (arg_name,))
+ arg_explanations.append(keyword_source % (arg_explanation,))
+ if call.starargs:
+ arg_explanation, arg_result = self.visit(call.starargs)
+ arg_name = "__exprinfo_star"
+ ns[arg_name] = arg_result
+ arguments.append("*%s" % (arg_name,))
+ arg_explanations.append("*%s" % (arg_explanation,))
+ if call.kwargs:
+ arg_explanation, arg_result = self.visit(call.kwargs)
+ arg_name = "__exprinfo_kwds"
+ ns[arg_name] = arg_result
+ arguments.append("**%s" % (arg_name,))
+ arg_explanations.append("**%s" % (arg_explanation,))
+ args_explained = ", ".join(arg_explanations)
+ explanation = "%s(%s)" % (func_explanation, args_explained)
+ args = ", ".join(arguments)
+ source = "__exprinfo_func(%s)" % (args,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, **ns)
+ except Exception:
+ raise Failure(explanation)
+ pattern = "%s\n{%s = %s\n}"
+ rep = self.frame.repr(result)
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def _is_builtin_name(self, name):
+ pattern = "%r not in globals() and %r not in locals()"
+ source = pattern % (name.id, name.id)
+ co = self._compile(source)
+ try:
+ return self.frame.eval(co)
+ except Exception:
+ return False
+
+ def visit_Attribute(self, attr):
+ if not isinstance(attr.ctx, ast.Load):
+ return self.generic_visit(attr)
+ source_explanation, source_result = self.visit(attr.value)
+ explanation = "%s.%s" % (source_explanation, attr.attr)
+ source = "__exprinfo_expr.%s" % (attr.attr,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ raise Failure(explanation)
+ explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+ self.frame.repr(result),
+ source_explanation, attr.attr)
+ # Check if the attr is from an instance.
+ source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+ source = source % (attr.attr,)
+ co = self._compile(source)
+ try:
+ from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ from_instance = True
+ if from_instance:
+ rep = self.frame.repr(result)
+ pattern = "%s\n{%s = %s\n}"
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def visit_Assert(self, assrt):
+ test_explanation, test_result = self.visit(assrt.test)
+ if test_explanation.startswith("False\n{False =") and \
+ test_explanation.endswith("\n"):
+ test_explanation = test_explanation[15:-2]
+ explanation = "assert %s" % (test_explanation,)
+ if not test_result:
+ try:
+ raise BuiltinAssertionError
+ except Exception:
+ raise Failure(explanation)
+ return explanation, test_result
+
+ def visit_Assign(self, assign):
+ value_explanation, value_result = self.visit(assign.value)
+ explanation = "... = %s" % (value_explanation,)
+ name = ast.Name("__exprinfo_expr", ast.Load(),
+ lineno=assign.value.lineno,
+ col_offset=assign.value.col_offset)
+ new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+ col_offset=assign.col_offset)
+ mod = ast.Module([new_assign])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co, __exprinfo_expr=value_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, value_result
diff --git a/testing/web-platform/tests/tools/py/py/_code/_assertionold.py b/testing/web-platform/tests/tools/py/py/_code/_assertionold.py
new file mode 100644
index 000000000..4e81fb3ef
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_code/_assertionold.py
@@ -0,0 +1,555 @@
+import py
+import sys, inspect
+from compiler import parse, ast, pycodegen
+from py._code.assertion import BuiltinAssertionError, _format_explanation
+
+passthroughex = py.builtin._sysex
+
+class Failure:
+ def __init__(self, node):
+ self.exc, self.value, self.tb = sys.exc_info()
+ self.node = node
+
+class View(object):
+ """View base class.
+
+ If C is a subclass of View, then C(x) creates a proxy object around
+ the object x. The actual class of the proxy is not C in general,
+ but a *subclass* of C determined by the rules below. To avoid confusion
+ we call view class the class of the proxy (a subclass of C, so of View)
+ and object class the class of x.
+
+ Attributes and methods not found in the proxy are automatically read on x.
+ Other operations like setting attributes are performed on the proxy, as
+ determined by its view class. The object x is available from the proxy
+ as its __obj__ attribute.
+
+ The view class selection is determined by the __view__ tuples and the
+ optional __viewkey__ method. By default, the selected view class is the
+ most specific subclass of C whose __view__ mentions the class of x.
+ If no such subclass is found, the search proceeds with the parent
+ object classes. For example, C(True) will first look for a subclass
+ of C with __view__ = (..., bool, ...) and only if it doesn't find any
+ look for one with __view__ = (..., int, ...), and then ..., object,...
+ If everything fails the class C itself is considered to be the default.
+
+ Alternatively, the view class selection can be driven by another aspect
+ of the object x, instead of the class of x, by overriding __viewkey__.
+ See last example at the end of this module.
+ """
+
+ _viewcache = {}
+ __view__ = ()
+
+ def __new__(rootclass, obj, *args, **kwds):
+ self = object.__new__(rootclass)
+ self.__obj__ = obj
+ self.__rootclass__ = rootclass
+ key = self.__viewkey__()
+ try:
+ self.__class__ = self._viewcache[key]
+ except KeyError:
+ self.__class__ = self._selectsubclass(key)
+ return self
+
+ def __getattr__(self, attr):
+ # attributes not found in the normal hierarchy rooted on View
+ # are looked up in the object's real class
+ return getattr(self.__obj__, attr)
+
+ def __viewkey__(self):
+ return self.__obj__.__class__
+
+ def __matchkey__(self, key, subclasses):
+ if inspect.isclass(key):
+ keys = inspect.getmro(key)
+ else:
+ keys = [key]
+ for key in keys:
+ result = [C for C in subclasses if key in C.__view__]
+ if result:
+ return result
+ return []
+
+ def _selectsubclass(self, key):
+ subclasses = list(enumsubclasses(self.__rootclass__))
+ for C in subclasses:
+ if not isinstance(C.__view__, tuple):
+ C.__view__ = (C.__view__,)
+ choices = self.__matchkey__(key, subclasses)
+ if not choices:
+ return self.__rootclass__
+ elif len(choices) == 1:
+ return choices[0]
+ else:
+ # combine the multiple choices
+ return type('?', tuple(choices), {})
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
+
+
+def enumsubclasses(cls):
+ for subcls in cls.__subclasses__():
+ for subsubclass in enumsubclasses(subcls):
+ yield subsubclass
+ yield cls
+
+
+class Interpretable(View):
+ """A parse tree node with a few extra methods."""
+ explanation = None
+
+ def is_builtin(self, frame):
+ return False
+
+ def eval(self, frame):
+ # fall-back for unknown expression nodes
+ try:
+ expr = ast.Expression(self.__obj__)
+ expr.filename = '<eval>'
+ self.__obj__.filename = '<eval>'
+ co = pycodegen.ExpressionCodeGenerator(expr).getCode()
+ result = frame.eval(co)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ self.result = result
+ self.explanation = self.explanation or frame.repr(self.result)
+
+ def run(self, frame):
+ # fall-back for unknown statement nodes
+ try:
+ expr = ast.Module(None, ast.Stmt([self.__obj__]))
+ expr.filename = '<run>'
+ co = pycodegen.ModuleCodeGenerator(expr).getCode()
+ frame.exec_(co)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ def nice_explanation(self):
+ return _format_explanation(self.explanation)
+
+
+class Name(Interpretable):
+ __view__ = ast.Name
+
+ def is_local(self, frame):
+ source = '%r in locals() is not globals()' % self.name
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def is_global(self, frame):
+ source = '%r in globals()' % self.name
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def is_builtin(self, frame):
+ source = '%r not in locals() and %r not in globals()' % (
+ self.name, self.name)
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def eval(self, frame):
+ super(Name, self).eval(frame)
+ if not self.is_local(frame):
+ self.explanation = self.name
+
+class Compare(Interpretable):
+ __view__ = ast.Compare
+
+ def eval(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ for operation, expr2 in self.ops:
+ if hasattr(self, 'result'):
+ # shortcutting in chained expressions
+ if not frame.is_true(self.result):
+ break
+ expr2 = Interpretable(expr2)
+ expr2.eval(frame)
+ self.explanation = "%s %s %s" % (
+ expr.explanation, operation, expr2.explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % operation
+ try:
+ self.result = frame.eval(source,
+ __exprinfo_left=expr.result,
+ __exprinfo_right=expr2.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ expr = expr2
+
+class And(Interpretable):
+ __view__ = ast.And
+
+ def eval(self, frame):
+ explanations = []
+ for expr in self.nodes:
+ expr = Interpretable(expr)
+ expr.eval(frame)
+ explanations.append(expr.explanation)
+ self.result = expr.result
+ if not frame.is_true(expr.result):
+ break
+ self.explanation = '(' + ' and '.join(explanations) + ')'
+
+class Or(Interpretable):
+ __view__ = ast.Or
+
+ def eval(self, frame):
+ explanations = []
+ for expr in self.nodes:
+ expr = Interpretable(expr)
+ expr.eval(frame)
+ explanations.append(expr.explanation)
+ self.result = expr.result
+ if frame.is_true(expr.result):
+ break
+ self.explanation = '(' + ' or '.join(explanations) + ')'
+
+
+# == Unary operations ==
+keepalive = []
+for astclass, astpattern in {
+ ast.Not : 'not __exprinfo_expr',
+ ast.Invert : '(~__exprinfo_expr)',
+ }.items():
+
+ class UnaryArith(Interpretable):
+ __view__ = astclass
+
+ def eval(self, frame, astpattern=astpattern):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.explanation = astpattern.replace('__exprinfo_expr',
+ expr.explanation)
+ try:
+ self.result = frame.eval(astpattern,
+ __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ keepalive.append(UnaryArith)
+
+# == Binary operations ==
+for astclass, astpattern in {
+ ast.Add : '(__exprinfo_left + __exprinfo_right)',
+ ast.Sub : '(__exprinfo_left - __exprinfo_right)',
+ ast.Mul : '(__exprinfo_left * __exprinfo_right)',
+ ast.Div : '(__exprinfo_left / __exprinfo_right)',
+ ast.Mod : '(__exprinfo_left % __exprinfo_right)',
+ ast.Power : '(__exprinfo_left ** __exprinfo_right)',
+ }.items():
+
+ class BinaryArith(Interpretable):
+ __view__ = astclass
+
+ def eval(self, frame, astpattern=astpattern):
+ left = Interpretable(self.left)
+ left.eval(frame)
+ right = Interpretable(self.right)
+ right.eval(frame)
+ self.explanation = (astpattern
+ .replace('__exprinfo_left', left .explanation)
+ .replace('__exprinfo_right', right.explanation))
+ try:
+ self.result = frame.eval(astpattern,
+ __exprinfo_left=left.result,
+ __exprinfo_right=right.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ keepalive.append(BinaryArith)
+
+
+class CallFunc(Interpretable):
+ __view__ = ast.CallFunc
+
+ def is_bool(self, frame):
+ source = 'isinstance(__exprinfo_value, bool)'
+ try:
+ return frame.is_true(frame.eval(source,
+ __exprinfo_value=self.result))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def eval(self, frame):
+ node = Interpretable(self.node)
+ node.eval(frame)
+ explanations = []
+ vars = {'__exprinfo_fn': node.result}
+ source = '__exprinfo_fn('
+ for a in self.args:
+ if isinstance(a, ast.Keyword):
+ keyword = a.name
+ a = a.expr
+ else:
+ keyword = None
+ a = Interpretable(a)
+ a.eval(frame)
+ argname = '__exprinfo_%d' % len(vars)
+ vars[argname] = a.result
+ if keyword is None:
+ source += argname + ','
+ explanations.append(a.explanation)
+ else:
+ source += '%s=%s,' % (keyword, argname)
+ explanations.append('%s=%s' % (keyword, a.explanation))
+ if self.star_args:
+ star_args = Interpretable(self.star_args)
+ star_args.eval(frame)
+ argname = '__exprinfo_star'
+ vars[argname] = star_args.result
+ source += '*' + argname + ','
+ explanations.append('*' + star_args.explanation)
+ if self.dstar_args:
+ dstar_args = Interpretable(self.dstar_args)
+ dstar_args.eval(frame)
+ argname = '__exprinfo_kwds'
+ vars[argname] = dstar_args.result
+ source += '**' + argname + ','
+ explanations.append('**' + dstar_args.explanation)
+ self.explanation = "%s(%s)" % (
+ node.explanation, ', '.join(explanations))
+ if source.endswith(','):
+ source = source[:-1]
+ source += ')'
+ try:
+ self.result = frame.eval(source, **vars)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ if not node.is_builtin(frame) or not self.is_bool(frame):
+ r = frame.repr(self.result)
+ self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+class Getattr(Interpretable):
+ __view__ = ast.Getattr
+
+ def eval(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ source = '__exprinfo_expr.%s' % self.attrname
+ try:
+ self.result = frame.eval(source, __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ self.explanation = '%s.%s' % (expr.explanation, self.attrname)
+ # if the attribute comes from the instance, its value is interesting
+ source = ('hasattr(__exprinfo_expr, "__dict__") and '
+ '%r in __exprinfo_expr.__dict__' % self.attrname)
+ try:
+ from_instance = frame.is_true(
+ frame.eval(source, __exprinfo_expr=expr.result))
+ except passthroughex:
+ raise
+ except:
+ from_instance = True
+ if from_instance:
+ r = frame.repr(self.result)
+ self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+# == Re-interpretation of full statements ==
+
+class Assert(Interpretable):
+ __view__ = ast.Assert
+
+ def run(self, frame):
+ test = Interpretable(self.test)
+ test.eval(frame)
+ # simplify 'assert False where False = ...'
+ if (test.explanation.startswith('False\n{False = ') and
+ test.explanation.endswith('\n}')):
+ test.explanation = test.explanation[15:-2]
+ # print the result as 'assert <explanation>'
+ self.result = test.result
+ self.explanation = 'assert ' + test.explanation
+ if not frame.is_true(test.result):
+ try:
+ raise BuiltinAssertionError
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+class Assign(Interpretable):
+ __view__ = ast.Assign
+
+ def run(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.result = expr.result
+ self.explanation = '... = ' + expr.explanation
+ # fall-back-run the rest of the assignment
+ ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
+ mod = ast.Module(None, ast.Stmt([ass]))
+ mod.filename = '<run>'
+ co = pycodegen.ModuleCodeGenerator(mod).getCode()
+ try:
+ frame.exec_(co, __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+class Discard(Interpretable):
+ __view__ = ast.Discard
+
+ def run(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.result = expr.result
+ self.explanation = expr.explanation
+
+class Stmt(Interpretable):
+ __view__ = ast.Stmt
+
+ def run(self, frame):
+ for stmt in self.nodes:
+ stmt = Interpretable(stmt)
+ stmt.run(frame)
+
+
+def report_failure(e):
+ explanation = e.node.nice_explanation()
+ if explanation:
+ explanation = ", in: " + explanation
+ else:
+ explanation = ""
+ sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
+
+def check(s, frame=None):
+ if frame is None:
+ frame = sys._getframe(1)
+ frame = py.code.Frame(frame)
+ expr = parse(s, 'eval')
+ assert isinstance(expr, ast.Expression)
+ node = Interpretable(expr.node)
+ try:
+ node.eval(frame)
+ except passthroughex:
+ raise
+ except Failure:
+ e = sys.exc_info()[1]
+ report_failure(e)
+ else:
+ if not frame.is_true(node.result):
+ sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
+
+
+###########################################################
+# API / Entry points
+# #########################################################
+
+def interpret(source, frame, should_fail=False):
+ module = Interpretable(parse(source, 'exec').node)
+ #print "got module", module
+ if isinstance(frame, py.std.types.FrameType):
+ frame = py.code.Frame(frame)
+ try:
+ module.run(frame)
+ except Failure:
+ e = sys.exc_info()[1]
+ return getfailure(e)
+ except passthroughex:
+ raise
+ except:
+ import traceback
+ traceback.print_exc()
+ if should_fail:
+ return ("(assertion failed, but when it was re-run for "
+ "printing intermediate values, it did not fail. Suggestions: "
+ "compute assert expression before the assert or use --nomagic)")
+ else:
+ return None
+
+def getmsg(excinfo):
+ if isinstance(excinfo, tuple):
+ excinfo = py.code.ExceptionInfo(excinfo)
+ #frame, line = gettbline(tb)
+ #frame = py.code.Frame(frame)
+ #return interpret(line, frame)
+
+ tb = excinfo.traceback[-1]
+ source = str(tb.statement).strip()
+ x = interpret(source, tb.frame, should_fail=True)
+ if not isinstance(x, str):
+ raise TypeError("interpret returned non-string %r" % (x,))
+ return x
+
+def getfailure(e):
+ explanation = e.node.nice_explanation()
+ if str(e.value):
+ lines = explanation.split('\n')
+ lines[0] += " << %s" % (e.value,)
+ explanation = '\n'.join(lines)
+ text = "%s: %s" % (e.exc.__name__, explanation)
+ if text.startswith('AssertionError: assert '):
+ text = text[16:]
+ return text
+
+def run(s, frame=None):
+ if frame is None:
+ frame = sys._getframe(1)
+ frame = py.code.Frame(frame)
+ module = Interpretable(parse(s, 'exec').node)
+ try:
+ module.run(frame)
+ except Failure:
+ e = sys.exc_info()[1]
+ report_failure(e)
+
+
+if __name__ == '__main__':
+ # example:
+ def f():
+ return 5
+ def g():
+ return 3
+ def h(x):
+ return 'never'
+ check("f() * g() == 5")
+ check("not f()")
+ check("not (f() and g() or 0)")
+ check("f() == g()")
+ i = 4
+ check("i == f()")
+ check("len(f()) == 0")
+ check("isinstance(2+3+4, float)")
+
+ run("x = i")
+ check("x == 5")
+
+ run("assert not f(), 'oops'")
+ run("a, b, c = 1, 2")
+ run("a, b, c = f()")
+
+ check("max([f(),g()]) == 4")
+ check("'hello'[g()] == 'h'")
+ run("'guk%d' % h(f())")
diff --git a/testing/web-platform/tests/tools/py/py/_code/_py2traceback.py b/testing/web-platform/tests/tools/py/py/_code/_py2traceback.py
new file mode 100644
index 000000000..d65e27cb7
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_code/_py2traceback.py
@@ -0,0 +1,79 @@
+# copied from python-2.7.3's traceback.py
+# CHANGES:
+# - some_str is replaced, trying to create unicode strings
+#
+import types
+
+def format_exception_only(etype, value):
+ """Format the exception part of a traceback.
+
+ The arguments are the exception type and value such as given by
+ sys.last_type and sys.last_value. The return value is a list of
+ strings, each ending in a newline.
+
+ Normally, the list contains a single string; however, for
+ SyntaxError exceptions, it contains several lines that (when
+ printed) display detailed information about where the syntax
+ error occurred.
+
+ The message indicating which exception occurred is always the last
+ string in the list.
+
+ """
+
+ # An instance should not have a meaningful value parameter, but
+ # sometimes does, particularly for string exceptions, such as
+ # >>> raise string1, string2 # deprecated
+ #
+ # Clear these out first because issubtype(string1, SyntaxError)
+ # would throw another exception and mask the original problem.
+ if (isinstance(etype, BaseException) or
+ isinstance(etype, types.InstanceType) or
+ etype is None or type(etype) is str):
+ return [_format_final_exc_line(etype, value)]
+
+ stype = etype.__name__
+
+ if not issubclass(etype, SyntaxError):
+ return [_format_final_exc_line(stype, value)]
+
+ # It was a syntax error; show exactly where the problem was found.
+ lines = []
+ try:
+ msg, (filename, lineno, offset, badline) = value.args
+ except Exception:
+ pass
+ else:
+ filename = filename or "<string>"
+ lines.append(' File "%s", line %d\n' % (filename, lineno))
+ if badline is not None:
+ lines.append(' %s\n' % badline.strip())
+ if offset is not None:
+ caretspace = badline.rstrip('\n')[:offset].lstrip()
+ # non-space whitespace (likes tabs) must be kept for alignment
+ caretspace = ((c.isspace() and c or ' ') for c in caretspace)
+ # only three spaces to account for offset1 == pos 0
+ lines.append(' %s^\n' % ''.join(caretspace))
+ value = msg
+
+ lines.append(_format_final_exc_line(stype, value))
+ return lines
+
+def _format_final_exc_line(etype, value):
+ """Return a list of a single line -- normal case for format_exception_only"""
+ valuestr = _some_str(value)
+ if value is None or not valuestr:
+ line = "%s\n" % etype
+ else:
+ line = "%s: %s\n" % (etype, valuestr)
+ return line
+
+def _some_str(value):
+ try:
+ return unicode(value)
+ except Exception:
+ try:
+ return str(value)
+ except Exception:
+ pass
+ return '<unprintable %s object>' % type(value).__name__
diff --git a/testing/web-platform/tests/tools/py/py/_code/assertion.py b/testing/web-platform/tests/tools/py/py/_code/assertion.py
new file mode 100644
index 000000000..4ce80c75b
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_code/assertion.py
@@ -0,0 +1,94 @@
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+_reprcompare = None # if set, will be called by assert reinterp for comparison ops
+
+def _format_explanation(explanation):
+ """This formats an explanation
+
+ Normally all embedded newlines are escaped, however there are
+    three exceptions: \n{, \n} and \n~. The first two are intended to
+    cover nested explanations, see function and attribute explanations
+ for examples (.visit_Call(), visit_Attribute()). The last one is
+ for when one explanation needs to span multiple lines, e.g. when
+ displaying diffs.
+ """
+ raw_lines = (explanation or '').split('\n')
+ # escape newlines not followed by {, } and ~
+ lines = [raw_lines[0]]
+ for l in raw_lines[1:]:
+ if l.startswith('{') or l.startswith('}') or l.startswith('~'):
+ lines.append(l)
+ else:
+ lines[-1] += '\\n' + l
+
+ result = lines[:1]
+ stack = [0]
+ stackcnt = [0]
+ for line in lines[1:]:
+ if line.startswith('{'):
+ if stackcnt[-1]:
+ s = 'and '
+ else:
+ s = 'where '
+ stack.append(len(result))
+ stackcnt[-1] += 1
+ stackcnt.append(0)
+ result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ elif line.startswith('}'):
+ assert line.startswith('}')
+ stack.pop()
+ stackcnt.pop()
+ result[stack[-1]] += line[1:]
+ else:
+ assert line.startswith('~')
+ result.append(' '*len(stack) + line[1:])
+ assert len(stack) == 1
+ return '\n'.join(result)
+
+
+class AssertionError(BuiltinAssertionError):
+ def __init__(self, *args):
+ BuiltinAssertionError.__init__(self, *args)
+ if args:
+ try:
+ self.msg = str(args[0])
+ except py.builtin._sysex:
+ raise
+ except:
+ self.msg = "<[broken __repr__] %s at %0xd>" %(
+ args[0].__class__, id(args[0]))
+ else:
+ f = py.code.Frame(sys._getframe(1))
+ try:
+ source = f.code.fullsource
+ if source is not None:
+ try:
+ source = source.getstatement(f.lineno, assertion=True)
+ except IndexError:
+ source = None
+ else:
+ source = str(source.deindent()).strip()
+ except py.error.ENOENT:
+ source = None
+ # this can also occur during reinterpretation, when the
+ # co_filename is set to "<run>".
+ if source:
+ self.msg = reinterpret(source, f, should_fail=True)
+ else:
+ self.msg = "<could not determine information>"
+ if not self.args:
+ self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+ AssertionError.__module__ = "builtins"
+ reinterpret_old = "old reinterpretation not available for py3"
+else:
+ from py._code._assertionold import interpret as reinterpret_old
+if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
+ from py._code._assertionnew import interpret as reinterpret
+else:
+ reinterpret = reinterpret_old
+
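The \n{, \n} and \n~ markers accepted by _format_explanation above can be exercised with a small sketch, assuming the vendored py package (including py._code._assertionnew) is importable; the explanation string is made up for illustration:

    from py._code.assertion import _format_explanation

    # '\n{' opens a nested "where ..." clause, '\n}' closes it,
    # and '\n~' continues onto a new output line without escaping
    explanation = "assert x == y\n{x = f()\n}\n~  (diff would go here)"
    print(_format_explanation(explanation))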
diff --git a/testing/web-platform/tests/tools/py/py/_code/code.py b/testing/web-platform/tests/tools/py/py/_code/code.py
new file mode 100644
index 000000000..f14c562a2
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_code/code.py
@@ -0,0 +1,787 @@
+import py
+import sys
+from inspect import CO_VARARGS, CO_VARKEYWORDS
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+if sys.version_info[0] >= 3:
+ from traceback import format_exception_only
+else:
+ from py._code._py2traceback import format_exception_only
+
+class Code(object):
+ """ wrapper around Python code objects """
+ def __init__(self, rawcode):
+ if not hasattr(rawcode, "co_filename"):
+ rawcode = py.code.getrawcode(rawcode)
+ try:
+ self.filename = rawcode.co_filename
+ self.firstlineno = rawcode.co_firstlineno - 1
+ self.name = rawcode.co_name
+ except AttributeError:
+ raise TypeError("not a code object: %r" %(rawcode,))
+ self.raw = rawcode
+
+ def __eq__(self, other):
+ return self.raw == other.raw
+
+ def __ne__(self, other):
+ return not self == other
+
+ @property
+ def path(self):
+ """ return a path object pointing to source code (note that it
+ might not point to an actually existing file). """
+ p = py.path.local(self.raw.co_filename)
+ # maybe don't try this checking
+ if not p.check():
+ # XXX maybe try harder like the weird logic
+ # in the standard lib [linecache.updatecache] does?
+ p = self.raw.co_filename
+ return p
+
+ @property
+ def fullsource(self):
+ """ return a py.code.Source object for the full source file of the code
+ """
+ from py._code import source
+ full, _ = source.findsource(self.raw)
+ return full
+
+ def source(self):
+ """ return a py.code.Source object for the code object's source only
+ """
+ # return source only for that part of code
+ return py.code.Source(self.raw)
+
+ def getargs(self, var=False):
+ """ return a tuple with the argument names for the code object
+
+ if 'var' is set True also return the names of the variable and
+ keyword arguments when present
+ """
+        # handy shortcut for getting args
+ raw = self.raw
+ argcount = raw.co_argcount
+ if var:
+ argcount += raw.co_flags & CO_VARARGS
+ argcount += raw.co_flags & CO_VARKEYWORDS
+ return raw.co_varnames[:argcount]
+
+class Frame(object):
+ """Wrapper around a Python frame holding f_locals and f_globals
+ in which expressions can be evaluated."""
+
+ def __init__(self, frame):
+ self.lineno = frame.f_lineno - 1
+ self.f_globals = frame.f_globals
+ self.f_locals = frame.f_locals
+ self.raw = frame
+ self.code = py.code.Code(frame.f_code)
+
+ @property
+ def statement(self):
+ """ statement this frame is at """
+ if self.code.fullsource is None:
+ return py.code.Source("")
+ return self.code.fullsource.getstatement(self.lineno)
+
+ def eval(self, code, **vars):
+ """ evaluate 'code' in the frame
+
+ 'vars' are optional additional local variables
+
+ returns the result of the evaluation
+ """
+ f_locals = self.f_locals.copy()
+ f_locals.update(vars)
+ return eval(code, self.f_globals, f_locals)
+
+ def exec_(self, code, **vars):
+ """ exec 'code' in the frame
+
+        'vars' are optional additional local variables
+ """
+ f_locals = self.f_locals.copy()
+ f_locals.update(vars)
+ py.builtin.exec_(code, self.f_globals, f_locals )
+
+ def repr(self, object):
+ """ return a 'safe' (non-recursive, one-line) string repr for 'object'
+ """
+ return py.io.saferepr(object)
+
+ def is_true(self, object):
+ return object
+
+ def getargs(self, var=False):
+ """ return a list of tuples (name, value) for all arguments
+
+ if 'var' is set True also include the variable and keyword
+ arguments when present
+ """
+ retval = []
+ for arg in self.code.getargs(var):
+ try:
+ retval.append((arg, self.f_locals[arg]))
+ except KeyError:
+ pass # this can occur when using Psyco
+ return retval
+
+class TracebackEntry(object):
+ """ a single entry in a traceback """
+
+ _repr_style = None
+ exprinfo = None
+
+ def __init__(self, rawentry):
+ self._rawentry = rawentry
+ self.lineno = rawentry.tb_lineno - 1
+
+ def set_repr_style(self, mode):
+ assert mode in ("short", "long")
+ self._repr_style = mode
+
+ @property
+ def frame(self):
+ return py.code.Frame(self._rawentry.tb_frame)
+
+ @property
+ def relline(self):
+ return self.lineno - self.frame.code.firstlineno
+
+ def __repr__(self):
+ return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
+
+ @property
+ def statement(self):
+ """ py.code.Source object for the current statement """
+ source = self.frame.code.fullsource
+ return source.getstatement(self.lineno)
+
+ @property
+ def path(self):
+ """ path to the source code """
+ return self.frame.code.path
+
+ def getlocals(self):
+ return self.frame.f_locals
+    locals = property(getlocals, None, None, "locals of underlying frame")
+
+ def reinterpret(self):
+ """Reinterpret the failing statement and returns a detailed information
+ about what operations are performed."""
+ if self.exprinfo is None:
+ source = str(self.statement).strip()
+ x = py.code._reinterpret(source, self.frame, should_fail=True)
+ if not isinstance(x, str):
+ raise TypeError("interpret returned non-string %r" % (x,))
+ self.exprinfo = x
+ return self.exprinfo
+
+ def getfirstlinesource(self):
+ # on Jython this firstlineno can be -1 apparently
+ return max(self.frame.code.firstlineno, 0)
+
+ def getsource(self, astcache=None):
+ """ return failing source code. """
+ # we use the passed in astcache to not reparse asttrees
+ # within exception info printing
+ from py._code.source import getstatementrange_ast
+ source = self.frame.code.fullsource
+ if source is None:
+ return None
+ key = astnode = None
+ if astcache is not None:
+ key = self.frame.code.path
+ if key is not None:
+ astnode = astcache.get(key, None)
+ start = self.getfirstlinesource()
+ try:
+ astnode, _, end = getstatementrange_ast(self.lineno, source,
+ astnode=astnode)
+ except SyntaxError:
+ end = self.lineno + 1
+ else:
+ if key is not None:
+ astcache[key] = astnode
+ return source[start:end]
+
+ source = property(getsource)
+
+ def ishidden(self):
+ """ return True if the current frame has a var __tracebackhide__
+ resolving to True
+
+ mostly for internal use
+ """
+ try:
+ return self.frame.f_locals['__tracebackhide__']
+ except KeyError:
+ try:
+ return self.frame.f_globals['__tracebackhide__']
+ except KeyError:
+ return False
+
+ def __str__(self):
+ try:
+ fn = str(self.path)
+ except py.error.Error:
+ fn = '???'
+ name = self.frame.code.name
+ try:
+ line = str(self.statement).lstrip()
+ except KeyboardInterrupt:
+ raise
+ except:
+ line = "???"
+ return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
+
+ def name(self):
+ return self.frame.code.raw.co_name
+    name = property(name, None, None, "co_name of underlying code")
+
+class Traceback(list):
+ """ Traceback objects encapsulate and offer higher level
+ access to Traceback entries.
+ """
+ Entry = TracebackEntry
+ def __init__(self, tb):
+ """ initialize from given python traceback object. """
+ if hasattr(tb, 'tb_next'):
+ def f(cur):
+ while cur is not None:
+ yield self.Entry(cur)
+ cur = cur.tb_next
+ list.__init__(self, f(tb))
+ else:
+ list.__init__(self, tb)
+
+ def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
+ """ return a Traceback instance wrapping part of this Traceback
+
+        by providing any combination of path, lineno and firstlineno, the
+ first frame to start the to-be-returned traceback is determined
+
+ this allows cutting the first part of a Traceback instance e.g.
+ for formatting reasons (removing some uninteresting bits that deal
+ with handling of the exception/traceback)
+ """
+ for x in self:
+ code = x.frame.code
+ codepath = code.path
+ if ((path is None or codepath == path) and
+ (excludepath is None or not hasattr(codepath, 'relto') or
+ not codepath.relto(excludepath)) and
+ (lineno is None or x.lineno == lineno) and
+ (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
+ return Traceback(x._rawentry)
+ return self
+
+ def __getitem__(self, key):
+ val = super(Traceback, self).__getitem__(key)
+ if isinstance(key, type(slice(0))):
+ val = self.__class__(val)
+ return val
+
+ def filter(self, fn=lambda x: not x.ishidden()):
+ """ return a Traceback instance with certain items removed
+
+ fn is a function that gets a single argument, a TracebackItem
+ instance, and should return True when the item should be added
+ to the Traceback, False when not
+
+ by default this removes all the TracebackItems which are hidden
+ (see ishidden() above)
+ """
+ return Traceback(filter(fn, self))
+
+ def getcrashentry(self):
+ """ return last non-hidden traceback entry that lead
+ to the exception of a traceback.
+ """
+ for i in range(-1, -len(self)-1, -1):
+ entry = self[i]
+ if not entry.ishidden():
+ return entry
+ return self[-1]
+
+ def recursionindex(self):
+ """ return the index of the frame/TracebackItem where recursion
+ originates if appropriate, None if no recursion occurred
+ """
+ cache = {}
+ for i, entry in enumerate(self):
+ # id for the code.raw is needed to work around
+ # the strange metaprogramming in the decorator lib from pypi
+ # which generates code objects that have hash/value equality
+ #XXX needs a test
+ key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
+ #print "checking for recursion at", key
+ l = cache.setdefault(key, [])
+ if l:
+ f = entry.frame
+ loc = f.f_locals
+ for otherloc in l:
+ if f.is_true(f.eval(co_equal,
+ __recursioncache_locals_1=loc,
+ __recursioncache_locals_2=otherloc)):
+ return i
+ l.append(entry.frame.f_locals)
+ return None
+
+co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
+ '?', 'eval')
+
+class ExceptionInfo(object):
+ """ wraps sys.exc_info() objects and offers
+ help for navigating the traceback.
+ """
+ _striptext = ''
+ def __init__(self, tup=None, exprinfo=None):
+ if tup is None:
+ tup = sys.exc_info()
+ if exprinfo is None and isinstance(tup[1], AssertionError):
+ exprinfo = getattr(tup[1], 'msg', None)
+ if exprinfo is None:
+ exprinfo = str(tup[1])
+ if exprinfo and exprinfo.startswith('assert '):
+ self._striptext = 'AssertionError: '
+ self._excinfo = tup
+ #: the exception class
+ self.type = tup[0]
+ #: the exception instance
+ self.value = tup[1]
+ #: the exception raw traceback
+ self.tb = tup[2]
+ #: the exception type name
+ self.typename = self.type.__name__
+ #: the exception traceback (py.code.Traceback instance)
+ self.traceback = py.code.Traceback(self.tb)
+
+ def __repr__(self):
+ return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
+
+ def exconly(self, tryshort=False):
+ """ return the exception as a string
+
+ when 'tryshort' resolves to True, and the exception is a
+ py.code._AssertionError, only the actual exception part of
+ the exception representation is returned (so 'AssertionError: ' is
+ removed from the beginning)
+ """
+ lines = format_exception_only(self.type, self.value)
+ text = ''.join(lines)
+ text = text.rstrip()
+ if tryshort:
+ if text.startswith(self._striptext):
+ text = text[len(self._striptext):]
+ return text
+
+ def errisinstance(self, exc):
+ """ return True if the exception is an instance of exc """
+ return isinstance(self.value, exc)
+
+ def _getreprcrash(self):
+ exconly = self.exconly(tryshort=True)
+ entry = self.traceback.getcrashentry()
+ path, lineno = entry.frame.code.raw.co_filename, entry.lineno
+ return ReprFileLocation(path, lineno+1, exconly)
+
+ def getrepr(self, showlocals=False, style="long",
+ abspath=False, tbfilter=True, funcargs=False):
+ """ return str()able representation of this exception info.
+ showlocals: show locals per traceback entry
+ style: long|short|no|native traceback style
+ tbfilter: hide entries (where __tracebackhide__ is true)
+
+        in case of style==native, tbfilter and showlocals are ignored.
+ """
+ if style == 'native':
+ return ReprExceptionInfo(ReprTracebackNative(
+ py.std.traceback.format_exception(
+ self.type,
+ self.value,
+ self.traceback[0]._rawentry,
+ )), self._getreprcrash())
+
+ fmt = FormattedExcinfo(showlocals=showlocals, style=style,
+ abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+ return fmt.repr_excinfo(self)
+
+ def __str__(self):
+ entry = self.traceback[-1]
+ loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+ return str(loc)
+
+ def __unicode__(self):
+ entry = self.traceback[-1]
+ loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+ return unicode(loc)
+
+
+class FormattedExcinfo(object):
+ """ presenting information about failing Functions and Generators. """
+ # for traceback entries
+ flow_marker = ">"
+ fail_marker = "E"
+
+ def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
+ self.showlocals = showlocals
+ self.style = style
+ self.tbfilter = tbfilter
+ self.funcargs = funcargs
+ self.abspath = abspath
+ self.astcache = {}
+
+ def _getindent(self, source):
+ # figure out indent for given source
+ try:
+ s = str(source.getstatement(len(source)-1))
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ s = str(source[-1])
+ except KeyboardInterrupt:
+ raise
+ except:
+ return 0
+ return 4 + (len(s) - len(s.lstrip()))
+
+ def _getentrysource(self, entry):
+ source = entry.getsource(self.astcache)
+ if source is not None:
+ source = source.deindent()
+ return source
+
+ def _saferepr(self, obj):
+ return py.io.saferepr(obj)
+
+ def repr_args(self, entry):
+ if self.funcargs:
+ args = []
+ for argname, argvalue in entry.frame.getargs(var=True):
+ args.append((argname, self._saferepr(argvalue)))
+ return ReprFuncArgs(args)
+
+ def get_source(self, source, line_index=-1, excinfo=None, short=False):
+ """ return formatted and marked up source lines. """
+ lines = []
+ if source is None or line_index >= len(source.lines):
+ source = py.code.Source("???")
+ line_index = 0
+ if line_index < 0:
+ line_index += len(source)
+ space_prefix = " "
+ if short:
+ lines.append(space_prefix + source.lines[line_index].strip())
+ else:
+ for line in source.lines[:line_index]:
+ lines.append(space_prefix + line)
+ lines.append(self.flow_marker + " " + source.lines[line_index])
+ for line in source.lines[line_index+1:]:
+ lines.append(space_prefix + line)
+ if excinfo is not None:
+ indent = 4 if short else self._getindent(source)
+ lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+ return lines
+
+ def get_exconly(self, excinfo, indent=4, markall=False):
+ lines = []
+ indent = " " * indent
+ # get the real exception information out
+ exlines = excinfo.exconly(tryshort=True).split('\n')
+ failindent = self.fail_marker + indent[1:]
+ for line in exlines:
+ lines.append(failindent + line)
+ if not markall:
+ failindent = indent
+ return lines
+
+ def repr_locals(self, locals):
+ if self.showlocals:
+ lines = []
+ keys = [loc for loc in locals if loc[0] != "@"]
+ keys.sort()
+ for name in keys:
+ value = locals[name]
+ if name == '__builtins__':
+ lines.append("__builtins__ = <builtins>")
+ else:
+ # This formatting could all be handled by the
+ # _repr() function, which is only reprlib.Repr in
+ # disguise, so is very configurable.
+ str_repr = self._saferepr(value)
+ #if len(str_repr) < 70 or not isinstance(value,
+ # (list, tuple, dict)):
+ lines.append("%-10s = %s" %(name, str_repr))
+ #else:
+ # self._line("%-10s =\\" % (name,))
+ # # XXX
+ # py.std.pprint.pprint(value, stream=self.excinfowriter)
+ return ReprLocals(lines)
+
+ def repr_traceback_entry(self, entry, excinfo=None):
+ source = self._getentrysource(entry)
+ if source is None:
+ source = py.code.Source("???")
+ line_index = 0
+ else:
+ # entry.getfirstlinesource() can be -1, should be 0 on jython
+ line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
+
+ lines = []
+ style = entry._repr_style
+ if style is None:
+ style = self.style
+ if style in ("short", "long"):
+ short = style == "short"
+ reprargs = self.repr_args(entry) if not short else None
+ s = self.get_source(source, line_index, excinfo, short=short)
+ lines.extend(s)
+ if short:
+ message = "in %s" %(entry.name)
+ else:
+ message = excinfo and excinfo.typename or ""
+ path = self._makepath(entry.path)
+ filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
+ localsrepr = None
+ if not short:
+ localsrepr = self.repr_locals(entry.locals)
+ return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
+ if excinfo:
+ lines.extend(self.get_exconly(excinfo, indent=4))
+ return ReprEntry(lines, None, None, None, style)
+
+ def _makepath(self, path):
+ if not self.abspath:
+ try:
+ np = py.path.local().bestrelpath(path)
+ except OSError:
+ return path
+ if len(np) < len(str(path)):
+ path = np
+ return path
+
+ def repr_traceback(self, excinfo):
+ traceback = excinfo.traceback
+ if self.tbfilter:
+ traceback = traceback.filter()
+ recursionindex = None
+ if excinfo.errisinstance(RuntimeError):
+ if "maximum recursion depth exceeded" in str(excinfo.value):
+ recursionindex = traceback.recursionindex()
+ last = traceback[-1]
+ entries = []
+ extraline = None
+ for index, entry in enumerate(traceback):
+ einfo = (last == entry) and excinfo or None
+ reprentry = self.repr_traceback_entry(entry, einfo)
+ entries.append(reprentry)
+ if index == recursionindex:
+ extraline = "!!! Recursion detected (same locals & position)"
+ break
+ return ReprTraceback(entries, extraline, style=self.style)
+
+ def repr_excinfo(self, excinfo):
+ reprtraceback = self.repr_traceback(excinfo)
+ reprcrash = excinfo._getreprcrash()
+ return ReprExceptionInfo(reprtraceback, reprcrash)
+
+class TerminalRepr:
+ def __str__(self):
+ s = self.__unicode__()
+ if sys.version_info[0] < 3:
+ s = s.encode('utf-8')
+ return s
+
+ def __unicode__(self):
+ # FYI this is called from pytest-xdist's serialization of exception
+ # information.
+ io = py.io.TextIO()
+ tw = py.io.TerminalWriter(file=io)
+ self.toterminal(tw)
+ return io.getvalue().strip()
+
+ def __repr__(self):
+ return "<%s instance at %0x>" %(self.__class__, id(self))
+
+
+class ReprExceptionInfo(TerminalRepr):
+ def __init__(self, reprtraceback, reprcrash):
+ self.reprtraceback = reprtraceback
+ self.reprcrash = reprcrash
+ self.sections = []
+
+ def addsection(self, name, content, sep="-"):
+ self.sections.append((name, content, sep))
+
+ def toterminal(self, tw):
+ self.reprtraceback.toterminal(tw)
+ for name, content, sep in self.sections:
+ tw.sep(sep, name)
+ tw.line(content)
+
+class ReprTraceback(TerminalRepr):
+ entrysep = "_ "
+
+ def __init__(self, reprentries, extraline, style):
+ self.reprentries = reprentries
+ self.extraline = extraline
+ self.style = style
+
+ def toterminal(self, tw):
+ # the entries might have different styles
+ last_style = None
+ for i, entry in enumerate(self.reprentries):
+ if entry.style == "long":
+ tw.line("")
+ entry.toterminal(tw)
+ if i < len(self.reprentries) - 1:
+ next_entry = self.reprentries[i+1]
+ if entry.style == "long" or \
+ entry.style == "short" and next_entry.style == "long":
+ tw.sep(self.entrysep)
+
+ if self.extraline:
+ tw.line(self.extraline)
+
+class ReprTracebackNative(ReprTraceback):
+ def __init__(self, tblines):
+ self.style = "native"
+ self.reprentries = [ReprEntryNative(tblines)]
+ self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+ style = "native"
+
+ def __init__(self, tblines):
+ self.lines = tblines
+
+ def toterminal(self, tw):
+ tw.write("".join(self.lines))
+
+class ReprEntry(TerminalRepr):
+ localssep = "_ "
+
+ def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
+ self.lines = lines
+ self.reprfuncargs = reprfuncargs
+ self.reprlocals = reprlocals
+ self.reprfileloc = filelocrepr
+ self.style = style
+
+ def toterminal(self, tw):
+ if self.style == "short":
+ self.reprfileloc.toterminal(tw)
+ for line in self.lines:
+ red = line.startswith("E ")
+ tw.line(line, bold=True, red=red)
+ #tw.line("")
+ return
+ if self.reprfuncargs:
+ self.reprfuncargs.toterminal(tw)
+ for line in self.lines:
+ red = line.startswith("E ")
+ tw.line(line, bold=True, red=red)
+ if self.reprlocals:
+ #tw.sep(self.localssep, "Locals")
+ tw.line("")
+ self.reprlocals.toterminal(tw)
+ if self.reprfileloc:
+ if self.lines:
+ tw.line("")
+ self.reprfileloc.toterminal(tw)
+
+ def __str__(self):
+ return "%s\n%s\n%s" % ("\n".join(self.lines),
+ self.reprlocals,
+ self.reprfileloc)
+
+class ReprFileLocation(TerminalRepr):
+ def __init__(self, path, lineno, message):
+ self.path = str(path)
+ self.lineno = lineno
+ self.message = message
+
+ def toterminal(self, tw):
+ # filename and lineno output for each entry,
+        # using an output format that most editors understand
+ msg = self.message
+ i = msg.find("\n")
+ if i != -1:
+ msg = msg[:i]
+ tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
+
+class ReprLocals(TerminalRepr):
+ def __init__(self, lines):
+ self.lines = lines
+
+ def toterminal(self, tw):
+ for line in self.lines:
+ tw.line(line)
+
+class ReprFuncArgs(TerminalRepr):
+ def __init__(self, args):
+ self.args = args
+
+ def toterminal(self, tw):
+ if self.args:
+ linesofar = ""
+ for name, value in self.args:
+ ns = "%s = %s" %(name, value)
+ if len(ns) + len(linesofar) + 2 > tw.fullwidth:
+ if linesofar:
+ tw.line(linesofar)
+ linesofar = ns
+ else:
+ if linesofar:
+ linesofar += ", " + ns
+ else:
+ linesofar = ns
+ if linesofar:
+ tw.line(linesofar)
+ tw.line("")
+
+
+
+oldbuiltins = {}
+
+def patch_builtins(assertion=True, compile=True):
+ """ put compile and AssertionError builtins to Python's builtins. """
+ if assertion:
+ from py._code import assertion
+ l = oldbuiltins.setdefault('AssertionError', [])
+ l.append(py.builtin.builtins.AssertionError)
+ py.builtin.builtins.AssertionError = assertion.AssertionError
+ if compile:
+ l = oldbuiltins.setdefault('compile', [])
+ l.append(py.builtin.builtins.compile)
+ py.builtin.builtins.compile = py.code.compile
+
+def unpatch_builtins(assertion=True, compile=True):
+ """ remove compile and AssertionError builtins from Python builtins. """
+ if assertion:
+ py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
+ if compile:
+ py.builtin.builtins.compile = oldbuiltins['compile'].pop()
+
+def getrawcode(obj, trycall=True):
+ """ return code object for given function. """
+ try:
+ return obj.__code__
+ except AttributeError:
+ obj = getattr(obj, 'im_func', obj)
+ obj = getattr(obj, 'func_code', obj)
+ obj = getattr(obj, 'f_code', obj)
+ obj = getattr(obj, '__code__', obj)
+ if trycall and not hasattr(obj, 'co_firstlineno'):
+ if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+ x = getrawcode(obj.__call__, trycall=False)
+ if hasattr(x, 'co_firstlineno'):
+ return x
+ return obj
+
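An illustrative sketch of the exception-reporting API defined above, assuming the vendored py package is importable and the snippet is run from a file (so source lines can be resolved for the traceback entries):

    import py

    def fail():
        assert 0, "boom"

    try:
        fail()
    except AssertionError:
        excinfo = py.code.ExceptionInfo()      # wraps the current sys.exc_info()
        print(excinfo.typename)                # AssertionError
        print(excinfo.exconly(tryshort=True))
        print(excinfo.getrepr(style="short"))  # rendered via TerminalRepr.__str__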
diff --git a/testing/web-platform/tests/tools/py/py/_code/source.py b/testing/web-platform/tests/tools/py/py/_code/source.py
new file mode 100644
index 000000000..3a648e635
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_code/source.py
@@ -0,0 +1,419 @@
+from __future__ import generators
+
+from bisect import bisect_right
+import sys
+import inspect, tokenize
+import py
+from types import ModuleType
+cpy_compile = compile
+
+try:
+ import _ast
+ from _ast import PyCF_ONLY_AST as _AST_FLAG
+except ImportError:
+ _AST_FLAG = 0
+ _ast = None
+
+
+class Source(object):
+ """ a immutable object holding a source code fragment,
+ possibly deindenting it.
+ """
+ _compilecounter = 0
+ def __init__(self, *parts, **kwargs):
+ self.lines = lines = []
+ de = kwargs.get('deindent', True)
+ rstrip = kwargs.get('rstrip', True)
+ for part in parts:
+ if not part:
+ partlines = []
+ if isinstance(part, Source):
+ partlines = part.lines
+ elif isinstance(part, (tuple, list)):
+ partlines = [x.rstrip("\n") for x in part]
+ elif isinstance(part, py.builtin._basestring):
+ partlines = part.split('\n')
+ if rstrip:
+ while partlines:
+ if partlines[-1].strip():
+ break
+ partlines.pop()
+ else:
+ partlines = getsource(part, deindent=de).lines
+ if de:
+ partlines = deindent(partlines)
+ lines.extend(partlines)
+
+ def __eq__(self, other):
+ try:
+ return self.lines == other.lines
+ except AttributeError:
+ if isinstance(other, str):
+ return str(self) == other
+ return False
+
+ def __getitem__(self, key):
+ if isinstance(key, int):
+ return self.lines[key]
+ else:
+ if key.step not in (None, 1):
+ raise IndexError("cannot slice a Source with a step")
+ return self.__getslice__(key.start, key.stop)
+
+ def __len__(self):
+ return len(self.lines)
+
+ def __getslice__(self, start, end):
+ newsource = Source()
+ newsource.lines = self.lines[start:end]
+ return newsource
+
+ def strip(self):
+ """ return new source object with trailing
+ and leading blank lines removed.
+ """
+ start, end = 0, len(self)
+ while start < end and not self.lines[start].strip():
+ start += 1
+ while end > start and not self.lines[end-1].strip():
+ end -= 1
+ source = Source()
+ source.lines[:] = self.lines[start:end]
+ return source
+
+ def putaround(self, before='', after='', indent=' ' * 4):
+ """ return a copy of the source object with
+ 'before' and 'after' wrapped around it.
+ """
+ before = Source(before)
+ after = Source(after)
+ newsource = Source()
+ lines = [ (indent + line) for line in self.lines]
+ newsource.lines = before.lines + lines + after.lines
+ return newsource
+
+ def indent(self, indent=' ' * 4):
+ """ return a copy of the source object with
+ all lines indented by the given indent-string.
+ """
+ newsource = Source()
+ newsource.lines = [(indent+line) for line in self.lines]
+ return newsource
+
+ def getstatement(self, lineno, assertion=False):
+ """ return Source statement which contains the
+ given linenumber (counted from 0).
+ """
+ start, end = self.getstatementrange(lineno, assertion)
+ return self[start:end]
+
+ def getstatementrange(self, lineno, assertion=False):
+ """ return (start, end) tuple which spans the minimal
+            statement region which contains the given lineno.
+ """
+ if not (0 <= lineno < len(self)):
+ raise IndexError("lineno out of range")
+ ast, start, end = getstatementrange_ast(lineno, self)
+ return start, end
+
+ def deindent(self, offset=None):
+ """ return a new source object deindented by offset.
+ If offset is None then guess an indentation offset from
+ the first non-blank line. Subsequent lines which have a
+ lower indentation offset will be copied verbatim as
+ they are assumed to be part of multilines.
+ """
+ # XXX maybe use the tokenizer to properly handle multiline
+ # strings etc.pp?
+ newsource = Source()
+ newsource.lines[:] = deindent(self.lines, offset)
+ return newsource
+
+ def isparseable(self, deindent=True):
+ """ return True if source is parseable, heuristically
+ deindenting it by default.
+ """
+ try:
+ import parser
+ except ImportError:
+ syntax_checker = lambda x: compile(x, 'asd', 'exec')
+ else:
+ syntax_checker = parser.suite
+
+ if deindent:
+ source = str(self.deindent())
+ else:
+ source = str(self)
+ try:
+ #compile(source+'\n', "x", "exec")
+ syntax_checker(source+'\n')
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ return False
+ else:
+ return True
+
+ def __str__(self):
+ return "\n".join(self.lines)
+
+ def compile(self, filename=None, mode='exec',
+ flag=generators.compiler_flag,
+ dont_inherit=0, _genframe=None):
+ """ return compiled code object. if filename is None
+ invent an artificial filename which displays
+ the source/line position of the caller frame.
+ """
+ if not filename or py.path.local(filename).check(file=0):
+ if _genframe is None:
+ _genframe = sys._getframe(1) # the caller
+ fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
+ base = "<%d-codegen " % self._compilecounter
+ self.__class__._compilecounter += 1
+ if not filename:
+ filename = base + '%s:%d>' % (fn, lineno)
+ else:
+ filename = base + '%r %s:%d>' % (filename, fn, lineno)
+ source = "\n".join(self.lines) + '\n'
+ try:
+ co = cpy_compile(source, filename, mode, flag)
+ except SyntaxError:
+ ex = sys.exc_info()[1]
+ # re-represent syntax errors from parsing python strings
+ msglines = self.lines[:ex.lineno]
+ if ex.offset:
+ msglines.append(" "*ex.offset + '^')
+ msglines.append("(code was compiled probably from here: %s)" % filename)
+ newex = SyntaxError('\n'.join(msglines))
+ newex.offset = ex.offset
+ newex.lineno = ex.lineno
+ newex.text = ex.text
+ raise newex
+ else:
+ if flag & _AST_FLAG:
+ return co
+ lines = [(x + "\n") for x in self.lines]
+ if sys.version_info[0] >= 3:
+ # XXX py3's inspect.getsourcefile() checks for a module
+ # and a pep302 __loader__ ... we don't have a module
+ # at code compile-time so we need to fake it here
+ m = ModuleType("_pycodecompile_pseudo_module")
+ py.std.inspect.modulesbyfile[filename] = None
+ py.std.sys.modules[None] = m
+ m.__loader__ = 1
+ py.std.linecache.cache[filename] = (1, None, lines, filename)
+ return co
+
+#
+# public API shortcut functions
+#
+
+def compile_(source, filename=None, mode='exec', flags=
+ generators.compiler_flag, dont_inherit=0):
+ """ compile the given source to a raw code object,
+ and maintain an internal cache which allows later
+ retrieval of the source code for the code object
+ and any recursively created code objects.
+ """
+ if _ast is not None and isinstance(source, _ast.AST):
+ # XXX should Source support having AST?
+ return cpy_compile(source, filename, mode, flags, dont_inherit)
+ _genframe = sys._getframe(1) # the caller
+ s = Source(source)
+ co = s.compile(filename, mode, flags, _genframe=_genframe)
+ return co
+
+
+def getfslineno(obj):
+ """ Return source location (path, lineno) for the given object.
+ If the source cannot be determined return ("", -1)
+ """
+ try:
+ code = py.code.Code(obj)
+ except TypeError:
+ try:
+ fn = (py.std.inspect.getsourcefile(obj) or
+ py.std.inspect.getfile(obj))
+ except TypeError:
+ return "", -1
+
+ fspath = fn and py.path.local(fn) or None
+ lineno = -1
+ if fspath:
+ try:
+ _, lineno = findsource(obj)
+ except IOError:
+ pass
+ else:
+ fspath = code.path
+ lineno = code.firstlineno
+ assert isinstance(lineno, int)
+ return fspath, lineno
+
+#
+# helper functions
+#
+
+def findsource(obj):
+ try:
+ sourcelines, lineno = py.std.inspect.findsource(obj)
+ except py.builtin._sysex:
+ raise
+ except:
+ return None, -1
+ source = Source()
+ source.lines = [line.rstrip() for line in sourcelines]
+ return source, lineno
+
+def getsource(obj, **kwargs):
+ obj = py.code.getrawcode(obj)
+ try:
+ strsrc = inspect.getsource(obj)
+ except IndentationError:
+ strsrc = "\"Buggy python version consider upgrading, cannot get source\""
+ assert isinstance(strsrc, str)
+ return Source(strsrc, **kwargs)
+
+def deindent(lines, offset=None):
+ if offset is None:
+ for line in lines:
+ line = line.expandtabs()
+ s = line.lstrip()
+ if s:
+ offset = len(line)-len(s)
+ break
+ else:
+ offset = 0
+ if offset == 0:
+ return list(lines)
+ newlines = []
+ def readline_generator(lines):
+ for line in lines:
+ yield line + '\n'
+ while True:
+ yield ''
+
+ it = readline_generator(lines)
+
+ try:
+ for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
+ if sline > len(lines):
+ break # End of input reached
+ if sline > len(newlines):
+ line = lines[sline - 1].expandtabs()
+ if line.lstrip() and line[:offset].isspace():
+ line = line[offset:] # Deindent
+ newlines.append(line)
+
+ for i in range(sline, eline):
+ # Don't deindent continuing lines of
+ # multiline tokens (i.e. multiline strings)
+ newlines.append(lines[i])
+ except (IndentationError, tokenize.TokenError):
+ pass
+ # Add any lines we didn't see. E.g. if an exception was raised.
+ newlines.extend(lines[len(newlines):])
+ return newlines
+
+
+def get_statement_startend2(lineno, node):
+ import ast
+ # flatten all statements and except handlers into one lineno-list
+ # AST's line numbers start indexing at 1
+ l = []
+ for x in ast.walk(node):
+ if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
+ l.append(x.lineno - 1)
+ for name in "finalbody", "orelse":
+ val = getattr(x, name, None)
+ if val:
+ # treat the finally/orelse part as its own statement
+ l.append(val[0].lineno - 1 - 1)
+ l.sort()
+ insert_index = bisect_right(l, lineno)
+ start = l[insert_index - 1]
+ if insert_index >= len(l):
+ end = None
+ else:
+ end = l[insert_index]
+ return start, end
+
+
+def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
+ if astnode is None:
+ content = str(source)
+ if sys.version_info < (2,7):
+ content += "\n"
+ try:
+ astnode = compile(content, "source", "exec", 1024) # 1024 for AST
+ except ValueError:
+ start, end = getstatementrange_old(lineno, source, assertion)
+ return None, start, end
+ start, end = get_statement_startend2(lineno, astnode)
+ # we need to correct the end:
+ # - ast-parsing strips comments
+ # - there might be empty lines
+ # - we might have lesser indented code blocks at the end
+ if end is None:
+ end = len(source.lines)
+
+ if end > start + 1:
+ # make sure we don't span differently indented code blocks
+        # by using the BlockFinder helper that inspect.getsource() itself uses
+ block_finder = inspect.BlockFinder()
+ # if we start with an indented line, put blockfinder to "started" mode
+ block_finder.started = source.lines[start][0].isspace()
+ it = ((x + "\n") for x in source.lines[start:end])
+ try:
+ for tok in tokenize.generate_tokens(lambda: next(it)):
+ block_finder.tokeneater(*tok)
+ except (inspect.EndOfBlock, IndentationError):
+ end = block_finder.last + start
+ except Exception:
+ pass
+
+ # the end might still point to a comment or empty line, correct it
+ while end:
+ line = source.lines[end - 1].lstrip()
+ if line.startswith("#") or not line:
+ end -= 1
+ else:
+ break
+ return astnode, start, end
+
+
+def getstatementrange_old(lineno, source, assertion=False):
+ """ return (start, end) tuple which spans the minimal
+    statement region which contains the given lineno.
+ raise an IndexError if no such statementrange can be found.
+ """
+ # XXX this logic is only used on python2.4 and below
+ # 1. find the start of the statement
+ from codeop import compile_command
+ for start in range(lineno, -1, -1):
+ if assertion:
+ line = source.lines[start]
+ # the following lines are not fully tested, change with care
+ if 'super' in line and 'self' in line and '__init__' in line:
+ raise IndexError("likely a subclass")
+ if "assert" not in line and "raise" not in line:
+ continue
+ trylines = source.lines[start:lineno+1]
+ # quick hack to prepare parsing an indented line with
+ # compile_command() (which errors on "return" outside defs)
+ trylines.insert(0, 'def xxx():')
+ trysource = '\n '.join(trylines)
+ # ^ space here
+ try:
+ compile_command(trysource)
+ except (SyntaxError, OverflowError, ValueError):
+ continue
+
+ # 2. find the end of the statement
+ for end in range(lineno+1, len(source)+1):
+ trysource = source[start:end]
+ if trysource.isparseable():
+ return start, end
+ raise SyntaxError("no valid source range around line %d " % (lineno,))
+
+
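A small sketch of the Source helper above, assuming the vendored py package is importable; the sample function is invented for illustration:

    import py

    source = py.code.Source(
        "def f(x):\n"
        "    return (x +\n"
        "            1)\n"
    )
    print(source.lines)                  # ['def f(x):', '    return (x +', '            1)']
    print(str(source.getstatement(1)))   # the whole multi-line return statement (linenos are 0-based)
    print(source.isparseable())          # True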
diff --git a/testing/web-platform/tests/tools/py/py/_error.py b/testing/web-platform/tests/tools/py/py/_error.py
new file mode 100644
index 000000000..550fb521a
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_error.py
@@ -0,0 +1,88 @@
+"""
+create errno-specific classes for IO or os calls.
+
+"""
+import sys, os, errno
+
+class Error(EnvironmentError):
+ def __repr__(self):
+ return "%s.%s %r: %s " %(self.__class__.__module__,
+ self.__class__.__name__,
+ self.__class__.__doc__,
+ " ".join(map(str, self.args)),
+ #repr(self.args)
+ )
+
+ def __str__(self):
+ s = "[%s]: %s" %(self.__class__.__doc__,
+ " ".join(map(str, self.args)),
+ )
+ return s
+
+_winerrnomap = {
+ 2: errno.ENOENT,
+ 3: errno.ENOENT,
+ 17: errno.EEXIST,
+    13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable
+ 22: errno.ENOTDIR,
+ 20: errno.ENOTDIR,
+ 267: errno.ENOTDIR,
+ 5: errno.EACCES, # anything better?
+}
+
+class ErrorMaker(object):
+ """ lazily provides Exception classes for each possible POSIX errno
+ (as defined per the 'errno' module). All such instances
+ subclass EnvironmentError.
+ """
+ Error = Error
+ _errno2class = {}
+
+ def __getattr__(self, name):
+ if name[0] == "_":
+ raise AttributeError(name)
+ eno = getattr(errno, name)
+ cls = self._geterrnoclass(eno)
+ setattr(self, name, cls)
+ return cls
+
+ def _geterrnoclass(self, eno):
+ try:
+ return self._errno2class[eno]
+ except KeyError:
+ clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
+ errorcls = type(Error)(clsname, (Error,),
+ {'__module__':'py.error',
+ '__doc__': os.strerror(eno)})
+ self._errno2class[eno] = errorcls
+ return errorcls
+
+ def checked_call(self, func, *args, **kwargs):
+ """ call a function and raise an errno-exception if applicable. """
+ __tracebackhide__ = True
+ try:
+ return func(*args, **kwargs)
+ except self.Error:
+ raise
+ except (OSError, EnvironmentError):
+ cls, value, tb = sys.exc_info()
+ if not hasattr(value, 'errno'):
+ raise
+ __tracebackhide__ = False
+ errno = value.errno
+ try:
+ if not isinstance(value, WindowsError):
+ raise NameError
+ except NameError:
+ # we are not on Windows, or we got a proper OSError
+ cls = self._geterrnoclass(errno)
+ else:
+ try:
+ cls = self._geterrnoclass(_winerrnomap[errno])
+ except KeyError:
+ raise value
+ raise cls("%s%r" % (func.__name__, args))
+ __tracebackhide__ = True
+
+
+error = ErrorMaker()
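A brief sketch of the lazy errno-class mapping above, assuming a POSIX system and the vendored py package exposing this module as py.error; the path is made up and expected not to exist:

    import py

    print(py.error.ENOENT.__doc__)   # strerror text, e.g. 'No such file or directory'

    try:
        py.error.checked_call(open, "/no/such/file")
    except py.error.ENOENT as exc:
        # the OSError/IOError was re-raised as the errno-specific class
        print(exc)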
diff --git a/testing/web-platform/tests/tools/py/py/_iniconfig.py b/testing/web-platform/tests/tools/py/py/_iniconfig.py
new file mode 100644
index 000000000..92b50bd85
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_iniconfig.py
@@ -0,0 +1,162 @@
+""" brain-dead simple parser for ini-style files.
+(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed
+"""
+__version__ = "0.2.dev2"
+
+__all__ = ['IniConfig', 'ParseError']
+
+COMMENTCHARS = "#;"
+
+class ParseError(Exception):
+ def __init__(self, path, lineno, msg):
+ Exception.__init__(self, path, lineno, msg)
+ self.path = path
+ self.lineno = lineno
+ self.msg = msg
+
+ def __str__(self):
+ return "%s:%s: %s" %(self.path, self.lineno+1, self.msg)
+
+class SectionWrapper(object):
+ def __init__(self, config, name):
+ self.config = config
+ self.name = name
+
+ def lineof(self, name):
+ return self.config.lineof(self.name, name)
+
+ def get(self, key, default=None, convert=str):
+ return self.config.get(self.name, key, convert=convert, default=default)
+
+ def __getitem__(self, key):
+ return self.config.sections[self.name][key]
+
+ def __iter__(self):
+ section = self.config.sections.get(self.name, [])
+ def lineof(key):
+ return self.config.lineof(self.name, key)
+ for name in sorted(section, key=lineof):
+ yield name
+
+ def items(self):
+ for name in self:
+ yield name, self[name]
+
+
+class IniConfig(object):
+ def __init__(self, path, data=None):
+ self.path = str(path) # convenience
+ if data is None:
+ f = open(self.path)
+ try:
+ tokens = self._parse(iter(f))
+ finally:
+ f.close()
+ else:
+ tokens = self._parse(data.splitlines(True))
+
+ self._sources = {}
+ self.sections = {}
+
+ for lineno, section, name, value in tokens:
+ if section is None:
+ self._raise(lineno, 'no section header defined')
+ self._sources[section, name] = lineno
+ if name is None:
+ if section in self.sections:
+ self._raise(lineno, 'duplicate section %r'%(section, ))
+ self.sections[section] = {}
+ else:
+ if name in self.sections[section]:
+ self._raise(lineno, 'duplicate name %r'%(name, ))
+ self.sections[section][name] = value
+
+ def _raise(self, lineno, msg):
+ raise ParseError(self.path, lineno, msg)
+
+ def _parse(self, line_iter):
+ result = []
+ section = None
+ for lineno, line in enumerate(line_iter):
+ name, data = self._parseline(line, lineno)
+ # new value
+ if name is not None and data is not None:
+ result.append((lineno, section, name, data))
+ # new section
+ elif name is not None and data is None:
+ if not name:
+ self._raise(lineno, 'empty section name')
+ section = name
+ result.append((lineno, section, None, None))
+ # continuation
+ elif name is None and data is not None:
+ if not result:
+ self._raise(lineno, 'unexpected value continuation')
+ last = result.pop()
+ last_name, last_data = last[-2:]
+ if last_name is None:
+ self._raise(lineno, 'unexpected value continuation')
+
+ if last_data:
+ data = '%s\n%s' % (last_data, data)
+ result.append(last[:-1] + (data,))
+ return result
+
+ def _parseline(self, line, lineno):
+ # blank lines
+ if iscommentline(line):
+ line = ""
+ else:
+ line = line.rstrip()
+ if not line:
+ return None, None
+ # section
+ if line[0] == '[':
+ realline = line
+ for c in COMMENTCHARS:
+ line = line.split(c)[0].rstrip()
+ if line[-1] == "]":
+ return line[1:-1], None
+ return None, realline.strip()
+ # value
+ elif not line[0].isspace():
+ try:
+ name, value = line.split('=', 1)
+ if ":" in name:
+ raise ValueError()
+ except ValueError:
+ try:
+ name, value = line.split(":", 1)
+ except ValueError:
+ self._raise(lineno, 'unexpected line: %r' % line)
+ return name.strip(), value.strip()
+ # continuation
+ else:
+ return None, line.strip()
+
+ def lineof(self, section, name=None):
+ lineno = self._sources.get((section, name))
+ if lineno is not None:
+ return lineno + 1
+
+ def get(self, section, name, default=None, convert=str):
+ try:
+ return convert(self.sections[section][name])
+ except KeyError:
+ return default
+
+ def __getitem__(self, name):
+ if name not in self.sections:
+ raise KeyError(name)
+ return SectionWrapper(self, name)
+
+ def __iter__(self):
+ for name in sorted(self.sections, key=self.lineof):
+ yield SectionWrapper(self, name)
+
+ def __contains__(self, arg):
+ return arg in self.sections
+
+def iscommentline(line):
+ c = line.lstrip()[:1]
+ return c in COMMENTCHARS
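A short sketch of the ini parser above, assuming the vendored py package is importable; the section and key names are invented for illustration:

    from py._iniconfig import IniConfig

    ini = IniConfig("example.ini", data=(
        "[pytest]\n"
        "addopts = -v\n"
        "          --tb=short\n"      # indented line: value continuation
        "; comments start with ';' or '#'\n"
    ))
    print(ini["pytest"]["addopts"])               # '-v' and '--tb=short' joined by a newline
    print(ini.get("pytest", "missing", default="fallback"))
    print("pytest" in ini, ini.lineof("pytest"))  # membership test and 1-based line number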
diff --git a/testing/web-platform/tests/tools/py/py/_io/__init__.py b/testing/web-platform/tests/tools/py/py/_io/__init__.py
new file mode 100644
index 000000000..835f01f3a
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_io/__init__.py
@@ -0,0 +1 @@
+""" input/output helping """
diff --git a/testing/web-platform/tests/tools/py/py/_io/capture.py b/testing/web-platform/tests/tools/py/py/_io/capture.py
new file mode 100644
index 000000000..bc157ed97
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_io/capture.py
@@ -0,0 +1,371 @@
+import os
+import sys
+import py
+import tempfile
+
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+if sys.version_info < (3,0):
+ class TextIO(StringIO):
+ def write(self, data):
+ if not isinstance(data, unicode):
+ data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
+ StringIO.write(self, data)
+else:
+ TextIO = StringIO
+
+try:
+ from io import BytesIO
+except ImportError:
+ class BytesIO(StringIO):
+ def write(self, data):
+ if isinstance(data, unicode):
+ raise TypeError("not a byte value: %r" %(data,))
+ StringIO.write(self, data)
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False):
+ """ save targetfd descriptor, and open a new
+ temporary file there. If no tmpfile is
+            specified, a tempfile.TemporaryFile() will be opened
+ in text mode.
+ """
+ self.targetfd = targetfd
+ if tmpfile is None and targetfd != 0:
+ f = tempfile.TemporaryFile('wb+')
+ tmpfile = dupfile(f, encoding="UTF-8")
+ f.close()
+ self.tmpfile = tmpfile
+ self._savefd = os.dup(self.targetfd)
+ if patchsys:
+ self._oldsys = getattr(sys, patchsysdict[targetfd])
+ if now:
+ self.start()
+
+ def start(self):
+ try:
+ os.fstat(self._savefd)
+ except OSError:
+ raise ValueError("saved filedescriptor not valid, "
+ "did you call start() twice?")
+ if self.targetfd == 0 and not self.tmpfile:
+ fd = os.open(devnullpath, os.O_RDONLY)
+ os.dup2(fd, 0)
+ os.close(fd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+ else:
+ os.dup2(self.tmpfile.fileno(), self.targetfd)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+ def done(self):
+ """ unpatch and clean up, returns the self.tmpfile (file object)
+ """
+ os.dup2(self._savefd, self.targetfd)
+ os.close(self._savefd)
+ if self.targetfd != 0:
+ self.tmpfile.seek(0)
+ if hasattr(self, '_oldsys'):
+ setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+ return self.tmpfile
+
+ def writeorg(self, data):
+ """ write a string to the original file descriptor
+ """
+ tempfp = tempfile.TemporaryFile()
+ try:
+ os.dup2(self._savefd, tempfp.fileno())
+ tempfp.write(data)
+ finally:
+ tempfp.close()
+
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+ """ return a new open file object that's a duplicate of f
+
+ mode is duplicated if not given, 'buffering' controls
+ buffer size (defaulting to no buffering) and 'raising'
+ defines whether an exception is raised when an incompatible
+ file object is passed in (if raising is False, the file
+ object itself will be returned)
+ """
+ try:
+ fd = f.fileno()
+ mode = mode or f.mode
+ except AttributeError:
+ if raising:
+ raise
+ return f
+ newfd = os.dup(fd)
+ if sys.version_info >= (3,0):
+ if encoding is not None:
+ mode = mode.replace("b", "")
+ buffering = True
+ return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+ else:
+ f = os.fdopen(newfd, mode, buffering)
+ if encoding is not None:
+ return EncodedFile(f, encoding)
+ return f
+
+class EncodedFile(object):
+ def __init__(self, _stream, encoding):
+ self._stream = _stream
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding)
+ elif isinstance(obj, str):
+ pass
+ else:
+ obj = str(obj)
+ self._stream.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+class Capture(object):
+ def call(cls, func, *args, **kwargs):
+ """ return a (res, out, err) tuple where
+ out and err represent the output/error output
+ during function execution.
+ call the given function with args/kwargs
+ and capture output/error during its execution.
+ """
+ so = cls()
+ try:
+ res = func(*args, **kwargs)
+ finally:
+ out, err = so.reset()
+ return res, out, err
+ call = classmethod(call)
+
+ def reset(self):
+ """ reset sys.stdout/stderr and return captured output as strings. """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already reset")
+ self._reset = True
+ outfile, errfile = self.done(save=False)
+ out, err = "", ""
+ if outfile and not outfile.closed:
+ out = outfile.read()
+ outfile.close()
+ if errfile and errfile != outfile and not errfile.closed:
+ err = errfile.read()
+ errfile.close()
+ return out, err
+
+ def suspend(self):
+ """ return current snapshot captures, memorize tempfiles. """
+ outerr = self.readouterr()
+ outfile, errfile = self.done()
+ return outerr
+
+
+class StdCaptureFD(Capture):
+ """ This class allows to capture writes to FD1 and FD2
+ and may connect a NULL file to FD0 (and prevent
+ reads from sys.stdin). If any of the 0,1,2 file descriptors
+ is invalid it will not be captured.
+ """
+ def __init__(self, out=True, err=True, mixed=False,
+ in_=True, patchsys=True, now=True):
+ self._options = {
+ "out": out,
+ "err": err,
+ "mixed": mixed,
+ "in_": in_,
+ "patchsys": patchsys,
+ "now": now,
+ }
+ self._save()
+ if now:
+ self.startall()
+
+ def _save(self):
+ in_ = self._options['in_']
+ out = self._options['out']
+ err = self._options['err']
+ mixed = self._options['mixed']
+ patchsys = self._options['patchsys']
+ if in_:
+ try:
+ self.in_ = FDCapture(0, tmpfile=None, now=False,
+ patchsys=patchsys)
+ except OSError:
+ pass
+ if out:
+ tmpfile = None
+ if hasattr(out, 'write'):
+ tmpfile = out
+ try:
+ self.out = FDCapture(1, tmpfile=tmpfile,
+ now=False, patchsys=patchsys)
+ self._options['out'] = self.out.tmpfile
+ except OSError:
+ pass
+ if err:
+ if out and mixed:
+ tmpfile = self.out.tmpfile
+ elif hasattr(err, 'write'):
+ tmpfile = err
+ else:
+ tmpfile = None
+ try:
+ self.err = FDCapture(2, tmpfile=tmpfile,
+ now=False, patchsys=patchsys)
+ self._options['err'] = self.err.tmpfile
+ except OSError:
+ pass
+
+ def startall(self):
+ if hasattr(self, 'in_'):
+ self.in_.start()
+ if hasattr(self, 'out'):
+ self.out.start()
+ if hasattr(self, 'err'):
+ self.err.start()
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if hasattr(self, 'out') and not self.out.tmpfile.closed:
+ outfile = self.out.done()
+ if hasattr(self, 'err') and not self.err.tmpfile.closed:
+ errfile = self.err.done()
+ if hasattr(self, 'in_'):
+ tmpfile = self.in_.done()
+ if save:
+ self._save()
+ return outfile, errfile
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ if hasattr(self, "out"):
+ out = self._readsnapshot(self.out.tmpfile)
+ else:
+ out = ""
+ if hasattr(self, "err"):
+ err = self._readsnapshot(self.err.tmpfile)
+ else:
+ err = ""
+ return [out, err]
+
+ def _readsnapshot(self, f):
+ f.seek(0)
+ res = f.read()
+ enc = getattr(f, "encoding", None)
+ if enc:
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+
+class StdCapture(Capture):
+ """ This class allows to capture writes to sys.stdout|stderr "in-memory"
+ and will raise errors on tries to read from sys.stdin. It only
+ modifies sys.stdout|stderr|stdin attributes and does not
+ touch underlying File Descriptors (use StdCaptureFD for that).
+ """
+ def __init__(self, out=True, err=True, in_=True, mixed=False, now=True):
+ self._oldout = sys.stdout
+ self._olderr = sys.stderr
+ self._oldin = sys.stdin
+ if out and not hasattr(out, 'file'):
+ out = TextIO()
+ self.out = out
+ if err:
+ if mixed:
+ err = out
+ elif not hasattr(err, 'write'):
+ err = TextIO()
+ self.err = err
+ self.in_ = in_
+ if now:
+ self.startall()
+
+ def startall(self):
+ if self.out:
+ sys.stdout = self.out
+ if self.err:
+ sys.stderr = self.err
+ if self.in_:
+ sys.stdin = self.in_ = DontReadFromInput()
+
+ def done(self, save=True):
+ """ return (outfile, errfile) and stop capturing. """
+ outfile = errfile = None
+ if self.out and not self.out.closed:
+ sys.stdout = self._oldout
+ outfile = self.out
+ outfile.seek(0)
+ if self.err and not self.err.closed:
+ sys.stderr = self._olderr
+ errfile = self.err
+ errfile.seek(0)
+ if self.in_:
+ sys.stdin = self._oldin
+ return outfile, errfile
+
+ def resume(self):
+ """ resume capturing with original temp files. """
+ self.startall()
+
+ def readouterr(self):
+ """ return snapshot value of stdout/stderr capturings. """
+ out = err = ""
+ if self.out:
+ out = self.out.getvalue()
+ self.out.truncate(0)
+ self.out.seek(0)
+ if self.err:
+ err = self.err.getvalue()
+ self.err.truncate(0)
+ self.err.seek(0)
+ return out, err
+
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+ def isatty(self):
+ return False
+ def close(self):
+ pass
+
+try:
+ devnullpath = os.devnull
+except AttributeError:
+ if os.name == 'nt':
+ devnullpath = 'NUL'
+ else:
+ devnullpath = '/dev/null'
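A minimal sketch of the in-memory capturing class above, assuming the vendored py package exposes it as py.io.StdCapture:

    import sys
    import py

    cap = py.io.StdCapture()        # swaps sys.stdout/stderr/stdin immediately
    print("hello")
    sys.stderr.write("oops\n")
    out, err = cap.reset()          # restores the real streams
    assert out == "hello\n" and err == "oops\n"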
diff --git a/testing/web-platform/tests/tools/py/py/_io/saferepr.py b/testing/web-platform/tests/tools/py/py/_io/saferepr.py
new file mode 100644
index 000000000..8518290ef
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_io/saferepr.py
@@ -0,0 +1,71 @@
+import py
+import sys
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+class SafeRepr(reprlib.Repr):
+ """ subclass of repr.Repr that limits the resulting size of repr()
+ and includes information on exceptions raised during the call.
+ """
+ def repr(self, x):
+ return self._callhelper(reprlib.Repr.repr, self, x)
+
+ def repr_unicode(self, x, level):
+ # Strictly speaking wrong on narrow builds
+ def repr(u):
+ if "'" not in u:
+ return py.builtin._totext("'%s'") % u
+ elif '"' not in u:
+ return py.builtin._totext('"%s"') % u
+ else:
+ return py.builtin._totext("'%s'") % u.replace("'", r"\'")
+ s = repr(x[:self.maxstring])
+ if len(s) > self.maxstring:
+ i = max(0, (self.maxstring-3)//2)
+ j = max(0, self.maxstring-3-i)
+ s = repr(x[:i] + x[len(x)-j:])
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+ def repr_instance(self, x, level):
+ return self._callhelper(builtin_repr, x)
+
+ def _callhelper(self, call, x, *args):
+ try:
+ # Try the vanilla repr and make sure that the result is a string
+ s = call(x, *args)
+ except py.builtin._sysex:
+ raise
+ except:
+ cls, e, tb = sys.exc_info()
+ exc_name = getattr(cls, '__name__', 'unknown')
+ try:
+ exc_info = str(e)
+ except py.builtin._sysex:
+ raise
+ except:
+ exc_info = 'unknown'
+ return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
+ exc_name, exc_info, x.__class__.__name__, id(x))
+ else:
+ if len(s) > self.maxsize:
+ i = max(0, (self.maxsize-3)//2)
+ j = max(0, self.maxsize-3-i)
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+def saferepr(obj, maxsize=240):
+ """ return a size-limited safe repr-string for the given object.
+ Failing __repr__ functions of user instances will be represented
+ with a short exception info and 'saferepr' generally takes
+ care to never raise exceptions itself. This function is a wrapper
+ around the Repr/reprlib functionality of the standard 2.6 lib.
+ """
+ # review exception handling
+ srepr = SafeRepr()
+ srepr.maxstring = maxsize
+ srepr.maxsize = maxsize
+ srepr.maxother = 160
+ return srepr.repr(obj)
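A quick sketch of saferepr's behaviour, assuming the vendored py package is importable; the Broken class is invented for illustration:

    import py

    class Broken(object):
        def __repr__(self):
            raise RuntimeError("cannot repr this")

    # the failing __repr__ is reported instead of propagated
    print(py.io.saferepr(Broken()))   # <[RuntimeError("cannot repr this") raised in repr()] Broken object at 0x...>
    print(py.io.saferepr("x" * 1000)) # truncated to about 240 chars with '...' in the middle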
diff --git a/testing/web-platform/tests/tools/py/py/_io/terminalwriter.py b/testing/web-platform/tests/tools/py/py/_io/terminalwriter.py
new file mode 100644
index 000000000..cef1ff580
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_io/terminalwriter.py
@@ -0,0 +1,348 @@
+"""
+
+Helper functions for writing to terminals and files.
+
+"""
+
+
+import sys, os
+import py
+py3k = sys.version_info[0] >= 3
+from py.builtin import text, bytes
+
+win32_and_ctypes = False
+colorama = None
+if sys.platform == "win32":
+ try:
+ import colorama
+ except ImportError:
+ try:
+ import ctypes
+ win32_and_ctypes = True
+ except ImportError:
+ pass
+
+
+def _getdimensions():
+    import termios, fcntl, struct
+    call = fcntl.ioctl(1, termios.TIOCGWINSZ, "\000" * 8)
+    height, width = struct.unpack("hhhh", call)[:2]
+ return height, width
+
+
+def get_terminal_width():
+ height = width = 0
+ try:
+ height, width = _getdimensions()
+ except py.builtin._sysex:
+ raise
+ except:
+ # pass to fallback below
+ pass
+
+ if width == 0:
+ # FALLBACK:
+ # * some exception happened
+ # * or this is emacs terminal which reports (0,0)
+ width = int(os.environ.get('COLUMNS', 80))
+
+    # XXX the windows getdimensions may be bogus, let's sanity-check it a bit
+ if width < 40:
+ width = 80
+ return width
+
+terminal_width = get_terminal_width()
+
+# XXX unify with _escaped func below
+def ansi_print(text, esc, file=None, newline=True, flush=False):
+ if file is None:
+ file = sys.stderr
+ text = text.rstrip()
+ if esc and not isinstance(esc, tuple):
+ esc = (esc,)
+ if esc and sys.platform != "win32" and file.isatty():
+ text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
+ text +
+ '\x1b[0m') # ANSI color code "reset"
+ if newline:
+ text += '\n'
+
+ if esc and win32_and_ctypes and file.isatty():
+ if 1 in esc:
+ bold = True
+ esc = tuple([x for x in esc if x != 1])
+ else:
+ bold = False
+ esctable = {() : FOREGROUND_WHITE, # normal
+ (31,): FOREGROUND_RED, # red
+ (32,): FOREGROUND_GREEN, # green
+ (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow
+ (34,): FOREGROUND_BLUE, # blue
+ (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple
+ (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
+ (37,): FOREGROUND_WHITE, # white
+ (39,): FOREGROUND_WHITE, # reset
+ }
+ attr = esctable.get(esc, FOREGROUND_WHITE)
+ if bold:
+ attr |= FOREGROUND_INTENSITY
+ STD_OUTPUT_HANDLE = -11
+ STD_ERROR_HANDLE = -12
+ if file is sys.stderr:
+ handle = GetStdHandle(STD_ERROR_HANDLE)
+ else:
+ handle = GetStdHandle(STD_OUTPUT_HANDLE)
+ oldcolors = GetConsoleInfo(handle).wAttributes
+ attr |= (oldcolors & 0x0f0)
+ SetConsoleTextAttribute(handle, attr)
+ while len(text) > 32768:
+ file.write(text[:32768])
+ text = text[32768:]
+ if text:
+ file.write(text)
+ SetConsoleTextAttribute(handle, oldcolors)
+ else:
+ file.write(text)
+
+ if flush:
+ file.flush()
+
+def should_do_markup(file):
+ if os.environ.get('PY_COLORS') == '1':
+ return True
+ if os.environ.get('PY_COLORS') == '0':
+ return False
+ return hasattr(file, 'isatty') and file.isatty() \
+ and os.environ.get('TERM') != 'dumb' \
+ and not (sys.platform.startswith('java') and os._name == 'nt')
+
+class TerminalWriter(object):
+ _esctable = dict(black=30, red=31, green=32, yellow=33,
+ blue=34, purple=35, cyan=36, white=37,
+ Black=40, Red=41, Green=42, Yellow=43,
+ Blue=44, Purple=45, Cyan=46, White=47,
+ bold=1, light=2, blink=5, invert=7)
+
+ # XXX deprecate stringio argument
+ def __init__(self, file=None, stringio=False, encoding=None):
+ if file is None:
+ if stringio:
+ self.stringio = file = py.io.TextIO()
+ else:
+ file = py.std.sys.stdout
+ elif py.builtin.callable(file) and not (
+ hasattr(file, "write") and hasattr(file, "flush")):
+ file = WriteFile(file, encoding=encoding)
+ if hasattr(file, "isatty") and file.isatty() and colorama:
+ file = colorama.AnsiToWin32(file).stream
+ self.encoding = encoding or getattr(file, 'encoding', "utf-8")
+ self._file = file
+ self.fullwidth = get_terminal_width()
+ self.hasmarkup = should_do_markup(file)
+ self._lastlen = 0
+
+ def _escaped(self, text, esc):
+ if esc and self.hasmarkup:
+ text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
+ text +'\x1b[0m')
+ return text
+
+ def markup(self, text, **kw):
+ esc = []
+ for name in kw:
+ if name not in self._esctable:
+ raise ValueError("unknown markup: %r" %(name,))
+ if kw[name]:
+ esc.append(self._esctable[name])
+ return self._escaped(text, tuple(esc))
+
+ def sep(self, sepchar, title=None, fullwidth=None, **kw):
+ if fullwidth is None:
+ fullwidth = self.fullwidth
+ # the goal is to have the line be as long as possible
+ # under the condition that len(line) <= fullwidth
+ if sys.platform == "win32":
+ # if we print in the last column on windows we are on a
+ # new line but there is no way to verify/neutralize this
+ # (we may not know the exact line width)
+ # so let's be defensive to avoid empty lines in the output
+ fullwidth -= 1
+ if title is not None:
+ # we want 2 + 2*len(fill) + len(title) <= fullwidth
+ # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
+ # 2*len(sepchar)*N <= fullwidth - len(title) - 2
+ # N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
+ N = (fullwidth - len(title) - 2) // (2*len(sepchar))
+ fill = sepchar * N
+ line = "%s %s %s" % (fill, title, fill)
+ else:
+ # we want len(sepchar)*N <= fullwidth
+ # i.e. N <= fullwidth // len(sepchar)
+ line = sepchar * (fullwidth // len(sepchar))
+ # in some situations there is room for an extra sepchar at the right,
+ # in particular if we consider that with a sepchar like "_ " the
+ # trailing space is not important at the end of the line
+ if len(line) + len(sepchar.rstrip()) <= fullwidth:
+ line += sepchar.rstrip()
+
+ self.line(line, **kw)
+
+ def write(self, msg, **kw):
+ if msg:
+ if not isinstance(msg, (bytes, text)):
+ msg = text(msg)
+ if self.hasmarkup and kw:
+ markupmsg = self.markup(msg, **kw)
+ else:
+ markupmsg = msg
+ write_out(self._file, markupmsg)
+
+ def line(self, s='', **kw):
+ self.write(s, **kw)
+ self._checkfill(s)
+ self.write('\n')
+
+ def reline(self, line, **kw):
+ if not self.hasmarkup:
+ raise ValueError("cannot use rewrite-line without terminal")
+ self.write(line, **kw)
+ self._checkfill(line)
+ self.write('\r')
+ self._lastlen = len(line)
+
+ def _checkfill(self, line):
+ diff2last = self._lastlen - len(line)
+ if diff2last > 0:
+ self.write(" " * diff2last)
+
+class Win32ConsoleWriter(TerminalWriter):
+ def write(self, msg, **kw):
+ if msg:
+ if not isinstance(msg, (bytes, text)):
+ msg = text(msg)
+ oldcolors = None
+ if self.hasmarkup and kw:
+ handle = GetStdHandle(STD_OUTPUT_HANDLE)
+ oldcolors = GetConsoleInfo(handle).wAttributes
+ default_bg = oldcolors & 0x00F0
+ attr = default_bg
+ if kw.pop('bold', False):
+ attr |= FOREGROUND_INTENSITY
+
+ if kw.pop('red', False):
+ attr |= FOREGROUND_RED
+ elif kw.pop('blue', False):
+ attr |= FOREGROUND_BLUE
+ elif kw.pop('green', False):
+ attr |= FOREGROUND_GREEN
+ elif kw.pop('yellow', False):
+ attr |= FOREGROUND_GREEN|FOREGROUND_RED
+ else:
+ attr |= oldcolors & 0x0007
+
+ SetConsoleTextAttribute(handle, attr)
+ write_out(self._file, msg)
+ if oldcolors:
+ SetConsoleTextAttribute(handle, oldcolors)
+
+class WriteFile(object):
+ def __init__(self, writemethod, encoding=None):
+ self.encoding = encoding
+ self._writemethod = writemethod
+
+ def write(self, data):
+ if self.encoding:
+ data = data.encode(self.encoding, "replace")
+ self._writemethod(data)
+
+ def flush(self):
+ return
+
+
+if win32_and_ctypes:
+ TerminalWriter = Win32ConsoleWriter
+ import ctypes
+ from ctypes import wintypes
+
+ # ctypes access to the Windows console
+ STD_OUTPUT_HANDLE = -11
+ STD_ERROR_HANDLE = -12
+ FOREGROUND_BLACK = 0x0000 # black text
+ FOREGROUND_BLUE = 0x0001 # text color contains blue.
+ FOREGROUND_GREEN = 0x0002 # text color contains green.
+ FOREGROUND_RED = 0x0004 # text color contains red.
+ FOREGROUND_WHITE = 0x0007
+ FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
+ BACKGROUND_BLACK = 0x0000 # background color black
+ BACKGROUND_BLUE = 0x0010 # background color contains blue.
+ BACKGROUND_GREEN = 0x0020 # background color contains green.
+ BACKGROUND_RED = 0x0040 # background color contains red.
+ BACKGROUND_WHITE = 0x0070
+ BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
+
+ SHORT = ctypes.c_short
+ class COORD(ctypes.Structure):
+ _fields_ = [('X', SHORT),
+ ('Y', SHORT)]
+ class SMALL_RECT(ctypes.Structure):
+ _fields_ = [('Left', SHORT),
+ ('Top', SHORT),
+ ('Right', SHORT),
+ ('Bottom', SHORT)]
+ class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
+ _fields_ = [('dwSize', COORD),
+ ('dwCursorPosition', COORD),
+ ('wAttributes', wintypes.WORD),
+ ('srWindow', SMALL_RECT),
+ ('dwMaximumWindowSize', COORD)]
+
+ _GetStdHandle = ctypes.windll.kernel32.GetStdHandle
+ _GetStdHandle.argtypes = [wintypes.DWORD]
+ _GetStdHandle.restype = wintypes.HANDLE
+ def GetStdHandle(kind):
+ return _GetStdHandle(kind)
+
+ SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
+ SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
+ SetConsoleTextAttribute.restype = wintypes.BOOL
+
+ _GetConsoleScreenBufferInfo = \
+ ctypes.windll.kernel32.GetConsoleScreenBufferInfo
+ _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
+ ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
+ _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
+ def GetConsoleInfo(handle):
+ info = CONSOLE_SCREEN_BUFFER_INFO()
+ _GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
+ return info
+
+ def _getdimensions():
+ handle = GetStdHandle(STD_OUTPUT_HANDLE)
+ info = GetConsoleInfo(handle)
+        # Subtract one from the width, otherwise the cursor wraps
+ # and the ending \n causes an empty line to display.
+ return info.dwSize.Y, info.dwSize.X - 1
+
+def write_out(fil, msg):
+ # XXX sometimes "msg" is of type bytes, sometimes text which
+ # complicates the situation. Should we try to enforce unicode?
+ try:
+ # on py27 and above writing out to sys.stdout with an encoding
+ # should usually work for unicode messages (if the encoding is
+ # capable of it)
+ fil.write(msg)
+ except UnicodeEncodeError:
+ # on py26 it might not work because stdout expects bytes
+ if fil.encoding:
+ try:
+ fil.write(msg.encode(fil.encoding))
+ except UnicodeEncodeError:
+ # it might still fail if the encoding is not capable
+ pass
+ else:
+ fil.flush()
+ return
+ # fallback: escape all unicode characters
+ msg = msg.encode("unicode-escape").decode("ascii")
+ fil.write(msg)
+ fil.flush()
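
A short, hedged usage sketch for the writer defined above (assuming the class is reachable under the module path this hunk adds, py._io.terminalwriter; on a non-tty stream the markup keywords are silently ignored, per should_do_markup):

    from py._io.terminalwriter import TerminalWriter

    tw = TerminalWriter()                    # defaults to sys.stdout
    tw.sep("=", "test session starts")       # full-width separator with a centered title
    tw.line("collected 3 items", bold=True)  # write a line, optionally with markup
    tw.write("PASSED", green=True)
    tw.line()
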
diff --git a/testing/web-platform/tests/tools/py/py/_log/__init__.py b/testing/web-platform/tests/tools/py/py/_log/__init__.py
new file mode 100644
index 000000000..fad62e960
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_log/__init__.py
@@ -0,0 +1,2 @@
+""" logging API ('producers' and 'consumers' connected via keywords) """
+
diff --git a/testing/web-platform/tests/tools/py/py/_log/log.py b/testing/web-platform/tests/tools/py/py/_log/log.py
new file mode 100644
index 000000000..ce47e8c75
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_log/log.py
@@ -0,0 +1,186 @@
+"""
+basic logging functionality based on a producer/consumer scheme.
+
+XXX implement this API: (maybe put it into slogger.py?)
+
+ log = Logger(
+ info=py.log.STDOUT,
+ debug=py.log.STDOUT,
+ command=None)
+ log.info("hello", "world")
+ log.command("hello", "world")
+
+ log = Logger(info=Logger(something=...),
+ debug=py.log.STDOUT,
+ command=None)
+"""
+import py, sys
+
+class Message(object):
+ def __init__(self, keywords, args):
+ self.keywords = keywords
+ self.args = args
+
+ def content(self):
+ return " ".join(map(str, self.args))
+
+ def prefix(self):
+ return "[%s] " % (":".join(self.keywords))
+
+ def __str__(self):
+ return self.prefix() + self.content()
+
+
+class Producer(object):
+ """ (deprecated) Log producer API which sends messages to be logged
+ to a 'consumer' object, which then prints them to stdout,
+ stderr, files, etc. Used extensively by PyPy-1.1.
+ """
+
+ Message = Message # to allow later customization
+ keywords2consumer = {}
+
+ def __init__(self, keywords, keywordmapper=None, **kw):
+ if hasattr(keywords, 'split'):
+ keywords = tuple(keywords.split())
+ self._keywords = keywords
+ if keywordmapper is None:
+ keywordmapper = default_keywordmapper
+ self._keywordmapper = keywordmapper
+
+ def __repr__(self):
+ return "<py.log.Producer %s>" % ":".join(self._keywords)
+
+ def __getattr__(self, name):
+ if '_' in name:
+ raise AttributeError(name)
+ producer = self.__class__(self._keywords + (name,))
+ setattr(self, name, producer)
+ return producer
+
+ def __call__(self, *args):
+ """ write a message to the appropriate consumer(s) """
+ func = self._keywordmapper.getconsumer(self._keywords)
+ if func is not None:
+ func(self.Message(self._keywords, args))
+
+class KeywordMapper:
+ def __init__(self):
+ self.keywords2consumer = {}
+
+ def getstate(self):
+ return self.keywords2consumer.copy()
+ def setstate(self, state):
+ self.keywords2consumer.clear()
+ self.keywords2consumer.update(state)
+
+ def getconsumer(self, keywords):
+ """ return a consumer matching the given keywords.
+
+        tries to find the most suitable consumer by walking the list of
+        keywords from the back; the first consumer matching a keyword is
+        returned (falling back to py.log.default)
+ """
+ for i in range(len(keywords), 0, -1):
+ try:
+ return self.keywords2consumer[keywords[:i]]
+ except KeyError:
+ continue
+ return self.keywords2consumer.get('default', default_consumer)
+
+ def setconsumer(self, keywords, consumer):
+ """ set a consumer for a set of keywords. """
+ # normalize to tuples
+ if isinstance(keywords, str):
+ keywords = tuple(filter(None, keywords.split()))
+ elif hasattr(keywords, '_keywords'):
+ keywords = keywords._keywords
+ elif not isinstance(keywords, tuple):
+ raise TypeError("key %r is not a string or tuple" % (keywords,))
+ if consumer is not None and not py.builtin.callable(consumer):
+ if not hasattr(consumer, 'write'):
+ raise TypeError(
+ "%r should be None, callable or file-like" % (consumer,))
+ consumer = File(consumer)
+ self.keywords2consumer[keywords] = consumer
+
+def default_consumer(msg):
+    """ the default consumer; writes the message to sys.stderr """
+ sys.stderr.write(str(msg)+"\n")
+
+default_keywordmapper = KeywordMapper()
+
+def setconsumer(keywords, consumer):
+ default_keywordmapper.setconsumer(keywords, consumer)
+
+def setstate(state):
+ default_keywordmapper.setstate(state)
+def getstate():
+ return default_keywordmapper.getstate()
+
+#
+# Consumers
+#
+
+class File(object):
+ """ log consumer wrapping a file(-like) object """
+ def __init__(self, f):
+ assert hasattr(f, 'write')
+ #assert isinstance(f, file) or not hasattr(f, 'open')
+ self._file = f
+
+ def __call__(self, msg):
+ """ write a message to the log """
+ self._file.write(str(msg) + "\n")
+ if hasattr(self._file, 'flush'):
+ self._file.flush()
+
+class Path(object):
+ """ log consumer that opens and writes to a Path """
+ def __init__(self, filename, append=False,
+ delayed_create=False, buffering=False):
+ self._append = append
+ self._filename = str(filename)
+ self._buffering = buffering
+ if not delayed_create:
+ self._openfile()
+
+ def _openfile(self):
+ mode = self._append and 'a' or 'w'
+ f = open(self._filename, mode)
+ self._file = f
+
+ def __call__(self, msg):
+ """ write a message to the log """
+ if not hasattr(self, "_file"):
+ self._openfile()
+ self._file.write(str(msg) + "\n")
+ if not self._buffering:
+ self._file.flush()
+
+def STDOUT(msg):
+ """ consumer that writes to sys.stdout """
+ sys.stdout.write(str(msg)+"\n")
+
+def STDERR(msg):
+ """ consumer that writes to sys.stderr """
+ sys.stderr.write(str(msg)+"\n")
+
+class Syslog:
+ """ consumer that writes to the syslog daemon """
+
+ def __init__(self, priority = None):
+ if priority is None:
+ priority = self.LOG_INFO
+ self.priority = priority
+
+ def __call__(self, msg):
+ """ write a message to the log """
+ py.std.syslog.syslog(self.priority, str(msg))
+
+for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
+ _prio = "LOG_" + _prio
+ try:
+ setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
+ except AttributeError:
+ pass
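
A hedged sketch of the producer/consumer wiring described in the module docstring, assuming the module-level helpers are exported under py.log as the docstrings above suggest:

    import py

    log = py.log.Producer("myapp")               # keyword chain starts at 'myapp'
    py.log.setconsumer("myapp", py.log.STDOUT)   # route '[myapp] ...' messages to stdout
    py.log.setconsumer("myapp debug", None)      # silence the debug sub-channel

    log("starting up")        # prints: [myapp] starting up
    log.debug("not shown")    # consumer is None, so the message is dropped
    log.net("connected")      # prints: [myapp:net] connected (falls back to the 'myapp' consumer)
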
diff --git a/testing/web-platform/tests/tools/py/py/_log/warning.py b/testing/web-platform/tests/tools/py/py/_log/warning.py
new file mode 100644
index 000000000..722e31e91
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_log/warning.py
@@ -0,0 +1,76 @@
+import py, sys
+
+class DeprecationWarning(DeprecationWarning):
+ def __init__(self, msg, path, lineno):
+ self.msg = msg
+ self.path = path
+ self.lineno = lineno
+ def __repr__(self):
+ return "%s:%d: %s" %(self.path, self.lineno+1, self.msg)
+ def __str__(self):
+ return self.msg
+
+def _apiwarn(startversion, msg, stacklevel=2, function=None):
+ # below is mostly COPIED from python2.4/warnings.py's def warn()
+ # Get context information
+ if isinstance(stacklevel, str):
+ frame = sys._getframe(1)
+ level = 1
+ found = frame.f_code.co_filename.find(stacklevel) != -1
+ while frame:
+ co = frame.f_code
+ if co.co_filename.find(stacklevel) == -1:
+ if found:
+ stacklevel = level
+ break
+ else:
+ found = True
+ level += 1
+ frame = frame.f_back
+ else:
+ stacklevel = 1
+ msg = "%s (since version %s)" %(msg, startversion)
+ warn(msg, stacklevel=stacklevel+1, function=function)
+
+def warn(msg, stacklevel=1, function=None):
+ if function is not None:
+ filename = py.std.inspect.getfile(function)
+ lineno = py.code.getrawcode(function).co_firstlineno
+ else:
+ try:
+ caller = sys._getframe(stacklevel)
+ except ValueError:
+ globals = sys.__dict__
+ lineno = 1
+ else:
+ globals = caller.f_globals
+ lineno = caller.f_lineno
+ if '__name__' in globals:
+ module = globals['__name__']
+ else:
+ module = "<string>"
+ filename = globals.get('__file__')
+ if filename:
+ fnl = filename.lower()
+ if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
+ filename = filename[:-1]
+ elif fnl.endswith("$py.class"):
+ filename = filename.replace('$py.class', '.py')
+ else:
+ if module == "__main__":
+ try:
+ filename = sys.argv[0]
+ except AttributeError:
+ # embedded interpreters don't have sys.argv, see bug #839151
+ filename = '__main__'
+ if not filename:
+ filename = module
+ path = py.path.local(filename)
+ warning = DeprecationWarning(msg, path, lineno)
+ py.std.warnings.warn_explicit(warning, category=Warning,
+ filename=str(warning.path),
+ lineno=warning.lineno,
+ registry=py.std.warnings.__dict__.setdefault(
+ "__warningsregistry__", {})
+ )
+
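
A hedged sketch of how the helpers above are typically invoked from library code (old_api is an invented example; whether the warning is actually displayed depends on the active warning filters):

    from py._log.warning import _apiwarn

    def old_api():
        _apiwarn("1.0", "old_api() is deprecated, use new_api() instead", stacklevel=2)
        return 42

    old_api()   # registers a DeprecationWarning pointing at the caller's file and line
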
diff --git a/testing/web-platform/tests/tools/py/py/_path/__init__.py b/testing/web-platform/tests/tools/py/py/_path/__init__.py
new file mode 100644
index 000000000..51f3246f8
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_path/__init__.py
@@ -0,0 +1 @@
+""" unified file system api """
diff --git a/testing/web-platform/tests/tools/py/py/_path/cacheutil.py b/testing/web-platform/tests/tools/py/py/_path/cacheutil.py
new file mode 100644
index 000000000..992250475
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_path/cacheutil.py
@@ -0,0 +1,114 @@
+"""
+This module contains multithread-safe cache implementations.
+
+All Caches have
+
+ getorbuild(key, builder)
+ delentry(key)
+
+methods and allow configuration when instantiating the cache class.
+"""
+from time import time as gettime
+
+class BasicCache(object):
+ def __init__(self, maxentries=128):
+ self.maxentries = maxentries
+ self.prunenum = int(maxentries - maxentries/8)
+ self._dict = {}
+
+ def clear(self):
+ self._dict.clear()
+
+ def _getentry(self, key):
+ return self._dict[key]
+
+ def _putentry(self, key, entry):
+ self._prunelowestweight()
+ self._dict[key] = entry
+
+ def delentry(self, key, raising=False):
+ try:
+ del self._dict[key]
+ except KeyError:
+ if raising:
+ raise
+
+ def getorbuild(self, key, builder):
+ try:
+ entry = self._getentry(key)
+ except KeyError:
+ entry = self._build(key, builder)
+ self._putentry(key, entry)
+ return entry.value
+
+ def _prunelowestweight(self):
+ """ prune out entries with lowest weight. """
+ numentries = len(self._dict)
+ if numentries >= self.maxentries:
+ # evict according to entry's weight
+ items = [(entry.weight, key)
+ for key, entry in self._dict.items()]
+ items.sort()
+ index = numentries - self.prunenum
+ if index > 0:
+ for weight, key in items[:index]:
+ # in MT situations the element might be gone
+ self.delentry(key, raising=False)
+
+class BuildcostAccessCache(BasicCache):
+ """ A BuildTime/Access-counting cache implementation.
+ the weight of a value is computed as the product of
+
+ num-accesses-of-a-value * time-to-build-the-value
+
+ The values with the least such weights are evicted
+    if the cache maxentries threshold is exceeded.
+ For implementation flexibility more than one object
+ might be evicted at a time.
+ """
+ # time function to use for measuring build-times
+
+ def _build(self, key, builder):
+ start = gettime()
+ val = builder()
+ end = gettime()
+ return WeightedCountingEntry(val, end-start)
+
+
+class WeightedCountingEntry(object):
+ def __init__(self, value, oneweight):
+ self._value = value
+ self.weight = self._oneweight = oneweight
+
+ def value(self):
+ self.weight += self._oneweight
+ return self._value
+ value = property(value)
+
+class AgingCache(BasicCache):
+ """ This cache prunes out cache entries that are too old.
+ """
+ def __init__(self, maxentries=128, maxseconds=10.0):
+ super(AgingCache, self).__init__(maxentries)
+ self.maxseconds = maxseconds
+
+ def _getentry(self, key):
+ entry = self._dict[key]
+ if entry.isexpired():
+ self.delentry(key)
+ raise KeyError(key)
+ return entry
+
+ def _build(self, key, builder):
+ val = builder()
+ entry = AgingEntry(val, gettime() + self.maxseconds)
+ return entry
+
+class AgingEntry(object):
+ def __init__(self, value, expirationtime):
+ self.value = value
+ self.weight = expirationtime
+
+ def isexpired(self):
+ t = gettime()
+ return t >= self.weight
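
A minimal usage sketch for the two cache flavours above (module path as added by this hunk; the builder is any zero-argument callable):

    from py._path.cacheutil import BuildcostAccessCache, AgingCache

    cache = BuildcostAccessCache(maxentries=128)

    def expensive():
        return sum(range(100000))    # stand-in for a slow computation

    v1 = cache.getorbuild("answer", expensive)   # built, timed and stored
    v2 = cache.getorbuild("answer", expensive)   # served from the cache, weight increases

    aging = AgingCache(maxentries=16, maxseconds=0.5)
    aging.getorbuild("token", lambda: "abc")     # entry expires ~0.5s after being built
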
diff --git a/testing/web-platform/tests/tools/py/py/_path/common.py b/testing/web-platform/tests/tools/py/py/_path/common.py
new file mode 100644
index 000000000..d407434cb
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_path/common.py
@@ -0,0 +1,403 @@
+"""
+"""
+import os, sys, posixpath
+import py
+
+# Moved from local.py.
+iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
+
+class Checkers:
+ _depend_on_existence = 'exists', 'link', 'dir', 'file'
+
+ def __init__(self, path):
+ self.path = path
+
+ def dir(self):
+ raise NotImplementedError
+
+ def file(self):
+ raise NotImplementedError
+
+ def dotfile(self):
+ return self.path.basename.startswith('.')
+
+ def ext(self, arg):
+ if not arg.startswith('.'):
+ arg = '.' + arg
+ return self.path.ext == arg
+
+ def exists(self):
+ raise NotImplementedError
+
+ def basename(self, arg):
+ return self.path.basename == arg
+
+ def basestarts(self, arg):
+ return self.path.basename.startswith(arg)
+
+ def relto(self, arg):
+ return self.path.relto(arg)
+
+ def fnmatch(self, arg):
+ return self.path.fnmatch(arg)
+
+ def endswith(self, arg):
+ return str(self.path).endswith(arg)
+
+ def _evaluate(self, kw):
+ for name, value in kw.items():
+ invert = False
+ meth = None
+ try:
+ meth = getattr(self, name)
+ except AttributeError:
+ if name[:3] == 'not':
+ invert = True
+ try:
+ meth = getattr(self, name[3:])
+ except AttributeError:
+ pass
+ if meth is None:
+ raise TypeError(
+ "no %r checker available for %r" % (name, self.path))
+ try:
+ if py.code.getrawcode(meth).co_argcount > 1:
+ if (not meth(value)) ^ invert:
+ return False
+ else:
+ if bool(value) ^ bool(meth()) ^ invert:
+ return False
+ except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
+ # EBUSY feels not entirely correct,
+                # but it's kind of necessary since ENOMEDIUM
+ # is not accessible in python
+ for name in self._depend_on_existence:
+ if name in kw:
+ if kw.get(name):
+ return False
+ name = 'not' + name
+ if name in kw:
+ if not kw.get(name):
+ return False
+ return True
+
+class NeverRaised(Exception):
+ pass
+
+class PathBase(object):
+ """ shared implementation for filesystem path objects."""
+ Checkers = Checkers
+
+ def __div__(self, other):
+ return self.join(str(other))
+ __truediv__ = __div__ # py3k
+
+ def basename(self):
+ """ basename part of path. """
+ return self._getbyspec('basename')[0]
+ basename = property(basename, None, None, basename.__doc__)
+
+ def dirname(self):
+ """ dirname part of path. """
+ return self._getbyspec('dirname')[0]
+ dirname = property(dirname, None, None, dirname.__doc__)
+
+ def purebasename(self):
+ """ pure base name of the path."""
+ return self._getbyspec('purebasename')[0]
+ purebasename = property(purebasename, None, None, purebasename.__doc__)
+
+ def ext(self):
+ """ extension of the path (including the '.')."""
+ return self._getbyspec('ext')[0]
+ ext = property(ext, None, None, ext.__doc__)
+
+ def dirpath(self, *args, **kwargs):
+ """ return the directory path joined with any given path arguments. """
+ return self.new(basename='').join(*args, **kwargs)
+
+ def read_binary(self):
+ """ read and return a bytestring from reading the path. """
+ with self.open('rb') as f:
+ return f.read()
+
+ def read_text(self, encoding):
+ """ read and return a Unicode string from reading the path. """
+ with self.open("r", encoding=encoding) as f:
+ return f.read()
+
+
+ def read(self, mode='r'):
+ """ read and return a bytestring from reading the path. """
+ with self.open(mode) as f:
+ return f.read()
+
+ def readlines(self, cr=1):
+        """ read and return a list of lines from the path. if cr is False,
+        the newline will be removed from the end of each line. """
+ if not cr:
+ content = self.read('rU')
+ return content.split('\n')
+ else:
+ f = self.open('rU')
+ try:
+ return f.readlines()
+ finally:
+ f.close()
+
+ def load(self):
+ """ (deprecated) return object unpickled from self.read() """
+ f = self.open('rb')
+ try:
+ return py.error.checked_call(py.std.pickle.load, f)
+ finally:
+ f.close()
+
+ def move(self, target):
+ """ move this path to target. """
+ if target.relto(self):
+ raise py.error.EINVAL(target,
+ "cannot move path into a subdirectory of itself")
+ try:
+ self.rename(target)
+ except py.error.EXDEV: # invalid cross-device link
+ self.copy(target)
+ self.remove()
+
+ def __repr__(self):
+ """ return a string representation of this path. """
+ return repr(str(self))
+
+ def check(self, **kw):
+ """ check a path for existence and properties.
+
+ Without arguments, return True if the path exists, otherwise False.
+
+ valid checkers::
+
+ file=1 # is a file
+ file=0 # is not a file (may not even exist)
+ dir=1 # is a dir
+ link=1 # is a link
+ exists=1 # exists
+
+ You can specify multiple checker definitions, for example::
+
+ path.check(file=1, link=1) # a link pointing to a file
+ """
+ if not kw:
+ kw = {'exists' : 1}
+ return self.Checkers(self)._evaluate(kw)
+
+ def fnmatch(self, pattern):
+ """return true if the basename/fullname matches the glob-'pattern'.
+
+ valid pattern characters::
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ If the pattern contains a path-separator then the full path
+ is used for pattern matching and a '*' is prepended to the
+ pattern.
+
+ if the pattern doesn't contain a path-separator the pattern
+ is only matched against the basename.
+ """
+ return FNMatcher(pattern)(self)
+
+ def relto(self, relpath):
+ """ return a string which is the relative part of the path
+ to the given 'relpath'.
+ """
+ if not isinstance(relpath, (str, PathBase)):
+ raise TypeError("%r: not a string or path object" %(relpath,))
+ strrelpath = str(relpath)
+ if strrelpath and strrelpath[-1] != self.sep:
+ strrelpath += self.sep
+ #assert strrelpath[-1] == self.sep
+ #assert strrelpath[-2] != self.sep
+ strself = self.strpath
+ if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
+ if os.path.normcase(strself).startswith(
+ os.path.normcase(strrelpath)):
+ return strself[len(strrelpath):]
+ elif strself.startswith(strrelpath):
+ return strself[len(strrelpath):]
+ return ""
+
+ def ensure_dir(self, *args):
+ """ ensure the path joined with args is a directory. """
+ return self.ensure(*args, **{"dir": True})
+
+ def bestrelpath(self, dest):
+ """ return a string which is a relative path from self
+ (assumed to be a directory) to dest such that
+        self.join(bestrelpath) == dest.  If no such
+        path can be determined, return dest.
+ """
+ try:
+ if self == dest:
+ return os.curdir
+ base = self.common(dest)
+ if not base: # can be the case on windows
+ return str(dest)
+ self2base = self.relto(base)
+ reldest = dest.relto(base)
+ if self2base:
+ n = self2base.count(self.sep) + 1
+ else:
+ n = 0
+ l = [os.pardir] * n
+ if reldest:
+ l.append(reldest)
+ target = dest.sep.join(l)
+ return target
+ except AttributeError:
+ return str(dest)
+
+ def exists(self):
+ return self.check()
+
+ def isdir(self):
+ return self.check(dir=1)
+
+ def isfile(self):
+ return self.check(file=1)
+
+ def parts(self, reverse=False):
+ """ return a root-first list of all ancestor directories
+ plus the path itself.
+ """
+ current = self
+ l = [self]
+ while 1:
+ last = current
+ current = current.dirpath()
+ if last == current:
+ break
+ l.append(current)
+ if not reverse:
+ l.reverse()
+ return l
+
+ def common(self, other):
+ """ return the common part shared with the other path
+ or None if there is no common part.
+ """
+ last = None
+ for x, y in zip(self.parts(), other.parts()):
+ if x != y:
+ return last
+ last = x
+ return last
+
+ def __add__(self, other):
+ """ return new path object with 'other' added to the basename"""
+ return self.new(basename=self.basename+str(other))
+
+ def __cmp__(self, other):
+ """ return sort value (-1, 0, +1). """
+ try:
+ return cmp(self.strpath, other.strpath)
+ except AttributeError:
+ return cmp(str(self), str(other)) # self.path, other.path)
+
+ def __lt__(self, other):
+ try:
+ return self.strpath < other.strpath
+ except AttributeError:
+ return str(self) < str(other)
+
+ def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
+ """ yields all paths below the current one
+
+ fil is a filter (glob pattern or callable), if not matching the
+ path will not be yielded, defaulting to None (everything is
+ returned)
+
+ rec is a filter (glob pattern or callable) that controls whether
+ a node is descended, defaulting to None
+
+        ignore is an Exception class that is ignored when calling listdir()
+ on any of the paths (by default, all exceptions are reported)
+
+ bf if True will cause a breadthfirst search instead of the
+ default depthfirst. Default: False
+
+ sort if True will sort entries within each directory level.
+ """
+ for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
+ yield x
+
+ def _sortlist(self, res, sort):
+ if sort:
+ if hasattr(sort, '__call__'):
+ res.sort(sort)
+ else:
+ res.sort()
+
+ def samefile(self, other):
+ """ return True if other refers to the same stat object as self. """
+ return self.strpath == str(other)
+
+class Visitor:
+ def __init__(self, fil, rec, ignore, bf, sort):
+ if isinstance(fil, str):
+ fil = FNMatcher(fil)
+ if isinstance(rec, str):
+ self.rec = FNMatcher(rec)
+ elif not hasattr(rec, '__call__') and rec:
+ self.rec = lambda path: True
+ else:
+ self.rec = rec
+ self.fil = fil
+ self.ignore = ignore
+ self.breadthfirst = bf
+ self.optsort = sort and sorted or (lambda x: x)
+
+ def gen(self, path):
+ try:
+ entries = path.listdir()
+ except self.ignore:
+ return
+ rec = self.rec
+ dirs = self.optsort([p for p in entries
+ if p.check(dir=1) and (rec is None or rec(p))])
+ if not self.breadthfirst:
+ for subdir in dirs:
+ for p in self.gen(subdir):
+ yield p
+ for p in self.optsort(entries):
+ if self.fil is None or self.fil(p):
+ yield p
+ if self.breadthfirst:
+ for subdir in dirs:
+ for p in self.gen(subdir):
+ yield p
+
+class FNMatcher:
+ def __init__(self, pattern):
+ self.pattern = pattern
+
+ def __call__(self, path):
+ pattern = self.pattern
+
+ if (pattern.find(path.sep) == -1 and
+ iswin32 and
+ pattern.find(posixpath.sep) != -1):
+ # Running on Windows, the pattern has no Windows path separators,
+ # and the pattern has one or more Posix path separators. Replace
+ # the Posix path separators with the Windows path separator.
+ pattern = pattern.replace(posixpath.sep, path.sep)
+
+ if pattern.find(path.sep) == -1:
+ name = path.basename
+ else:
+ name = str(path) # path.strpath # XXX svn?
+ if not os.path.isabs(pattern):
+ pattern = '*' + path.sep + pattern
+ return py.std.fnmatch.fnmatch(name, pattern)
+
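
A hedged sketch of the shared path API above, exercised through py.path.local (the concrete subclass added later in this commit); it assumes a POSIX system where /tmp exists:

    import py

    p = py.path.local("/tmp")
    p.check(dir=1)                                # True if /tmp is a directory
    p.join("notes.txt").check(file=1, exists=1)   # combined checkers, see check() above

    # visit() walks the tree; string filters go through FNMatcher, callables are used as-is
    for x in p.visit(fil="*.txt", rec=lambda d: d.basename != ".git"):
        print(x.relto(p))
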
diff --git a/testing/web-platform/tests/tools/py/py/_path/local.py b/testing/web-platform/tests/tools/py/py/_path/local.py
new file mode 100644
index 000000000..d569404ec
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_path/local.py
@@ -0,0 +1,911 @@
+"""
+local path implementation.
+"""
+from __future__ import with_statement
+
+from contextlib import contextmanager
+import sys, os, re, atexit, io
+import py
+from py._path import common
+from py._path.common import iswin32
+from stat import S_ISLNK, S_ISDIR, S_ISREG
+
+from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
+
+if sys.version_info > (3,0):
+ def map_as_list(func, iter):
+ return list(map(func, iter))
+else:
+ map_as_list = map
+
+class Stat(object):
+ def __getattr__(self, name):
+ return getattr(self._osstatresult, "st_" + name)
+
+ def __init__(self, path, osstatresult):
+ self.path = path
+ self._osstatresult = osstatresult
+
+ @property
+ def owner(self):
+ if iswin32:
+ raise NotImplementedError("XXX win32")
+ import pwd
+ entry = py.error.checked_call(pwd.getpwuid, self.uid)
+ return entry[0]
+
+ @property
+ def group(self):
+ """ return group name of file. """
+ if iswin32:
+ raise NotImplementedError("XXX win32")
+ import grp
+ entry = py.error.checked_call(grp.getgrgid, self.gid)
+ return entry[0]
+
+ def isdir(self):
+ return S_ISDIR(self._osstatresult.st_mode)
+
+ def isfile(self):
+ return S_ISREG(self._osstatresult.st_mode)
+
+ def islink(self):
+ st = self.path.lstat()
+ return S_ISLNK(self._osstatresult.st_mode)
+
+class PosixPath(common.PathBase):
+ def chown(self, user, group, rec=0):
+ """ change ownership to the given user and group.
+ user and group may be specified by a number or
+ by a name. if rec is True change ownership
+ recursively.
+ """
+ uid = getuserid(user)
+ gid = getgroupid(group)
+ if rec:
+ for x in self.visit(rec=lambda x: x.check(link=0)):
+ if x.check(link=0):
+ py.error.checked_call(os.chown, str(x), uid, gid)
+ py.error.checked_call(os.chown, str(self), uid, gid)
+
+ def readlink(self):
+ """ return value of a symbolic link. """
+ return py.error.checked_call(os.readlink, self.strpath)
+
+ def mklinkto(self, oldname):
+ """ posix style hard link to another name. """
+ py.error.checked_call(os.link, str(oldname), str(self))
+
+ def mksymlinkto(self, value, absolute=1):
+ """ create a symbolic link with the given value (pointing to another name). """
+ if absolute:
+ py.error.checked_call(os.symlink, str(value), self.strpath)
+ else:
+ base = self.common(value)
+ # with posix local paths '/' is always a common base
+ relsource = self.__class__(value).relto(base)
+ reldest = self.relto(base)
+ n = reldest.count(self.sep)
+ target = self.sep.join(('..', )*n + (relsource, ))
+ py.error.checked_call(os.symlink, target, self.strpath)
+
+def getuserid(user):
+ import pwd
+ if not isinstance(user, int):
+ user = pwd.getpwnam(user)[2]
+ return user
+
+def getgroupid(group):
+ import grp
+ if not isinstance(group, int):
+ group = grp.getgrnam(group)[2]
+ return group
+
+FSBase = not iswin32 and PosixPath or common.PathBase
+
+class LocalPath(FSBase):
+ """ object oriented interface to os.path and other local filesystem
+ related information.
+ """
+ class ImportMismatchError(ImportError):
+ """ raised on pyimport() if there is a mismatch of __file__'s"""
+
+ sep = os.sep
+ class Checkers(common.Checkers):
+ def _stat(self):
+ try:
+ return self._statcache
+ except AttributeError:
+ try:
+ self._statcache = self.path.stat()
+ except py.error.ELOOP:
+ self._statcache = self.path.lstat()
+ return self._statcache
+
+ def dir(self):
+ return S_ISDIR(self._stat().mode)
+
+ def file(self):
+ return S_ISREG(self._stat().mode)
+
+ def exists(self):
+ return self._stat()
+
+ def link(self):
+ st = self.path.lstat()
+ return S_ISLNK(st.mode)
+
+ def __init__(self, path=None, expanduser=False):
+ """ Initialize and return a local Path instance.
+
+ Path can be relative to the current directory.
+ If path is None it defaults to the current working directory.
+ If expanduser is True, tilde-expansion is performed.
+ Note that Path instances always carry an absolute path.
+ Note also that passing in a local path object will simply return
+ the exact same path object. Use new() to get a new copy.
+ """
+ if path is None:
+ self.strpath = py.error.checked_call(os.getcwd)
+ elif isinstance(path, common.PathBase):
+ self.strpath = path.strpath
+ elif isinstance(path, py.builtin._basestring):
+ if expanduser:
+ path = os.path.expanduser(path)
+ self.strpath = abspath(path)
+ else:
+ raise ValueError("can only pass None, Path instances "
+ "or non-empty strings to LocalPath")
+
+ def __hash__(self):
+ return hash(self.strpath)
+
+ def __eq__(self, other):
+ s1 = self.strpath
+ s2 = getattr(other, "strpath", other)
+ if iswin32:
+ s1 = s1.lower()
+ try:
+ s2 = s2.lower()
+ except AttributeError:
+ return False
+ return s1 == s2
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __lt__(self, other):
+ return self.strpath < getattr(other, "strpath", other)
+
+ def __gt__(self, other):
+ return self.strpath > getattr(other, "strpath", other)
+
+ def samefile(self, other):
+ """ return True if 'other' references the same file as 'self'.
+ """
+ other = getattr(other, "strpath", other)
+ if not isabs(other):
+ other = abspath(other)
+ if self == other:
+ return True
+ if iswin32:
+ return False # there is no samefile
+ return py.error.checked_call(
+ os.path.samefile, self.strpath, other)
+
+ def remove(self, rec=1, ignore_errors=False):
+ """ remove a file or directory (or a directory tree if rec=1).
+ if ignore_errors is True, errors while removing directories will
+ be ignored.
+ """
+ if self.check(dir=1, link=0):
+ if rec:
+ # force remove of readonly files on windows
+ if iswin32:
+                    self.chmod(448, rec=1) # octal 0700
+ py.error.checked_call(py.std.shutil.rmtree, self.strpath,
+ ignore_errors=ignore_errors)
+ else:
+ py.error.checked_call(os.rmdir, self.strpath)
+ else:
+ if iswin32:
+                self.chmod(448) # octal 0700
+ py.error.checked_call(os.remove, self.strpath)
+
+ def computehash(self, hashtype="md5", chunksize=524288):
+ """ return hexdigest of hashvalue for this file. """
+ try:
+ try:
+ import hashlib as mod
+ except ImportError:
+ if hashtype == "sha1":
+ hashtype = "sha"
+ mod = __import__(hashtype)
+ hash = getattr(mod, hashtype)()
+ except (AttributeError, ImportError):
+ raise ValueError("Don't know how to compute %r hash" %(hashtype,))
+ f = self.open('rb')
+ try:
+ while 1:
+ buf = f.read(chunksize)
+ if not buf:
+ return hash.hexdigest()
+ hash.update(buf)
+ finally:
+ f.close()
+
+ def new(self, **kw):
+ """ create a modified version of this path.
+ the following keyword arguments modify various path parts::
+
+ a:/some/path/to/a/file.ext
+ xx drive
+ xxxxxxxxxxxxxxxxx dirname
+ xxxxxxxx basename
+ xxxx purebasename
+ xxx ext
+ """
+ obj = object.__new__(self.__class__)
+ if not kw:
+ obj.strpath = self.strpath
+ return obj
+ drive, dirname, basename, purebasename,ext = self._getbyspec(
+ "drive,dirname,basename,purebasename,ext")
+ if 'basename' in kw:
+ if 'purebasename' in kw or 'ext' in kw:
+ raise ValueError("invalid specification %r" % kw)
+ else:
+ pb = kw.setdefault('purebasename', purebasename)
+ try:
+ ext = kw['ext']
+ except KeyError:
+ pass
+ else:
+ if ext and not ext.startswith('.'):
+ ext = '.' + ext
+ kw['basename'] = pb + ext
+
+ if ('dirname' in kw and not kw['dirname']):
+ kw['dirname'] = drive
+ else:
+ kw.setdefault('dirname', dirname)
+ kw.setdefault('sep', self.sep)
+ obj.strpath = normpath(
+ "%(dirname)s%(sep)s%(basename)s" % kw)
+ return obj
+
+ def _getbyspec(self, spec):
+ """ see new for what 'spec' can be. """
+ res = []
+ parts = self.strpath.split(self.sep)
+
+ args = filter(None, spec.split(',') )
+ append = res.append
+ for name in args:
+ if name == 'drive':
+ append(parts[0])
+ elif name == 'dirname':
+ append(self.sep.join(parts[:-1]))
+ else:
+ basename = parts[-1]
+ if name == 'basename':
+ append(basename)
+ else:
+ i = basename.rfind('.')
+ if i == -1:
+ purebasename, ext = basename, ''
+ else:
+ purebasename, ext = basename[:i], basename[i:]
+ if name == 'purebasename':
+ append(purebasename)
+ elif name == 'ext':
+ append(ext)
+ else:
+ raise ValueError("invalid part specification %r" % name)
+ return res
+
+ def dirpath(self, *args, **kwargs):
+ """ return the directory path joined with any given path arguments. """
+ if not kwargs:
+ path = object.__new__(self.__class__)
+ path.strpath = dirname(self.strpath)
+ if args:
+ path = path.join(*args)
+ return path
+ return super(LocalPath, self).dirpath(*args, **kwargs)
+
+ def join(self, *args, **kwargs):
+ """ return a new path by appending all 'args' as path
+ components. if abs=1 is used restart from root if any
+ of the args is an absolute path.
+ """
+ sep = self.sep
+ strargs = [getattr(arg, "strpath", arg) for arg in args]
+ strpath = self.strpath
+ if kwargs.get('abs'):
+ newargs = []
+ for arg in reversed(strargs):
+ if isabs(arg):
+ strpath = arg
+ strargs = newargs
+ break
+ newargs.insert(0, arg)
+ for arg in strargs:
+ arg = arg.strip(sep)
+ if iswin32:
+ # allow unix style paths even on windows.
+ arg = arg.strip('/')
+ arg = arg.replace('/', sep)
+ strpath = strpath + sep + arg
+ obj = object.__new__(self.__class__)
+ obj.strpath = normpath(strpath)
+ return obj
+
+ def open(self, mode='r', ensure=False, encoding=None):
+ """ return an opened file with the given mode.
+
+ If ensure is True, create parent directories if needed.
+ """
+ if ensure:
+ self.dirpath().ensure(dir=1)
+ if encoding:
+ return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
+ return py.error.checked_call(open, self.strpath, mode)
+
+ def _fastjoin(self, name):
+ child = object.__new__(self.__class__)
+ child.strpath = self.strpath + self.sep + name
+ return child
+
+ def islink(self):
+ return islink(self.strpath)
+
+ def check(self, **kw):
+ if not kw:
+ return exists(self.strpath)
+ if len(kw) == 1:
+ if "dir" in kw:
+ return not kw["dir"] ^ isdir(self.strpath)
+ if "file" in kw:
+ return not kw["file"] ^ isfile(self.strpath)
+ return super(LocalPath, self).check(**kw)
+
+ _patternchars = set("*?[" + os.path.sep)
+ def listdir(self, fil=None, sort=None):
+ """ list directory contents, possibly filter by the given fil func
+ and possibly sorted.
+ """
+ if fil is None and sort is None:
+ names = py.error.checked_call(os.listdir, self.strpath)
+ return map_as_list(self._fastjoin, names)
+ if isinstance(fil, py.builtin._basestring):
+ if not self._patternchars.intersection(fil):
+ child = self._fastjoin(fil)
+ if exists(child.strpath):
+ return [child]
+ return []
+ fil = common.FNMatcher(fil)
+ names = py.error.checked_call(os.listdir, self.strpath)
+ res = []
+ for name in names:
+ child = self._fastjoin(name)
+ if fil is None or fil(child):
+ res.append(child)
+ self._sortlist(res, sort)
+ return res
+
+ def size(self):
+ """ return size of the underlying file object """
+ return self.stat().size
+
+ def mtime(self):
+ """ return last modification time of the path. """
+ return self.stat().mtime
+
+ def copy(self, target, mode=False):
+ """ copy path to target."""
+ if self.check(file=1):
+ if target.check(dir=1):
+ target = target.join(self.basename)
+ assert self!=target
+ copychunked(self, target)
+ if mode:
+ copymode(self.strpath, target.strpath)
+ else:
+ def rec(p):
+ return p.check(link=0)
+ for x in self.visit(rec=rec):
+ relpath = x.relto(self)
+ newx = target.join(relpath)
+ newx.dirpath().ensure(dir=1)
+ if x.check(link=1):
+ newx.mksymlinkto(x.readlink())
+ continue
+ elif x.check(file=1):
+ copychunked(x, newx)
+ elif x.check(dir=1):
+ newx.ensure(dir=1)
+ if mode:
+ copymode(x.strpath, newx.strpath)
+
+ def rename(self, target):
+ """ rename this path to target. """
+ target = getattr(target, "strpath", target)
+ return py.error.checked_call(os.rename, self.strpath, target)
+
+ def dump(self, obj, bin=1):
+ """ pickle object into path location"""
+ f = self.open('wb')
+ try:
+ py.error.checked_call(py.std.pickle.dump, obj, f, bin)
+ finally:
+ f.close()
+
+ def mkdir(self, *args):
+ """ create & return the directory joined with args. """
+ p = self.join(*args)
+ py.error.checked_call(os.mkdir, getattr(p, "strpath", p))
+ return p
+
+ def write_binary(self, data, ensure=False):
+ """ write binary data into path. If ensure is True create
+ missing parent directories.
+ """
+ if ensure:
+ self.dirpath().ensure(dir=1)
+ with self.open('wb') as f:
+ f.write(data)
+
+ def write_text(self, data, encoding, ensure=False):
+ """ write text data into path using the specified encoding.
+ If ensure is True create missing parent directories.
+ """
+ if ensure:
+ self.dirpath().ensure(dir=1)
+ with self.open('w', encoding=encoding) as f:
+ f.write(data)
+
+ def write(self, data, mode='w', ensure=False):
+ """ write data into path. If ensure is True create
+ missing parent directories.
+ """
+ if ensure:
+ self.dirpath().ensure(dir=1)
+ if 'b' in mode:
+ if not py.builtin._isbytes(data):
+ raise ValueError("can only process bytes")
+ else:
+ if not py.builtin._istext(data):
+ if not py.builtin._isbytes(data):
+ data = str(data)
+ else:
+ data = py.builtin._totext(data, sys.getdefaultencoding())
+ f = self.open(mode)
+ try:
+ f.write(data)
+ finally:
+ f.close()
+
+ def _ensuredirs(self):
+ parent = self.dirpath()
+ if parent == self:
+ return self
+ if parent.check(dir=0):
+ parent._ensuredirs()
+ if self.check(dir=0):
+ try:
+ self.mkdir()
+ except py.error.EEXIST:
+ # race condition: file/dir created by another thread/process.
+ # complain if it is not a dir
+ if self.check(dir=0):
+ raise
+ return self
+
+ def ensure(self, *args, **kwargs):
+ """ ensure that an args-joined path exists (by default as
+ a file). if you specify a keyword argument 'dir=True'
+ then the path is forced to be a directory path.
+ """
+ p = self.join(*args)
+ if kwargs.get('dir', 0):
+ return p._ensuredirs()
+ else:
+ p.dirpath()._ensuredirs()
+ if not p.check(file=1):
+ p.open('w').close()
+ return p
+
+ def stat(self, raising=True):
+ """ Return an os.stat() tuple. """
+ if raising == True:
+ return Stat(self, py.error.checked_call(os.stat, self.strpath))
+ try:
+ return Stat(self, os.stat(self.strpath))
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ return None
+
+ def lstat(self):
+ """ Return an os.lstat() tuple. """
+ return Stat(self, py.error.checked_call(os.lstat, self.strpath))
+
+ def setmtime(self, mtime=None):
+ """ set modification time for the given path. if 'mtime' is None
+ (the default) then the file's mtime is set to current time.
+
+ Note that the resolution for 'mtime' is platform dependent.
+ """
+ if mtime is None:
+ return py.error.checked_call(os.utime, self.strpath, mtime)
+ try:
+ return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
+ except py.error.EINVAL:
+ return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
+
+ def chdir(self):
+ """ change directory to self and return old current directory """
+ try:
+ old = self.__class__()
+ except py.error.ENOENT:
+ old = None
+ py.error.checked_call(os.chdir, self.strpath)
+ return old
+
+
+ @contextmanager
+ def as_cwd(self):
+ """ return context manager which changes to current dir during the
+ managed "with" context. On __enter__ it returns the old dir.
+ """
+ old = self.chdir()
+ try:
+ yield old
+ finally:
+ old.chdir()
+
+ def realpath(self):
+ """ return a new path which contains no symbolic links."""
+ return self.__class__(os.path.realpath(self.strpath))
+
+ def atime(self):
+ """ return last access time of the path. """
+ return self.stat().atime
+
+ def __repr__(self):
+ return 'local(%r)' % self.strpath
+
+ def __str__(self):
+ """ return string representation of the Path. """
+ return self.strpath
+
+ def chmod(self, mode, rec=0):
+ """ change permissions to the given mode. If mode is an
+ integer it directly encodes the os-specific modes.
+ if rec is True perform recursively.
+ """
+ if not isinstance(mode, int):
+ raise TypeError("mode %r must be an integer" % (mode,))
+ if rec:
+ for x in self.visit(rec=rec):
+ py.error.checked_call(os.chmod, str(x), mode)
+ py.error.checked_call(os.chmod, self.strpath, mode)
+
+ def pypkgpath(self):
+ """ return the Python package path by looking for the last
+ directory upwards which still contains an __init__.py.
+ Return None if a pkgpath can not be determined.
+ """
+ pkgpath = None
+ for parent in self.parts(reverse=True):
+ if parent.isdir():
+ if not parent.join('__init__.py').exists():
+ break
+ if not isimportable(parent.basename):
+ break
+ pkgpath = parent
+ return pkgpath
+
+ def _ensuresyspath(self, ensuremode, path):
+ if ensuremode:
+ s = str(path)
+ if ensuremode == "append":
+ if s not in sys.path:
+ sys.path.append(s)
+ else:
+ if s != sys.path[0]:
+ sys.path.insert(0, s)
+
+ def pyimport(self, modname=None, ensuresyspath=True):
+ """ return path as an imported python module.
+
+ If modname is None, look for the containing package
+ and construct an according module name.
+ The module will be put/looked up in sys.modules.
+ if ensuresyspath is True then the root dir for importing
+ the file (taking __init__.py files into account) will
+ be prepended to sys.path if it isn't there already.
+ If ensuresyspath=="append" the root dir will be appended
+ if it isn't already contained in sys.path.
+ if ensuresyspath is False no modification of syspath happens.
+ """
+ if not self.check():
+ raise py.error.ENOENT(self)
+
+ pkgpath = None
+ if modname is None:
+ pkgpath = self.pypkgpath()
+ if pkgpath is not None:
+ pkgroot = pkgpath.dirpath()
+ names = self.new(ext="").relto(pkgroot).split(self.sep)
+ if names[-1] == "__init__":
+ names.pop()
+ modname = ".".join(names)
+ else:
+ pkgroot = self.dirpath()
+ modname = self.purebasename
+
+ self._ensuresyspath(ensuresyspath, pkgroot)
+ __import__(modname)
+ mod = sys.modules[modname]
+ if self.basename == "__init__.py":
+                return mod  # we don't check anything as we might
+                            # be in a namespace package ... too icky to check
+ modfile = mod.__file__
+ if modfile[-4:] in ('.pyc', '.pyo'):
+ modfile = modfile[:-1]
+ elif modfile.endswith('$py.class'):
+ modfile = modfile[:-9] + '.py'
+ if modfile.endswith(os.path.sep + "__init__.py"):
+ if self.basename != "__init__.py":
+ modfile = modfile[:-12]
+ try:
+ issame = self.samefile(modfile)
+ except py.error.ENOENT:
+ issame = False
+ if not issame:
+ raise self.ImportMismatchError(modname, modfile, self)
+ return mod
+ else:
+ try:
+ return sys.modules[modname]
+ except KeyError:
+ # we have a custom modname, do a pseudo-import
+ mod = py.std.types.ModuleType(modname)
+ mod.__file__ = str(self)
+ sys.modules[modname] = mod
+ try:
+ py.builtin.execfile(str(self), mod.__dict__)
+ except:
+ del sys.modules[modname]
+ raise
+ return mod
+
+ def sysexec(self, *argv, **popen_opts):
+ """ return stdout text from executing a system child process,
+ where the 'self' path points to executable.
+ The process is directly invoked and not through a system shell.
+ """
+ from subprocess import Popen, PIPE
+ argv = map_as_list(str, argv)
+ popen_opts['stdout'] = popen_opts['stderr'] = PIPE
+ proc = Popen([str(self)] + argv, **popen_opts)
+ stdout, stderr = proc.communicate()
+ ret = proc.wait()
+ if py.builtin._isbytes(stdout):
+ stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
+ if ret != 0:
+ if py.builtin._isbytes(stderr):
+ stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
+ raise py.process.cmdexec.Error(ret, ret, str(self),
+ stdout, stderr,)
+ return stdout
+
+ def sysfind(cls, name, checker=None, paths=None):
+ """ return a path object found by looking at the systems
+ underlying PATH specification. If the checker is not None
+ it will be invoked to filter matching paths. If a binary
+ cannot be found, None is returned
+        Note: This probably does not work on plain win32 systems
+ but may work on cygwin.
+ """
+ if isabs(name):
+ p = py.path.local(name)
+ if p.check(file=1):
+ return p
+ else:
+ if paths is None:
+ if iswin32:
+ paths = py.std.os.environ['Path'].split(';')
+ if '' not in paths and '.' not in paths:
+ paths.append('.')
+ try:
+ systemroot = os.environ['SYSTEMROOT']
+ except KeyError:
+ pass
+ else:
+ paths = [re.sub('%SystemRoot%', systemroot, path)
+ for path in paths]
+ else:
+ paths = py.std.os.environ['PATH'].split(':')
+ tryadd = []
+ if iswin32:
+ tryadd += os.environ['PATHEXT'].split(os.pathsep)
+ tryadd.append("")
+
+ for x in paths:
+ for addext in tryadd:
+ p = py.path.local(x).join(name, abs=True) + addext
+ try:
+ if p.check(file=1):
+ if checker:
+ if not checker(p):
+ continue
+ return p
+ except py.error.EACCES:
+ pass
+ return None
+ sysfind = classmethod(sysfind)
+
+ def _gethomedir(cls):
+ try:
+ x = os.environ['HOME']
+ except KeyError:
+ try:
+ x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
+ except KeyError:
+ return None
+ return cls(x)
+ _gethomedir = classmethod(_gethomedir)
+
+ #"""
+ #special class constructors for local filesystem paths
+ #"""
+ def get_temproot(cls):
+ """ return the system's temporary directory
+        (where tempfiles are usually created)
+ """
+ return py.path.local(py.std.tempfile.gettempdir())
+ get_temproot = classmethod(get_temproot)
+
+ def mkdtemp(cls, rootdir=None):
+ """ return a Path object pointing to a fresh new temporary directory
+ (which we created ourself).
+ """
+ import tempfile
+ if rootdir is None:
+ rootdir = cls.get_temproot()
+ return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
+ mkdtemp = classmethod(mkdtemp)
+
+ def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
+ lock_timeout = 172800): # two days
+ """ return unique directory with a number greater than the current
+ maximum one. The number is assumed to start directly after prefix.
+ if keep is true directories with a number less than (maxnum-keep)
+ will be removed.
+ """
+ if rootdir is None:
+ rootdir = cls.get_temproot()
+
+ def parse_num(path):
+ """ parse the number out of a path (if it matches the prefix) """
+ bn = path.basename
+ if bn.startswith(prefix):
+ try:
+ return int(bn[len(prefix):])
+ except ValueError:
+ pass
+
+ # compute the maximum number currently in use with the
+ # prefix
+ lastmax = None
+ while True:
+ maxnum = -1
+ for path in rootdir.listdir():
+ num = parse_num(path)
+ if num is not None:
+ maxnum = max(maxnum, num)
+
+ # make the new directory
+ try:
+ udir = rootdir.mkdir(prefix + str(maxnum+1))
+ except py.error.EEXIST:
+ # race condition: another thread/process created the dir
+ # in the meantime. Try counting again
+ if lastmax == maxnum:
+ raise
+ lastmax = maxnum
+ continue
+ break
+
+ # put a .lock file in the new directory that will be removed at
+ # process exit
+ if lock_timeout:
+ lockfile = udir.join('.lock')
+ mypid = os.getpid()
+ if hasattr(lockfile, 'mksymlinkto'):
+ lockfile.mksymlinkto(str(mypid))
+ else:
+ lockfile.write(str(mypid))
+ def try_remove_lockfile():
+ # in a fork() situation, only the last process should
+ # remove the .lock, otherwise the other processes run the
+ # risk of seeing their temporary dir disappear. For now
+ # we remove the .lock in the parent only (i.e. we assume
+ # that the children finish before the parent).
+ if os.getpid() != mypid:
+ return
+ try:
+ lockfile.remove()
+ except py.error.Error:
+ pass
+ atexit.register(try_remove_lockfile)
+
+ # prune old directories
+ if keep:
+ for path in rootdir.listdir():
+ num = parse_num(path)
+ if num is not None and num <= (maxnum - keep):
+ lf = path.join('.lock')
+ try:
+ t1 = lf.lstat().mtime
+ t2 = lockfile.lstat().mtime
+ if not lock_timeout or abs(t2-t1) < lock_timeout:
+ continue # skip directories still locked
+ except py.error.Error:
+ pass # assume that it means that there is no 'lf'
+ try:
+ path.remove(rec=1)
+ except KeyboardInterrupt:
+ raise
+ except: # this might be py.error.Error, WindowsError ...
+ pass
+
+ # make link...
+ try:
+ username = os.environ['USER'] #linux, et al
+ except KeyError:
+ try:
+ username = os.environ['USERNAME'] #windows
+ except KeyError:
+ username = 'current'
+
+ src = str(udir)
+ dest = src[:src.rfind('-')] + '-' + username
+ try:
+ os.unlink(dest)
+ except OSError:
+ pass
+ try:
+ os.symlink(src, dest)
+ except (OSError, AttributeError, NotImplementedError):
+ pass
+
+ return udir
+ make_numbered_dir = classmethod(make_numbered_dir)
+
+def copymode(src, dest):
+ py.std.shutil.copymode(src, dest)
+
+def copychunked(src, dest):
+ chunksize = 524288 # half a meg of bytes
+ fsrc = src.open('rb')
+ try:
+ fdest = dest.open('wb')
+ try:
+ while 1:
+ buf = fsrc.read(chunksize)
+ if not buf:
+ break
+ fdest.write(buf)
+ finally:
+ fdest.close()
+ finally:
+ fsrc.close()
+
+def isimportable(name):
+ if name and (name[0].isalpha() or name[0] == '_'):
+ name = name.replace("_", '')
+ return not name or name.isalnum()
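
A short, hedged tour of the LocalPath API defined above (a sketch only; it writes into a fresh temporary directory and removes it again):

    import py

    tmp = py.path.local.mkdtemp()            # fresh temporary directory
    target = tmp.ensure("sub", "data.txt")   # creates sub/ and an empty file
    target.write("hello")                    # text write (mode 'w')
    assert target.read() == "hello"
    assert target.size() == 5

    copy = tmp.join("copy.txt")
    target.copy(copy)
    assert copy.check(file=1)

    tmp.remove(rec=1)                        # remove the whole tree
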
diff --git a/testing/web-platform/tests/tools/py/py/_path/svnurl.py b/testing/web-platform/tests/tools/py/py/_path/svnurl.py
new file mode 100644
index 000000000..78d71317a
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_path/svnurl.py
@@ -0,0 +1,380 @@
+"""
+module defining a subversion path object based on the external
+command 'svn'. This module aims to work with svn 1.3 and higher
+but might also interact well with earlier versions.
+"""
+
+import os, sys, time, re
+import py
+from py import path, process
+from py._path import common
+from py._path import svnwc as svncommon
+from py._path.cacheutil import BuildcostAccessCache, AgingCache
+
+DEBUG=False
+
+class SvnCommandPath(svncommon.SvnPathBase):
+ """ path implementation that offers access to (possibly remote) subversion
+ repositories. """
+
+ _lsrevcache = BuildcostAccessCache(maxentries=128)
+ _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0)
+
+ def __new__(cls, path, rev=None, auth=None):
+ self = object.__new__(cls)
+ if isinstance(path, cls):
+ rev = path.rev
+ auth = path.auth
+ path = path.strpath
+ svncommon.checkbadchars(path)
+ path = path.rstrip('/')
+ self.strpath = path
+ self.rev = rev
+ self.auth = auth
+ return self
+
+ def __repr__(self):
+ if self.rev == -1:
+ return 'svnurl(%r)' % self.strpath
+ else:
+ return 'svnurl(%r, %r)' % (self.strpath, self.rev)
+
+ def _svnwithrev(self, cmd, *args):
+ """ execute an svn command, append our own url and revision """
+ if self.rev is None:
+ return self._svnwrite(cmd, *args)
+ else:
+ args = ['-r', self.rev] + list(args)
+ return self._svnwrite(cmd, *args)
+
+ def _svnwrite(self, cmd, *args):
+ """ execute an svn command, append our own url """
+ l = ['svn %s' % cmd]
+ args = ['"%s"' % self._escape(item) for item in args]
+ l.extend(args)
+ l.append('"%s"' % self._encodedurl())
+ # fixing the locale because we can't otherwise parse
+ string = " ".join(l)
+ if DEBUG:
+ print("execing %s" % string)
+ out = self._svncmdexecauth(string)
+ return out
+
+ def _svncmdexecauth(self, cmd):
+ """ execute an svn command 'as is' """
+ cmd = svncommon.fixlocale() + cmd
+ if self.auth is not None:
+ cmd += ' ' + self.auth.makecmdoptions()
+ return self._cmdexec(cmd)
+
+ def _cmdexec(self, cmd):
+ try:
+ out = process.cmdexec(cmd)
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if (e.err.find('File Exists') != -1 or
+ e.err.find('File already exists') != -1):
+ raise py.error.EEXIST(self)
+ raise
+ return out
+
+ def _svnpopenauth(self, cmd):
+ """ execute an svn command, return a pipe for reading stdin """
+ cmd = svncommon.fixlocale() + cmd
+ if self.auth is not None:
+ cmd += ' ' + self.auth.makecmdoptions()
+ return self._popen(cmd)
+
+ def _popen(self, cmd):
+ return os.popen(cmd)
+
+ def _encodedurl(self):
+ return self._escape(self.strpath)
+
+ def _norev_delentry(self, path):
+ auth = self.auth and self.auth.makecmdoptions() or None
+ self._lsnorevcache.delentry((str(path), auth))
+
+ def open(self, mode='r'):
+ """ return an opened file with the given mode. """
+ if mode not in ("r", "rU",):
+ raise ValueError("mode %r not supported" % (mode,))
+ assert self.check(file=1) # svn cat returns an empty file otherwise
+ if self.rev is None:
+ return self._svnpopenauth('svn cat "%s"' % (
+ self._escape(self.strpath), ))
+ else:
+ return self._svnpopenauth('svn cat -r %s "%s"' % (
+ self.rev, self._escape(self.strpath)))
+
+ def dirpath(self, *args, **kwargs):
+ """ return the directory path of the current path joined
+ with any given path arguments.
+ """
+ l = self.strpath.split(self.sep)
+ if len(l) < 4:
+ raise py.error.EINVAL(self, "base is not valid")
+ elif len(l) == 4:
+ return self.join(*args, **kwargs)
+ else:
+ return self.new(basename='').join(*args, **kwargs)
+
+ # modifying methods (cache must be invalidated)
+ def mkdir(self, *args, **kwargs):
+ """ create & return the directory joined with args.
+ pass a 'msg' keyword argument to set the commit message.
+ """
+ commit_msg = kwargs.get('msg', "mkdir by py lib invocation")
+ createpath = self.join(*args)
+ createpath._svnwrite('mkdir', '-m', commit_msg)
+ self._norev_delentry(createpath.dirpath())
+ return createpath
+
+ def copy(self, target, msg='copied by py lib invocation'):
+ """ copy path to target with checkin message msg."""
+ if getattr(target, 'rev', None) is not None:
+ raise py.error.EINVAL(target, "revisions are immutable")
+ self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg,
+ self._escape(self), self._escape(target)))
+ self._norev_delentry(target.dirpath())
+
+ def rename(self, target, msg="renamed by py lib invocation"):
+ """ rename this path to target with checkin message msg. """
+ if getattr(self, 'rev', None) is not None:
+ raise py.error.EINVAL(self, "revisions are immutable")
+ self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %(
+ msg, self._escape(self), self._escape(target)))
+ self._norev_delentry(self.dirpath())
+ self._norev_delentry(self)
+
+ def remove(self, rec=1, msg='removed by py lib invocation'):
+ """ remove a file or directory (or a directory tree if rec=1) with
+checkin message msg."""
+ if self.rev is not None:
+ raise py.error.EINVAL(self, "revisions are immutable")
+ self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self)))
+ self._norev_delentry(self.dirpath())
+
+ def export(self, topath):
+ """ export to a local path
+
+ topath should not exist prior to calling this; returns a
+ py.path.local instance
+ """
+ topath = py.path.local(topath)
+ args = ['"%s"' % (self._escape(self),),
+ '"%s"' % (self._escape(topath),)]
+ if self.rev is not None:
+ args = ['-r', str(self.rev)] + args
+ self._svncmdexecauth('svn export %s' % (' '.join(args),))
+ return topath
+
+ def ensure(self, *args, **kwargs):
+ """ ensure that an args-joined path exists (by default as
+ a file). If you specify a keyword argument 'dir=True'
+ then the path is forced to be a directory path.
+ """
+ if getattr(self, 'rev', None) is not None:
+ raise py.error.EINVAL(self, "revisions are immutable")
+ target = self.join(*args)
+ dir = kwargs.get('dir', 0)
+ for x in target.parts(reverse=True):
+ if x.check():
+ break
+ else:
+ raise py.error.ENOENT(target, "has no valid base!")
+ if x == target:
+ if not x.check(dir=dir):
+ raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x)
+ return x
+ tocreate = target.relto(x)
+ basename = tocreate.split(self.sep, 1)[0]
+ tempdir = py.path.local.mkdtemp()
+ try:
+ tempdir.ensure(tocreate, dir=dir)
+ cmd = 'svn import -m "%s" "%s" "%s"' % (
+ "ensure %s" % self._escape(tocreate),
+ self._escape(tempdir.join(basename)),
+ x.join(basename)._encodedurl())
+ self._svncmdexecauth(cmd)
+ self._norev_delentry(x)
+ finally:
+ tempdir.remove()
+ return target
+
+ # end of modifying methods
+ def _propget(self, name):
+ res = self._svnwithrev('propget', name)
+ return res[:-1] # strip trailing newline
+
+ def _proplist(self):
+ res = self._svnwithrev('proplist')
+ lines = res.split('\n')
+ lines = [x.strip() for x in lines[1:]]
+ return svncommon.PropListDict(self, lines)
+
+ def info(self):
+ """ return an Info structure with svn-provided information. """
+ parent = self.dirpath()
+ nameinfo_seq = parent._listdir_nameinfo()
+ bn = self.basename
+ for name, info in nameinfo_seq:
+ if name == bn:
+ return info
+ raise py.error.ENOENT(self)
+
+
+ def _listdir_nameinfo(self):
+ """ return sequence of name-info directory entries of self """
+ def builder():
+ try:
+ res = self._svnwithrev('ls', '-v')
+ except process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if e.err.find('non-existent in that revision') != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find("E200009:") != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find('File not found') != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find('not part of a repository')!=-1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find('Unable to open')!=-1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.lower().find('method not allowed')!=-1:
+ raise py.error.EACCES(self, e.err)
+ raise py.error.Error(e.err)
+ lines = res.split('\n')
+ nameinfo_seq = []
+ for lsline in lines:
+ if lsline:
+ info = InfoSvnCommand(lsline)
+ if info._name != '.': # svn 1.5 produces '.' dirs,
+ nameinfo_seq.append((info._name, info))
+ nameinfo_seq.sort()
+ return nameinfo_seq
+ auth = self.auth and self.auth.makecmdoptions() or None
+ if self.rev is not None:
+ return self._lsrevcache.getorbuild((self.strpath, self.rev, auth),
+ builder)
+ else:
+ return self._lsnorevcache.getorbuild((self.strpath, auth),
+ builder)
+
+ def listdir(self, fil=None, sort=None):
+ """ list directory contents, possibly filter by the given fil func
+ and possibly sorted.
+ """
+ if isinstance(fil, str):
+ fil = common.FNMatcher(fil)
+ nameinfo_seq = self._listdir_nameinfo()
+ if len(nameinfo_seq) == 1:
+ name, info = nameinfo_seq[0]
+ if name == self.basename and info.kind == 'file':
+ #if not self.check(dir=1):
+ raise py.error.ENOTDIR(self)
+ paths = [self.join(name) for (name, info) in nameinfo_seq]
+ if fil:
+ paths = [x for x in paths if fil(x)]
+ self._sortlist(paths, sort)
+ return paths
+
+
+ def log(self, rev_start=None, rev_end=1, verbose=False):
+ """ return a list of LogEntry instances for this path.
+rev_start is the starting revision (defaulting to the first one).
+rev_end is the last revision (defaulting to HEAD).
+if verbose is True, then the LogEntry instances also know which files changed.
+"""
+ assert self.check() #make it simpler for the pipe
+ rev_start = rev_start is None and "HEAD" or rev_start
+ rev_end = rev_end is None and "HEAD" or rev_end
+
+ if rev_start == "HEAD" and rev_end == 1:
+ rev_opt = ""
+ else:
+ rev_opt = "-r %s:%s" % (rev_start, rev_end)
+ verbose_opt = verbose and "-v" or ""
+ xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' %
+ (rev_opt, verbose_opt, self.strpath))
+ from xml.dom import minidom
+ tree = minidom.parse(xmlpipe)
+ result = []
+ for logentry in filter(None, tree.firstChild.childNodes):
+ if logentry.nodeType == logentry.ELEMENT_NODE:
+ result.append(svncommon.LogEntry(logentry))
+ return result
+
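+# Usage sketch for SvnCommandPath / py.path.svnurl (illustrative only; the
+# repository URL below is hypothetical):
+#
+#     import py
+#     url = py.path.svnurl("http://svn.example.org/repo/trunk")
+#     for entry in url.listdir():              # runs "svn ls" under the hood
+#         print(entry.basename, entry.info().size)
+#     readme = url.join("README.txt")
+#     if readme.check(file=1):
+#         text = readme.open().read()          # piped from "svn cat"
+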
+#01234567890123456789012345678901234567890123467
+# 2256 hpk 165 Nov 24 17:55 __init__.py
+# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!!
+# 1312 johnny 1627 May 05 14:32 test_decorators.py
+#
+class InfoSvnCommand:
+ # the '0?' part in the middle is an indication of whether the resource is
+ # locked, see 'svn help ls'
+ lspattern = re.compile(
+ r'^ *(?P<rev>\d+) +(?P<author>.+?) +(0? *(?P<size>\d+))? '
+ r'*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$')
+ def __init__(self, line):
+ # this is a typical line from 'svn ls http://...'
+ #_ 1127 jum 0 Jul 13 15:28 branch/
+ match = self.lspattern.match(line)
+ data = match.groupdict()
+ self._name = data['file']
+ if self._name[-1] == '/':
+ self._name = self._name[:-1]
+ self.kind = 'dir'
+ else:
+ self.kind = 'file'
+ #self.has_props = l.pop(0) == 'P'
+ self.created_rev = int(data['rev'])
+ self.last_author = data['author']
+ self.size = data['size'] and int(data['size']) or 0
+ self.mtime = parse_time_with_missing_year(data['date'])
+ self.time = self.mtime * 1000000
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+
+#____________________________________________________
+#
+# helper functions
+#____________________________________________________
+def parse_time_with_missing_year(timestr):
+ """ analyze the time part from a single line of "svn ls -v"
+ the svn output doesn't show the year makes the 'timestr'
+ ambigous.
+ """
+ import calendar
+ t_now = time.gmtime()
+
+ tparts = timestr.split()
+ month = time.strptime(tparts.pop(0), '%b')[1]
+ day = time.strptime(tparts.pop(0), '%d')[2]
+ last = tparts.pop(0) # year or hour:minute
+ try:
+ if ":" in last:
+ raise ValueError()
+ year = time.strptime(last, '%Y')[0]
+ hour = minute = 0
+ except ValueError:
+ hour, minute = time.strptime(last, '%H:%M')[3:5]
+ year = t_now[0]
+
+ t_result = (year, month, day, hour, minute, 0,0,0,0)
+ if t_result > t_now:
+ year -= 1
+ t_result = (year, month, day, hour, minute, 0,0,0,0)
+ return calendar.timegm(t_result)
+
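+# Example inputs for parse_time_with_missing_year (sketch): "svn ls -v"
+# prints either "Nov 24 17:55" (year omitted, current year assumed) or
+# "May 05 2003" (hour omitted) for older entries; both forms parse above.
+#
+#     parse_time_with_missing_year("Nov 24 17:55")   # current (or previous) year
+#     parse_time_with_missing_year("May 05 2003")    # explicit year, 00:00
+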
+class PathEntry:
+ def __init__(self, ppart):
+ self.strpath = ppart.firstChild.nodeValue.encode('UTF-8')
+ self.action = ppart.getAttribute('action').encode('UTF-8')
+ if self.action == 'A':
+ self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8')
+ if self.copyfrom_path:
+ self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev'))
+
diff --git a/testing/web-platform/tests/tools/py/py/_path/svnwc.py b/testing/web-platform/tests/tools/py/py/_path/svnwc.py
new file mode 100644
index 000000000..00d3b4bba
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_path/svnwc.py
@@ -0,0 +1,1240 @@
+"""
+svn-Command based Implementation of a Subversion WorkingCopy Path.
+
+ SvnWCCommandPath is the main class.
+
+"""
+
+import os, sys, time, re, calendar, string
+import py
+import subprocess
+from py._path import common
+
+#-----------------------------------------------------------
+# Caching latest repository revision and repo-paths
+# (getting them is slow with the current implementations)
+#
+# XXX make mt-safe
+#-----------------------------------------------------------
+
+class cache:
+ proplist = {}
+ info = {}
+ entries = {}
+ prop = {}
+
+class RepoEntry:
+ def __init__(self, url, rev, timestamp):
+ self.url = url
+ self.rev = rev
+ self.timestamp = timestamp
+
+ def __str__(self):
+ return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp)
+
+class RepoCache:
+ """ The Repocache manages discovered repository paths
+ and their revisions. If inside a timeout the cache
+ will even return the revision of the root.
+ """
+ timeout = 20 # seconds after which we forget that we know the last revision
+
+ def __init__(self):
+ self.repos = []
+
+ def clear(self):
+ self.repos = []
+
+ def put(self, url, rev, timestamp=None):
+ if rev is None:
+ return
+ if timestamp is None:
+ timestamp = time.time()
+
+ for entry in self.repos:
+ if url == entry.url:
+ entry.timestamp = timestamp
+ entry.rev = rev
+ #print "set repo", entry
+ break
+ else:
+ entry = RepoEntry(url, rev, timestamp)
+ self.repos.append(entry)
+ #print "appended repo", entry
+
+ def get(self, url):
+ now = time.time()
+ for entry in self.repos:
+ if url.startswith(entry.url):
+ if now < entry.timestamp + self.timeout:
+ #print "returning immediate Etrny", entry
+ return entry.url, entry.rev
+ return entry.url, -1
+ return url, -1
+
+repositories = RepoCache()
+
+
+# svn support code
+
+ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
+if sys.platform == "win32":
+ ALLOWED_CHARS += ":"
+ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
+
+def _getsvnversion(ver=[]):
+ try:
+ return ver[0]
+ except IndexError:
+ v = py.process.cmdexec("svn -q --version")
+ v = v.strip()
+ v = '.'.join(v.split('.')[:2])
+ ver.append(v)
+ return v
+
+def _escape_helper(text):
+ text = str(text)
+ if py.std.sys.platform != 'win32':
+ text = str(text).replace('$', '\\$')
+ return text
+
+def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
+ for c in str(text):
+ if c.isalnum():
+ continue
+ if c in allowed_chars:
+ continue
+ return True
+ return False
+
+def checkbadchars(url):
+ # (hpk) not quite sure about the exact purpose, guido w.?
+ proto, uri = url.split("://", 1)
+ if proto != "file":
+ host, uripath = uri.split('/', 1)
+ # only check for bad chars in the non-protocol parts
+ if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \
+ or _check_for_bad_chars(uripath, ALLOWED_CHARS)):
+ raise ValueError("bad char in %r" % (url, ))
+
+
+#_______________________________________________________________
+
+class SvnPathBase(common.PathBase):
+ """ Base implementation for SvnPath implementations. """
+ sep = '/'
+
+ def _geturl(self):
+ return self.strpath
+ url = property(_geturl, None, None, "url of this svn-path.")
+
+ def __str__(self):
+ """ return a string representation (including rev-number) """
+ return self.strpath
+
+ def __hash__(self):
+ return hash(self.strpath)
+
+ def new(self, **kw):
+ """ create a modified version of this path. A 'rev' argument
+ indicates a new revision.
+ the following keyword arguments modify various path parts::
+
+ http://host.com/repo/path/file.ext
+ |-----------------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ obj = object.__new__(self.__class__)
+ obj.rev = kw.get('rev', self.rev)
+ obj.auth = kw.get('auth', self.auth)
+ dirname, basename, purebasename, ext = self._getbyspec(
+ "dirname,basename,purebasename,ext")
+ if 'basename' in kw:
+ if 'purebasename' in kw or 'ext' in kw:
+ raise ValueError("invalid specification %r" % kw)
+ else:
+ pb = kw.setdefault('purebasename', purebasename)
+ ext = kw.setdefault('ext', ext)
+ if ext and not ext.startswith('.'):
+ ext = '.' + ext
+ kw['basename'] = pb + ext
+
+ kw.setdefault('dirname', dirname)
+ kw.setdefault('sep', self.sep)
+ if kw['basename']:
+ obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
+ else:
+ obj.strpath = "%(dirname)s" % kw
+ return obj
+
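+ # Example of new() (sketch; the URL below is hypothetical):
+ #
+ #     p = py.path.svnurl("http://svn.example.org/repo/trunk/file.ext")
+ #     p.new(ext=".txt")          # .../trunk/file.txt
+ #     p.new(basename="other")    # .../trunk/other
+ #     p.new(rev=42)              # same URL, pinned to revision 42
+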
+ def _getbyspec(self, spec):
+ """ get specified parts of the path. 'arg' is a string
+ with comma separated path parts. The parts are returned
+ in exactly the order of the specification.
+
+ you may specify the following parts:
+
+ http://host.com/repo/path/file.ext
+ |-----------------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ res = []
+ parts = self.strpath.split(self.sep)
+ for name in spec.split(','):
+ name = name.strip()
+ if name == 'dirname':
+ res.append(self.sep.join(parts[:-1]))
+ elif name == 'basename':
+ res.append(parts[-1])
+ else:
+ basename = parts[-1]
+ i = basename.rfind('.')
+ if i == -1:
+ purebasename, ext = basename, ''
+ else:
+ purebasename, ext = basename[:i], basename[i:]
+ if name == 'purebasename':
+ res.append(purebasename)
+ elif name == 'ext':
+ res.append(ext)
+ else:
+ raise NameError("Don't know part %r" % name)
+ return res
+
+ def __eq__(self, other):
+ """ return true if path and rev attributes each match """
+ return (str(self) == str(other) and
+ self.rev == other.rev)
+
+ def __ne__(self, other):
+ return not self == other
+
+ def join(self, *args):
+ """ return a new Path (with the same revision) which is composed
+ of the self Path followed by 'args' path components.
+ """
+ if not args:
+ return self
+
+ args = tuple([arg.strip(self.sep) for arg in args])
+ parts = (self.strpath, ) + args
+ newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
+ return newpath
+
+ def propget(self, name):
+ """ return the content of the given property. """
+ value = self._propget(name)
+ return value
+
+ def proplist(self):
+ """ list all property names. """
+ content = self._proplist()
+ return content
+
+ def size(self):
+ """ Return the size of the file content of the Path. """
+ return self.info().size
+
+ def mtime(self):
+ """ Return the last modification time of the file. """
+ return self.info().mtime
+
+ # shared help methods
+
+ def _escape(self, cmd):
+ return _escape_helper(cmd)
+
+
+ #def _childmaxrev(self):
+ # """ return maximum revision number of childs (or self.rev if no childs) """
+ # rev = self.rev
+ # for name, info in self._listdir_nameinfo():
+ # rev = max(rev, info.created_rev)
+ # return rev
+
+ #def _getlatestrevision(self):
+ # """ return latest repo-revision for this path. """
+ # url = self.strpath
+ # path = self.__class__(url, None)
+ #
+ # # we need a long walk to find the root-repo and revision
+ # while 1:
+ # try:
+ # rev = max(rev, path._childmaxrev())
+ # previous = path
+ # path = path.dirpath()
+ # except (IOError, process.cmdexec.Error):
+ # break
+ # if rev is None:
+ # raise IOError, "could not determine newest repo revision for %s" % self
+ # return rev
+
+ class Checkers(common.Checkers):
+ def dir(self):
+ try:
+ return self.path.info().kind == 'dir'
+ except py.error.Error:
+ return self._listdirworks()
+
+ def _listdirworks(self):
+ try:
+ self.path.listdir()
+ except py.error.ENOENT:
+ return False
+ else:
+ return True
+
+ def file(self):
+ try:
+ return self.path.info().kind == 'file'
+ except py.error.ENOENT:
+ return False
+
+ def exists(self):
+ try:
+ return self.path.info()
+ except py.error.ENOENT:
+ return self._listdirworks()
+
+def parse_apr_time(timestr):
+ i = timestr.rfind('.')
+ if i == -1:
+ raise ValueError("could not parse %s" % timestr)
+ timestr = timestr[:i]
+ parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S")
+ return time.mktime(parsedtime)
+
+class PropListDict(dict):
+ """ a Dictionary which fetches values (InfoSvnCommand instances) lazily"""
+ def __init__(self, path, keynames):
+ dict.__init__(self, [(x, None) for x in keynames])
+ self.path = path
+
+ def __getitem__(self, key):
+ value = dict.__getitem__(self, key)
+ if value is None:
+ value = self.path.propget(key)
+ dict.__setitem__(self, key, value)
+ return value
+
+def fixlocale():
+ if sys.platform != 'win32':
+ return 'LC_ALL=C '
+ return ''
+
+# some nasty chunk of code to solve path and url conversion and quoting issues
+ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ')
+if os.sep in ILLEGAL_CHARS:
+ ILLEGAL_CHARS.remove(os.sep)
+ISWINDOWS = sys.platform == 'win32'
+_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
+def _check_path(path):
+ illegal = ILLEGAL_CHARS[:]
+ sp = path.strpath
+ if ISWINDOWS:
+ illegal.remove(':')
+ if not _reg_allow_disk.match(sp):
+ raise ValueError('path may not contain a colon (:)')
+ for char in sp:
+ if char not in string.printable or char in illegal:
+ raise ValueError('illegal character %r in path' % (char,))
+
+def path_to_fspath(path, addat=True):
+ _check_path(path)
+ sp = path.strpath
+ if addat and path.rev != -1:
+ sp = '%s@%s' % (sp, path.rev)
+ elif addat:
+ sp = '%s@HEAD' % (sp,)
+ return sp
+
+def url_from_path(path):
+ fspath = path_to_fspath(path, False)
+ quote = py.std.urllib.quote
+ if ISWINDOWS:
+ match = _reg_allow_disk.match(fspath)
+ fspath = fspath.replace('\\', '/')
+ if match.group(1):
+ fspath = '/%s%s' % (match.group(1).replace('\\', '/'),
+ quote(fspath[len(match.group(1)):]))
+ else:
+ fspath = quote(fspath)
+ else:
+ fspath = quote(fspath)
+ if path.rev != -1:
+ fspath = '%s@%s' % (fspath, path.rev)
+ else:
+ fspath = '%s@HEAD' % (fspath,)
+ return 'file://%s' % (fspath,)
+
+class SvnAuth(object):
+ """ container for auth information for Subversion """
+ def __init__(self, username, password, cache_auth=True, interactive=True):
+ self.username = username
+ self.password = password
+ self.cache_auth = cache_auth
+ self.interactive = interactive
+
+ def makecmdoptions(self):
+ uname = self.username.replace('"', '\\"')
+ passwd = self.password.replace('"', '\\"')
+ ret = []
+ if uname:
+ ret.append('--username="%s"' % (uname,))
+ if passwd:
+ ret.append('--password="%s"' % (passwd,))
+ if not self.cache_auth:
+ ret.append('--no-auth-cache')
+ if not self.interactive:
+ ret.append('--non-interactive')
+ return ' '.join(ret)
+
+ def __str__(self):
+ return "<SvnAuth username=%s ...>" %(self.username,)
+
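+# Usage sketch for SvnAuth (credentials, path and URL below are placeholders):
+#
+#     import py
+#     auth = SvnAuth("alice", "secret", cache_auth=False, interactive=False)
+#     wc = py.path.svnwc("/tmp/checkout", auth=auth)
+#     url = py.path.svnurl("http://svn.example.org/repo/trunk", auth=auth)
+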
+rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)')
+
+class SvnWCCommandPath(common.PathBase):
+ """ path implementation offering access/modification to svn working copies.
+ It has methods similar to the functions in os.path and similar to the
+ commands of the svn client.
+ """
+ sep = os.sep
+
+ def __new__(cls, wcpath=None, auth=None):
+ self = object.__new__(cls)
+ if isinstance(wcpath, cls):
+ if wcpath.__class__ == cls:
+ return wcpath
+ wcpath = wcpath.localpath
+ if _check_for_bad_chars(str(wcpath),
+ ALLOWED_CHARS):
+ raise ValueError("bad char in wcpath %s" % (wcpath, ))
+ self.localpath = py.path.local(wcpath)
+ self.auth = auth
+ return self
+
+ strpath = property(lambda x: str(x.localpath), None, None, "string path")
+ rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
+
+ def __eq__(self, other):
+ return self.localpath == getattr(other, 'localpath', None)
+
+ def _geturl(self):
+ if getattr(self, '_url', None) is None:
+ info = self.info()
+ self._url = info.url #SvnPath(info.url, info.rev)
+ assert isinstance(self._url, py.builtin._basestring)
+ return self._url
+
+ url = property(_geturl, None, None, "url of this WC item")
+
+ def _escape(self, cmd):
+ return _escape_helper(cmd)
+
+ def dump(self, obj):
+ """ pickle object into path location"""
+ return self.localpath.dump(obj)
+
+ def svnurl(self):
+ """ return current SvnPath for this WC-item. """
+ info = self.info()
+ return py.path.svnurl(info.url)
+
+ def __repr__(self):
+ return "svnwc(%r)" % (self.strpath) # , self._url)
+
+ def __str__(self):
+ return str(self.localpath)
+
+ def _makeauthoptions(self):
+ if self.auth is None:
+ return ''
+ return self.auth.makecmdoptions()
+
+ def _authsvn(self, cmd, args=None):
+ args = args and list(args) or []
+ args.append(self._makeauthoptions())
+ return self._svn(cmd, *args)
+
+ def _svn(self, cmd, *args):
+ l = ['svn %s' % cmd]
+ args = [self._escape(item) for item in args]
+ l.extend(args)
+ l.append('"%s"' % self._escape(self.strpath))
+ # try fixing the locale because we can't otherwise parse
+ string = fixlocale() + " ".join(l)
+ try:
+ try:
+ key = 'LC_MESSAGES'
+ hold = os.environ.get(key)
+ os.environ[key] = 'C'
+ out = py.process.cmdexec(string)
+ finally:
+ if hold:
+ os.environ[key] = hold
+ else:
+ del os.environ[key]
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ strerr = e.err.lower()
+ if strerr.find('not found') != -1:
+ raise py.error.ENOENT(self)
+ elif strerr.find("E200009:") != -1:
+ raise py.error.ENOENT(self)
+ if (strerr.find('file exists') != -1 or
+ strerr.find('file already exists') != -1 or
+ strerr.find('w150002:') != -1 or
+ strerr.find("can't create directory") != -1):
+ raise py.error.EEXIST(strerr) #self)
+ raise
+ return out
+
+ def switch(self, url):
+ """ switch to given URL. """
+ self._authsvn('switch', [url])
+
+ def checkout(self, url=None, rev=None):
+ """ checkout from url to local wcpath. """
+ args = []
+ if url is None:
+ url = self.url
+ if rev is None or rev == -1:
+ if (py.std.sys.platform != 'win32' and
+ _getsvnversion() == '1.3'):
+ url += "@HEAD"
+ else:
+ if _getsvnversion() == '1.3':
+ url += "@%d" % rev
+ else:
+ args.append('-r' + str(rev))
+ args.append(url)
+ self._authsvn('co', args)
+
+ def update(self, rev='HEAD', interactive=True):
+ """ update working copy item to given revision. (None -> HEAD). """
+ opts = ['-r', rev]
+ if not interactive:
+ opts.append("--non-interactive")
+ self._authsvn('up', opts)
+
+ def write(self, content, mode='w'):
+ """ write content into local filesystem wc. """
+ self.localpath.write(content, mode)
+
+ def dirpath(self, *args):
+ """ return the directory Path of the current Path. """
+ return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
+
+ def _ensuredirs(self):
+ parent = self.dirpath()
+ if parent.check(dir=0):
+ parent._ensuredirs()
+ if self.check(dir=0):
+ self.mkdir()
+ return self
+
+ def ensure(self, *args, **kwargs):
+ """ ensure that an args-joined path exists (by default as
+ a file). If you specify a keyword argument 'dir=True'
+ then the path is forced to be a directory path.
+ """
+ p = self.join(*args)
+ if p.check():
+ if p.check(versioned=False):
+ p.add()
+ return p
+ if kwargs.get('dir', 0):
+ return p._ensuredirs()
+ parent = p.dirpath()
+ parent._ensuredirs()
+ p.write("")
+ p.add()
+ return p
+
+ def mkdir(self, *args):
+ """ create & return the directory joined with args. """
+ if args:
+ return self.join(*args).mkdir()
+ else:
+ self._svn('mkdir')
+ return self
+
+ def add(self):
+ """ add ourself to svn """
+ self._svn('add')
+
+ def remove(self, rec=1, force=1):
+ """ remove a file or a directory tree. 'rec'ursive is
+ ignored and considered always true (because of
+ underlying svn semantics).
+ """
+ assert rec, "svn cannot remove non-recursively"
+ if not self.check(versioned=True):
+ # not added to svn (anymore?), just remove
+ py.path.local(self).remove()
+ return
+ flags = []
+ if force:
+ flags.append('--force')
+ self._svn('remove', *flags)
+
+ def copy(self, target):
+ """ copy path to target."""
+ py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
+
+ def rename(self, target):
+ """ rename this path to target. """
+ py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
+
+ def lock(self):
+ """ set a lock (exclusive) on the resource """
+ out = self._authsvn('lock').strip()
+ if not out:
+ # warning or error, raise exception
+ raise ValueError("unknown error in svn lock command")
+
+ def unlock(self):
+ """ unset a previously set lock """
+ out = self._authsvn('unlock').strip()
+ if out.startswith('svn:'):
+ # warning or error, raise exception
+ raise Exception(out[4:])
+
+ def cleanup(self):
+ """ remove any locks from the resource """
+ # XXX should be fixed properly!!!
+ try:
+ self.unlock()
+ except:
+ pass
+
+ def status(self, updates=0, rec=0, externals=0):
+ """ return (collective) Status object for this file. """
+ # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
+ # 2201 2192 jum test
+ # XXX
+ if externals:
+ raise ValueError("XXX cannot perform status() "
+ "on external items yet")
+ else:
+ #1.2 supports: externals = '--ignore-externals'
+ externals = ''
+ if rec:
+ rec= ''
+ else:
+ rec = '--non-recursive'
+
+ # XXX does not work on all subversion versions
+ #if not externals:
+ # externals = '--ignore-externals'
+
+ if updates:
+ updates = '-u'
+ else:
+ updates = ''
+
+ try:
+ cmd = 'status -v --xml --no-ignore %s %s %s' % (
+ updates, rec, externals)
+ out = self._authsvn(cmd)
+ except py.process.cmdexec.Error:
+ cmd = 'status -v --no-ignore %s %s %s' % (
+ updates, rec, externals)
+ out = self._authsvn(cmd)
+ rootstatus = WCStatus(self).fromstring(out, self)
+ else:
+ rootstatus = XMLWCStatus(self).fromstring(out, self)
+ return rootstatus
+
+ def diff(self, rev=None):
+ """ return a diff of the current path against revision rev (defaulting
+ to the last one).
+ """
+ args = []
+ if rev is not None:
+ args.append("-r %d" % rev)
+ out = self._authsvn('diff', args)
+ return out
+
+ def blame(self):
+ """ return a list of tuples of three elements:
+ (revision, committer, line)
+ """
+ out = self._svn('blame')
+ result = []
+ blamelines = out.splitlines()
+ reallines = py.path.svnurl(self.url).readlines()
+ for i, (blameline, line) in enumerate(
+ zip(blamelines, reallines)):
+ m = rex_blame.match(blameline)
+ if not m:
+ raise ValueError("output line %r of svn blame does not match "
+ "expected format" % (line, ))
+ rev, name, _ = m.groups()
+ result.append((int(rev), name, line))
+ return result
+
+ _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
+ def commit(self, msg='', rec=1):
+ """ commit with support for non-recursive commits """
+ # XXX i guess escaping should be done better here?!?
+ cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
+ if not rec:
+ cmd += ' -N'
+ out = self._authsvn(cmd)
+ try:
+ del cache.info[self]
+ except KeyError:
+ pass
+ if out:
+ m = self._rex_commit.match(out)
+ return int(m.group(1))
+
+ def propset(self, name, value, *args):
+ """ set property name to value on this path. """
+ d = py.path.local.mkdtemp()
+ try:
+ p = d.join('value')
+ p.write(value)
+ self._svn('propset', name, '--file', str(p), *args)
+ finally:
+ d.remove()
+
+ def propget(self, name):
+ """ get property name on this path. """
+ res = self._svn('propget', name)
+ return res[:-1] # strip trailing newline
+
+ def propdel(self, name):
+ """ delete property name on this path. """
+ res = self._svn('propdel', name)
+ return res[:-1] # strip trailing newline
+
+ def proplist(self, rec=0):
+ """ return a mapping of property names to property values.
+If rec is True, then return a dictionary mapping sub-paths to such mappings.
+"""
+ if rec:
+ res = self._svn('proplist -R')
+ return make_recursive_propdict(self, res)
+ else:
+ res = self._svn('proplist')
+ lines = res.split('\n')
+ lines = [x.strip() for x in lines[1:]]
+ return PropListDict(self, lines)
+
+ def revert(self, rec=0):
+ """ revert the local changes of this path. if rec is True, do so
+recursively. """
+ if rec:
+ result = self._svn('revert -R')
+ else:
+ result = self._svn('revert')
+ return result
+
+ def new(self, **kw):
+ """ create a modified version of this path. A 'rev' argument
+ indicates a new revision.
+ the following keyword arguments modify various path parts:
+
+ http://host.com/repo/path/file.ext
+ |-----------------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ if kw:
+ localpath = self.localpath.new(**kw)
+ else:
+ localpath = self.localpath
+ return self.__class__(localpath, auth=self.auth)
+
+ def join(self, *args, **kwargs):
+ """ return a new Path (with the same revision) which is composed
+ of the self Path followed by 'args' path components.
+ """
+ if not args:
+ return self
+ localpath = self.localpath.join(*args, **kwargs)
+ return self.__class__(localpath, auth=self.auth)
+
+ def info(self, usecache=1):
+ """ return an Info structure with svn-provided information. """
+ info = usecache and cache.info.get(self)
+ if not info:
+ try:
+ output = self._svn('info')
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if e.err.find('Path is not a working copy directory') != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find("is not under version control") != -1:
+ raise py.error.ENOENT(self, e.err)
+ raise
+ # XXX SVN 1.3 has output on stderr instead of stdout (while it does
+ # return 0!), so a bit nasty, but we assume no output is output
+ # to stderr...
+ if (output.strip() == '' or
+ output.lower().find('not a versioned resource') != -1):
+ raise py.error.ENOENT(self, output)
+ info = InfoSvnWCCommand(output)
+
+ # Can't reliably compare on Windows without access to win32api
+ if py.std.sys.platform != 'win32':
+ if info.path != self.localpath:
+ raise py.error.ENOENT(self, "not a versioned resource:" +
+ " %s != %s" % (info.path, self.localpath))
+ cache.info[self] = info
+ return info
+
+ def listdir(self, fil=None, sort=None):
+ """ return a sequence of Paths.
+
+ listdir will return either a tuple or a list of paths
+ depending on implementation choices.
+ """
+ if isinstance(fil, str):
+ fil = common.FNMatcher(fil)
+ # XXX unify argument naming with LocalPath.listdir
+ def notsvn(path):
+ return path.basename != '.svn'
+
+ paths = []
+ for localpath in self.localpath.listdir(notsvn):
+ p = self.__class__(localpath, auth=self.auth)
+ if notsvn(p) and (not fil or fil(p)):
+ paths.append(p)
+ self._sortlist(paths, sort)
+ return paths
+
+ def open(self, mode='r'):
+ """ return an opened file with the given mode. """
+ return open(self.strpath, mode)
+
+ def _getbyspec(self, spec):
+ return self.localpath._getbyspec(spec)
+
+ class Checkers(py.path.local.Checkers):
+ def __init__(self, path):
+ self.svnwcpath = path
+ self.path = path.localpath
+ def versioned(self):
+ try:
+ s = self.svnwcpath.info()
+ except (py.error.ENOENT, py.error.EEXIST):
+ return False
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if e.err.find('is not a working copy')!=-1:
+ return False
+ if e.err.lower().find('not a versioned resource') != -1:
+ return False
+ raise
+ else:
+ return True
+
+ def log(self, rev_start=None, rev_end=1, verbose=False):
+ """ return a list of LogEntry instances for this path.
+rev_start is the starting revision (defaulting to the first one).
+rev_end is the last revision (defaulting to HEAD).
+if verbose is True, then the LogEntry instances also know which files changed.
+"""
+ assert self.check() # make it simpler for the pipe
+ rev_start = rev_start is None and "HEAD" or rev_start
+ rev_end = rev_end is None and "HEAD" or rev_end
+ if rev_start == "HEAD" and rev_end == 1:
+ rev_opt = ""
+ else:
+ rev_opt = "-r %s:%s" % (rev_start, rev_end)
+ verbose_opt = verbose and "-v" or ""
+ locale_env = fixlocale()
+ # some blather on stderr
+ auth_opt = self._makeauthoptions()
+ #stdin, stdout, stderr = os.popen3(locale_env +
+ # 'svn log --xml %s %s %s "%s"' % (
+ # rev_opt, verbose_opt, auth_opt,
+ # self.strpath))
+ cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
+ rev_opt, verbose_opt, auth_opt, self.strpath)
+
+ popen = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ )
+ stdout, stderr = popen.communicate()
+ stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
+ minidom,ExpatError = importxml()
+ try:
+ tree = minidom.parseString(stdout)
+ except ExpatError:
+ raise ValueError('no such revision')
+ result = []
+ for logentry in filter(None, tree.firstChild.childNodes):
+ if logentry.nodeType == logentry.ELEMENT_NODE:
+ result.append(LogEntry(logentry))
+ return result
+
+ def size(self):
+ """ Return the size of the file content of the Path. """
+ return self.info().size
+
+ def mtime(self):
+ """ Return the last modification time of the file. """
+ return self.info().mtime
+
+ def __hash__(self):
+ return hash((self.strpath, self.__class__, self.auth))
+
+
+class WCStatus:
+ attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
+ 'deleted', 'prop_modified', 'unknown', 'update_available',
+ 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
+ )
+
+ def __init__(self, wcpath, rev=None, modrev=None, author=None):
+ self.wcpath = wcpath
+ self.rev = rev
+ self.modrev = modrev
+ self.author = author
+
+ for name in self.attrnames:
+ setattr(self, name, [])
+
+ def allpath(self, sort=True, **kw):
+ d = {}
+ for name in self.attrnames:
+ if name not in kw or kw[name]:
+ for path in getattr(self, name):
+ d[path] = 1
+ l = d.keys()
+ if sort:
+ l.sort()
+ return l
+
+ # XXX a bit scary to assume there's always 2 spaces between username and
+ # path, however with win32 allowing spaces in user names there doesn't
+ # seem to be a more solid approach :(
+ _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
+
+ def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
+ """ return a new WCStatus object from data 's'
+ """
+ rootstatus = WCStatus(rootwcpath, rev, modrev, author)
+ update_rev = None
+ for line in data.split('\n'):
+ if not line.strip():
+ continue
+ #print "processing %r" % line
+ flags, rest = line[:8], line[8:]
+ # first column
+ c0,c1,c2,c3,c4,c5,x6,c7 = flags
+ #if '*' in line:
+ # print "flags", repr(flags), "rest", repr(rest)
+
+ if c0 in '?XI':
+ fn = line.split(None, 1)[1]
+ if c0 == '?':
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.unknown.append(wcpath)
+ elif c0 == 'X':
+ wcpath = rootwcpath.__class__(
+ rootwcpath.localpath.join(fn, abs=1),
+ auth=rootwcpath.auth)
+ rootstatus.external.append(wcpath)
+ elif c0 == 'I':
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.ignored.append(wcpath)
+
+ continue
+
+ #elif c0 in '~!' or c4 == 'S':
+ # raise NotImplementedError("received flag %r" % c0)
+
+ m = WCStatus._rex_status.match(rest)
+ if not m:
+ if c7 == '*':
+ fn = rest.strip()
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.update_available.append(wcpath)
+ continue
+ if line.lower().find('against revision:')!=-1:
+ update_rev = int(rest.split(':')[1].strip())
+ continue
+ if line.lower().find('status on external') > -1:
+ # XXX not sure what to do here... perhaps we want to
+ # store some state instead of just continuing, as right
+ # now it makes the top-level external get added twice
+ # (once as external, once as 'normal' unchanged item)
+ # because of the way SVN presents external items
+ continue
+ # keep trying
+ raise ValueError("could not parse line %r" % line)
+ else:
+ rev, modrev, author, fn = m.groups()
+ wcpath = rootwcpath.join(fn, abs=1)
+ #assert wcpath.check()
+ if c0 == 'M':
+ assert wcpath.check(file=1), "didn't expect a directory with changed content here"
+ rootstatus.modified.append(wcpath)
+ elif c0 == 'A' or c3 == '+' :
+ rootstatus.added.append(wcpath)
+ elif c0 == 'D':
+ rootstatus.deleted.append(wcpath)
+ elif c0 == 'C':
+ rootstatus.conflict.append(wcpath)
+ elif c0 == '~':
+ rootstatus.kindmismatch.append(wcpath)
+ elif c0 == '!':
+ rootstatus.incomplete.append(wcpath)
+ elif c0 == 'R':
+ rootstatus.replaced.append(wcpath)
+ elif not c0.strip():
+ rootstatus.unchanged.append(wcpath)
+ else:
+ raise NotImplementedError("received flag %r" % c0)
+
+ if c1 == 'M':
+ rootstatus.prop_modified.append(wcpath)
+ # XXX do we cover all client versions here?
+ if c2 == 'L' or c5 == 'K':
+ rootstatus.locked.append(wcpath)
+ if c7 == '*':
+ rootstatus.update_available.append(wcpath)
+
+ if wcpath == rootwcpath:
+ rootstatus.rev = rev
+ rootstatus.modrev = modrev
+ rootstatus.author = author
+ if update_rev:
+ rootstatus.update_rev = update_rev
+ continue
+ return rootstatus
+ fromstring = staticmethod(fromstring)
+
+class XMLWCStatus(WCStatus):
+ def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
+ """ parse 'data' (XML string as outputted by svn st) into a status obj
+ """
+ # XXX for externals, the path is shown twice: once
+ # with external information, and once with full info as if
+ # the item was a normal non-external... the current way of
+ # dealing with this issue is by ignoring it - this does make
+ # externals appear as external items as well as 'normal',
+ # unchanged ones in the status object so this is far from ideal
+ rootstatus = WCStatus(rootwcpath, rev, modrev, author)
+ update_rev = None
+ minidom, ExpatError = importxml()
+ try:
+ doc = minidom.parseString(data)
+ except ExpatError:
+ e = sys.exc_info()[1]
+ raise ValueError(str(e))
+ urevels = doc.getElementsByTagName('against')
+ if urevels:
+ rootstatus.update_rev = urevels[-1].getAttribute('revision')
+ for entryel in doc.getElementsByTagName('entry'):
+ path = entryel.getAttribute('path')
+ statusel = entryel.getElementsByTagName('wc-status')[0]
+ itemstatus = statusel.getAttribute('item')
+
+ if itemstatus == 'unversioned':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.unknown.append(wcpath)
+ continue
+ elif itemstatus == 'external':
+ wcpath = rootwcpath.__class__(
+ rootwcpath.localpath.join(path, abs=1),
+ auth=rootwcpath.auth)
+ rootstatus.external.append(wcpath)
+ continue
+ elif itemstatus == 'ignored':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.ignored.append(wcpath)
+ continue
+ elif itemstatus == 'incomplete':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.incomplete.append(wcpath)
+ continue
+
+ rev = statusel.getAttribute('revision')
+ if itemstatus == 'added' or itemstatus == 'none':
+ rev = '0'
+ modrev = '?'
+ author = '?'
+ date = ''
+ elif itemstatus == "replaced":
+ pass
+ else:
+ #print entryel.toxml()
+ commitel = entryel.getElementsByTagName('commit')[0]
+ if commitel:
+ modrev = commitel.getAttribute('revision')
+ author = ''
+ author_els = commitel.getElementsByTagName('author')
+ if author_els:
+ for c in author_els[0].childNodes:
+ author += c.nodeValue
+ date = ''
+ for c in commitel.getElementsByTagName('date')[0]\
+ .childNodes:
+ date += c.nodeValue
+
+ wcpath = rootwcpath.join(path, abs=1)
+
+ assert itemstatus != 'modified' or wcpath.check(file=1), (
+ 'didn\'t expect a directory with changed content here')
+
+ itemattrname = {
+ 'normal': 'unchanged',
+ 'unversioned': 'unknown',
+ 'conflicted': 'conflict',
+ 'none': 'added',
+ }.get(itemstatus, itemstatus)
+
+ attr = getattr(rootstatus, itemattrname)
+ attr.append(wcpath)
+
+ propsstatus = statusel.getAttribute('props')
+ if propsstatus not in ('none', 'normal'):
+ rootstatus.prop_modified.append(wcpath)
+
+ if wcpath == rootwcpath:
+ rootstatus.rev = rev
+ rootstatus.modrev = modrev
+ rootstatus.author = author
+ rootstatus.date = date
+
+ # handle repos-status element (remote info)
+ rstatusels = entryel.getElementsByTagName('repos-status')
+ if rstatusels:
+ rstatusel = rstatusels[0]
+ ritemstatus = rstatusel.getAttribute('item')
+ if ritemstatus in ('added', 'modified'):
+ rootstatus.update_available.append(wcpath)
+
+ lockels = entryel.getElementsByTagName('lock')
+ if len(lockels):
+ rootstatus.locked.append(wcpath)
+
+ return rootstatus
+ fromstring = staticmethod(fromstring)
+
+class InfoSvnWCCommand:
+ def __init__(self, output):
+ # Path: test
+ # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
+ # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
+ # Revision: 2151
+ # Node Kind: directory
+ # Schedule: normal
+ # Last Changed Author: hpk
+ # Last Changed Rev: 2100
+ # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
+ # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
+
+ d = {}
+ for line in output.split('\n'):
+ if not line.strip():
+ continue
+ key, value = line.split(':', 1)
+ key = key.lower().replace(' ', '')
+ value = value.strip()
+ d[key] = value
+ try:
+ self.url = d['url']
+ except KeyError:
+ raise ValueError("Not a versioned resource")
+ #raise ValueError, "Not a versioned resource %r" % path
+ self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
+ try:
+ self.rev = int(d['revision'])
+ except KeyError:
+ self.rev = None
+
+ self.path = py.path.local(d['path'])
+ self.size = self.path.size()
+ if 'lastchangedrev' in d:
+ self.created_rev = int(d['lastchangedrev'])
+ if 'lastchangedauthor' in d:
+ self.last_author = d['lastchangedauthor']
+ if 'lastchangeddate' in d:
+ self.mtime = parse_wcinfotime(d['lastchangeddate'])
+ self.time = self.mtime * 1000000
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+def parse_wcinfotime(timestr):
+ """ Returns seconds since epoch, UTC. """
+ # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
+ m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
+ if not m:
+ raise ValueError("timestring %r does not match" % timestr)
+ timestr, timezone = m.groups()
+ # do not handle timezone specially, return value should be UTC
+ parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
+ return calendar.timegm(parsedtime)
+
+def make_recursive_propdict(wcroot,
+ output,
+ rex = re.compile("Properties on '(.*)':")):
+ """ Return a dictionary of path->PropListDict mappings. """
+ lines = [x for x in output.split('\n') if x]
+ pdict = {}
+ while lines:
+ line = lines.pop(0)
+ m = rex.match(line)
+ if not m:
+ raise ValueError("could not parse propget-line: %r" % line)
+ path = m.groups()[0]
+ wcpath = wcroot.join(path, abs=1)
+ propnames = []
+ while lines and lines[0].startswith(' '):
+ propname = lines.pop(0).strip()
+ propnames.append(propname)
+ assert propnames, "must have found properties!"
+ pdict[wcpath] = PropListDict(wcpath, propnames)
+ return pdict
+
+
+def importxml(cache=[]):
+ if cache:
+ return cache
+ from xml.dom import minidom
+ from xml.parsers.expat import ExpatError
+ cache.extend([minidom, ExpatError])
+ return cache
+
+class LogEntry:
+ def __init__(self, logentry):
+ self.rev = int(logentry.getAttribute('revision'))
+ for lpart in filter(None, logentry.childNodes):
+ if lpart.nodeType == lpart.ELEMENT_NODE:
+ if lpart.nodeName == 'author':
+ self.author = lpart.firstChild.nodeValue
+ elif lpart.nodeName == 'msg':
+ if lpart.firstChild:
+ self.msg = lpart.firstChild.nodeValue
+ else:
+ self.msg = ''
+ elif lpart.nodeName == 'date':
+ #2003-07-29T20:05:11.598637Z
+ timestr = lpart.firstChild.nodeValue
+ self.date = parse_apr_time(timestr)
+ elif lpart.nodeName == 'paths':
+ self.strpaths = []
+ for ppart in filter(None, lpart.childNodes):
+ if ppart.nodeType == ppart.ELEMENT_NODE:
+ self.strpaths.append(PathEntry(ppart))
+ def __repr__(self):
+ return '<Logentry rev=%d author=%s date=%s>' % (
+ self.rev, self.author, self.date)
+
+
diff --git a/testing/web-platform/tests/tools/py/py/_process/__init__.py b/testing/web-platform/tests/tools/py/py/_process/__init__.py
new file mode 100644
index 000000000..86c714ad1
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_process/__init__.py
@@ -0,0 +1 @@
+""" high-level sub-process handling """
diff --git a/testing/web-platform/tests/tools/py/py/_process/cmdexec.py b/testing/web-platform/tests/tools/py/py/_process/cmdexec.py
new file mode 100644
index 000000000..f83a24940
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_process/cmdexec.py
@@ -0,0 +1,49 @@
+import sys
+import subprocess
+import py
+from subprocess import Popen, PIPE
+
+def cmdexec(cmd):
+ """ return unicode output of executing 'cmd' in a separate process.
+
+ raise a cmdexec.Error exception if the command failed.
+ the exception will provide an 'err' attribute containing
+ the error-output from the command.
+ if the subprocess module does not provide proper encoding/unicode strings,
+ sys.getdefaultencoding() is used; if that does not exist, 'UTF-8'.
+ """
+ process = subprocess.Popen(cmd, shell=True,
+ universal_newlines=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = process.communicate()
+ if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not
+ try:
+ default_encoding = sys.getdefaultencoding() # jython may not have it
+ except AttributeError:
+ default_encoding = sys.stdout.encoding or 'UTF-8'
+ out = unicode(out, process.stdout.encoding or default_encoding)
+ err = unicode(err, process.stderr.encoding or default_encoding)
+ status = process.poll()
+ if status:
+ raise ExecutionFailed(status, status, cmd, out, err)
+ return out
+
+class ExecutionFailed(py.error.Error):
+ def __init__(self, status, systemstatus, cmd, out, err):
+ Exception.__init__(self)
+ self.status = status
+ self.systemstatus = systemstatus
+ self.cmd = cmd
+ self.err = err
+ self.out = out
+
+ def __str__(self):
+ return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err)
+
+# export the exception under the name 'py.process.cmdexec.Error'
+cmdexec.Error = ExecutionFailed
+try:
+ ExecutionFailed.__module__ = 'py.process.cmdexec'
+ ExecutionFailed.__name__ = 'Error'
+except (AttributeError, TypeError):
+ pass
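+
+# Usage sketch (the shell commands below are illustrative):
+#
+#     import py
+#     out = py.process.cmdexec("echo hello")
+#     assert out.strip() == "hello"
+#     try:
+#         py.process.cmdexec("false")
+#     except py.process.cmdexec.Error as e:   # i.e. ExecutionFailed above
+#         print(e.status, e.err)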
diff --git a/testing/web-platform/tests/tools/py/py/_process/forkedfunc.py b/testing/web-platform/tests/tools/py/py/_process/forkedfunc.py
new file mode 100644
index 000000000..1c2853068
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_process/forkedfunc.py
@@ -0,0 +1,120 @@
+
+"""
+ ForkedFunc provides a way to run a function in a forked process
+ and get at its return value, stdout and stderr output as well
+ as signals and exit statuses.
+"""
+
+import py
+import os
+import sys
+import marshal
+
+
+def get_unbuffered_io(fd, filename):
+ f = open(str(filename), "w")
+ if fd != f.fileno():
+ os.dup2(f.fileno(), fd)
+ class AutoFlush:
+ def write(self, data):
+ f.write(data)
+ f.flush()
+ def __getattr__(self, name):
+ return getattr(f, name)
+ return AutoFlush()
+
+
+class ForkedFunc:
+ EXITSTATUS_EXCEPTION = 3
+
+
+ def __init__(self, fun, args=None, kwargs=None, nice_level=0,
+ child_on_start=None, child_on_exit=None):
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
+ self.fun = fun
+ self.args = args
+ self.kwargs = kwargs
+ self.tempdir = tempdir = py.path.local.mkdtemp()
+ self.RETVAL = tempdir.ensure('retval')
+ self.STDOUT = tempdir.ensure('stdout')
+ self.STDERR = tempdir.ensure('stderr')
+
+ pid = os.fork()
+ if pid: # in parent process
+ self.pid = pid
+ else: # in child process
+ self.pid = None
+ self._child(nice_level, child_on_start, child_on_exit)
+
+ def _child(self, nice_level, child_on_start, child_on_exit):
+ # right now we need to call a function, but first we need to
+ # map all IO that might happen
+ sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT)
+ sys.stderr = stderr = get_unbuffered_io(2, self.STDERR)
+ retvalf = self.RETVAL.open("wb")
+ EXITSTATUS = 0
+ try:
+ if nice_level:
+ os.nice(nice_level)
+ try:
+ if child_on_start is not None:
+ child_on_start()
+ retval = self.fun(*self.args, **self.kwargs)
+ retvalf.write(marshal.dumps(retval))
+ if child_on_exit is not None:
+ child_on_exit()
+ except:
+ excinfo = py.code.ExceptionInfo()
+ stderr.write(str(excinfo._getreprcrash()))
+ EXITSTATUS = self.EXITSTATUS_EXCEPTION
+ finally:
+ stdout.close()
+ stderr.close()
+ retvalf.close()
+ os.close(1)
+ os.close(2)
+ os._exit(EXITSTATUS)
+
+ def waitfinish(self, waiter=os.waitpid):
+ pid, systemstatus = waiter(self.pid, 0)
+ if systemstatus:
+ if os.WIFSIGNALED(systemstatus):
+ exitstatus = os.WTERMSIG(systemstatus) + 128
+ else:
+ exitstatus = os.WEXITSTATUS(systemstatus)
+ else:
+ exitstatus = 0
+ signal = systemstatus & 0x7f
+ if not exitstatus and not signal:
+ retval = self.RETVAL.open('rb')
+ try:
+ retval_data = retval.read()
+ finally:
+ retval.close()
+ retval = marshal.loads(retval_data)
+ else:
+ retval = None
+ stdout = self.STDOUT.read()
+ stderr = self.STDERR.read()
+ self._removetemp()
+ return Result(exitstatus, signal, retval, stdout, stderr)
+
+ def _removetemp(self):
+ if self.tempdir.check():
+ self.tempdir.remove()
+
+ def __del__(self):
+ if self.pid is not None: # only clean up in main process
+ self._removetemp()
+
+
+class Result(object):
+ def __init__(self, exitstatus, signal, retval, stdout, stderr):
+ self.exitstatus = exitstatus
+ self.signal = signal
+ self.retval = retval
+ self.out = stdout
+ self.err = stderr
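+
+
+# Usage sketch (POSIX-only, since it relies on os.fork; return values must be
+# marshal-able):
+#
+#     def compute():
+#         print("working...")
+#         return 42
+#
+#     ff = ForkedFunc(compute)
+#     result = ff.waitfinish()
+#     assert result.retval == 42 and "working" in result.out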
diff --git a/testing/web-platform/tests/tools/py/py/_process/killproc.py b/testing/web-platform/tests/tools/py/py/_process/killproc.py
new file mode 100644
index 000000000..18e8310b5
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_process/killproc.py
@@ -0,0 +1,23 @@
+import py
+import os, sys
+
+if sys.platform == "win32" or getattr(os, '_name', '') == 'nt':
+ try:
+ import ctypes
+ except ImportError:
+ def dokill(pid):
+ py.process.cmdexec("taskkill /F /PID %d" %(pid,))
+ else:
+ def dokill(pid):
+ PROCESS_TERMINATE = 1
+ handle = ctypes.windll.kernel32.OpenProcess(
+ PROCESS_TERMINATE, False, pid)
+ ctypes.windll.kernel32.TerminateProcess(handle, -1)
+ ctypes.windll.kernel32.CloseHandle(handle)
+else:
+ def dokill(pid):
+ os.kill(pid, 15)
+
+def kill(pid):
+ """ kill process by id. """
+ dokill(pid)
diff --git a/testing/web-platform/tests/tools/py/py/_std.py b/testing/web-platform/tests/tools/py/py/_std.py
new file mode 100644
index 000000000..97a985332
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_std.py
@@ -0,0 +1,18 @@
+import sys
+
+class Std(object):
+ """ makes top-level python modules available as an attribute,
+ importing them on first access.
+ """
+
+ def __init__(self):
+ self.__dict__ = sys.modules
+
+ def __getattr__(self, name):
+ try:
+ m = __import__(name)
+ except ImportError:
+ raise AttributeError("py.std: could not import %s" % name)
+ return m
+
+std = Std()
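+
+# Usage sketch: lazily reaching standard-library modules through the proxy
+# (exposed as py.std in the public API):
+#
+#     import py
+#     py.std.os.getcwd()                # imports 'os' on first access
+#     py.std.textwrap.dedent("  x")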
diff --git a/testing/web-platform/tests/tools/py/py/_xmlgen.py b/testing/web-platform/tests/tools/py/py/_xmlgen.py
new file mode 100644
index 000000000..2ffcaa14b
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/_xmlgen.py
@@ -0,0 +1,253 @@
+"""
+module for generating and serializing xml and html structures
+by using simple python objects.
+
+(c) holger krekel, holger at merlinux eu. 2009
+"""
+import sys, re
+
+if sys.version_info >= (3,0):
+ def u(s):
+ return s
+ def unicode(x, errors=None):
+ if hasattr(x, '__unicode__'):
+ return x.__unicode__()
+ return str(x)
+else:
+ def u(s):
+ return unicode(s)
+ unicode = unicode
+
+
+class NamespaceMetaclass(type):
+ def __getattr__(self, name):
+ if name[:1] == '_':
+ raise AttributeError(name)
+ if self == Namespace:
+ raise ValueError("Namespace class is abstract")
+ tagspec = self.__tagspec__
+ if tagspec is not None and name not in tagspec:
+ raise AttributeError(name)
+ classattr = {}
+ if self.__stickyname__:
+ classattr['xmlname'] = name
+ cls = type(name, (self.__tagclass__,), classattr)
+ setattr(self, name, cls)
+ return cls
+
+class Tag(list):
+ class Attr(object):
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+ def __init__(self, *args, **kwargs):
+ super(Tag, self).__init__(args)
+ self.attr = self.Attr(**kwargs)
+
+ def __unicode__(self):
+ return self.unicode(indent=0)
+ __str__ = __unicode__
+
+ def unicode(self, indent=2):
+ l = []
+ SimpleUnicodeVisitor(l.append, indent).visit(self)
+ return u("").join(l)
+
+ def __repr__(self):
+ name = self.__class__.__name__
+ return "<%r tag object %d>" % (name, id(self))
+
+Namespace = NamespaceMetaclass('Namespace', (object, ), {
+ '__tagspec__': None,
+ '__tagclass__': Tag,
+ '__stickyname__': False,
+})
+
+class HtmlTag(Tag):
+ def unicode(self, indent=2):
+ l = []
+ HtmlVisitor(l.append, indent, shortempty=False).visit(self)
+ return u("").join(l)
+
+# exported plain html namespace
+class html(Namespace):
+ __tagclass__ = HtmlTag
+ __stickyname__ = True
+ __tagspec__ = dict([(x,1) for x in (
+ 'a,abbr,acronym,address,applet,area,b,bdo,big,blink,'
+ 'blockquote,body,br,button,caption,center,cite,code,col,'
+ 'colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,'
+ 'fieldset,font,form,frameset,h1,h2,h3,h4,h5,h6,head,html,'
+ 'i,iframe,img,input,ins,kbd,label,legend,li,link,listing,'
+ 'map,marquee,menu,meta,multicol,nobr,noembed,noframes,'
+ 'noscript,object,ol,optgroup,option,p,pre,q,s,script,'
+ 'select,small,span,strike,strong,style,sub,sup,table,'
+ 'tbody,td,textarea,tfoot,th,thead,title,tr,tt,u,ul,xmp,'
+ 'base,basefont,frame,hr,isindex,param,samp,var'
+ ).split(',') if x])
+
+ class Style(object):
+ def __init__(self, **kw):
+ for x, y in kw.items():
+ x = x.replace('_', '-')
+ setattr(self, x, y)
+
+
+class raw(object):
+ """just a box that can contain a unicode string that will be
+ included directly in the output"""
+ def __init__(self, uniobj):
+ self.uniobj = uniobj
+
+class SimpleUnicodeVisitor(object):
+ """ recursive visitor to write unicode. """
+ def __init__(self, write, indent=0, curindent=0, shortempty=True):
+ self.write = write
+ self.cache = {}
+ self.visited = {} # for detection of recursion
+ self.indent = indent
+ self.curindent = curindent
+ self.parents = []
+ self.shortempty = shortempty # short empty tags or not
+
+ def visit(self, node):
+ """ dispatch on the node's class name, falling back through its base classes. """
+ cls = node.__class__
+ try:
+ visitmethod = self.cache[cls]
+ except KeyError:
+ for subclass in cls.__mro__:
+ visitmethod = getattr(self, subclass.__name__, None)
+ if visitmethod is not None:
+ break
+ else:
+ visitmethod = self.__object
+ self.cache[cls] = visitmethod
+ visitmethod(node)
+
+ # the default fallback handler is marked private
+ # to avoid clashes with the tag name object
+ def __object(self, obj):
+ #self.write(obj)
+ self.write(escape(unicode(obj)))
+
+ def raw(self, obj):
+ self.write(obj.uniobj)
+
+ def list(self, obj):
+ assert id(obj) not in self.visited
+ self.visited[id(obj)] = 1
+ for elem in obj:
+ self.visit(elem)
+
+ def Tag(self, tag):
+ assert id(tag) not in self.visited
+ try:
+ tag.parent = self.parents[-1]
+ except IndexError:
+ tag.parent = None
+ self.visited[id(tag)] = 1
+ tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
+ if self.curindent and not self._isinline(tagname):
+ self.write("\n" + u(' ') * self.curindent)
+ if tag:
+ self.curindent += self.indent
+ self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
+ self.parents.append(tag)
+ for x in tag:
+ self.visit(x)
+ self.parents.pop()
+ self.write(u('</%s>') % tagname)
+ self.curindent -= self.indent
+ else:
+ nameattr = tagname+self.attributes(tag)
+ if self._issingleton(tagname):
+ self.write(u('<%s/>') % (nameattr,))
+ else:
+ self.write(u('<%s></%s>') % (nameattr, tagname))
+
+ def attributes(self, tag):
+ # serialize attributes
+ attrlist = dir(tag.attr)
+ attrlist.sort()
+ l = []
+ for name in attrlist:
+ res = self.repr_attribute(tag.attr, name)
+ if res is not None:
+ l.append(res)
+ l.extend(self.getstyle(tag))
+ return u("").join(l)
+
+ def repr_attribute(self, attrs, name):
+ if name[:2] != '__':
+ value = getattr(attrs, name)
+ if name.endswith('_'):
+ name = name[:-1]
+ if isinstance(value, raw):
+ insert = value.uniobj
+ else:
+ insert = escape(unicode(value))
+ return ' %s="%s"' % (name, insert)
+
+ def getstyle(self, tag):
+ """ return attribute list suitable for styling. """
+ try:
+ styledict = tag.style.__dict__
+ except AttributeError:
+ return []
+ else:
+ stylelist = [x+': ' + y for x,y in styledict.items()]
+ return [u(' style="%s"') % u('; ').join(stylelist)]
+
+ def _issingleton(self, tagname):
+ """can (and will) be overridden in subclasses"""
+ return self.shortempty
+
+ def _isinline(self, tagname):
+ """can (and will) be overridden in subclasses"""
+ return False
+
+class HtmlVisitor(SimpleUnicodeVisitor):
+
+ single = dict([(x, 1) for x in
+ ('br,img,area,param,col,hr,meta,link,base,'
+ 'input,frame').split(',')])
+ inline = dict([(x, 1) for x in
+ ('a abbr acronym b basefont bdo big br cite code dfn em font '
+ 'i img input kbd label q s samp select small span strike '
+ 'strong sub sup textarea tt u var'.split(' '))])
+
+ def repr_attribute(self, attrs, name):
+ if name == 'class_':
+ value = getattr(attrs, name)
+ if value is None:
+ return
+ return super(HtmlVisitor, self).repr_attribute(attrs, name)
+
+ def _issingleton(self, tagname):
+ return tagname in self.single
+
+ def _isinline(self, tagname):
+ return tagname in self.inline
+
+
+class _escape:
+ def __init__(self):
+ self.escape = {
+ u('"') : u('&quot;'), u('<') : u('&lt;'), u('>') : u('&gt;'),
+ u('&') : u('&amp;'), u("'") : u('&apos;'),
+ }
+ self.charef_rex = re.compile(u("|").join(self.escape.keys()))
+
+ def _replacer(self, match):
+ return self.escape[match.group(0)]
+
+ def __call__(self, ustring):
+ """ xml-escape the given unicode string. """
+ try:
+ ustring = unicode(ustring)
+ except UnicodeDecodeError:
+ ustring = unicode(ustring, 'utf-8', errors='replace')
+ return self.charef_rex.sub(self._replacer, ustring)
+
+escape = _escape()
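
A short sketch of how the html namespace above is used: tag classes are created on attribute access, raw() passes markup through unescaped, and a trailing underscore (class_) avoids the Python keyword while still serializing as class:

    from py._xmlgen import html, raw

    page = html.div(
        html.h1("hello"),
        html.p("built with ", html.em("py._xmlgen"), class_="note"),
        raw("<!-- raw markup is written out unescaped -->"),
        id="root",
    )
    # unicode() renders the tree via HtmlVisitor; indent=2 adds newlines and
    # indentation in front of nested block-level tags
    print(page.unicode(indent=2))
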
diff --git a/testing/web-platform/tests/tools/py/py/test.py b/testing/web-platform/tests/tools/py/py/test.py
new file mode 100644
index 000000000..aa5beb178
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/py/test.py
@@ -0,0 +1,10 @@
+import sys
+if __name__ == '__main__':
+ import pytest
+ sys.exit(pytest.main())
+else:
+ import sys, pytest
+ sys.modules['py.test'] = pytest
+
+# for more API entry points see the 'tests' definition
+# in __init__.py
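
When imported (rather than run as a script), the module above replaces the py.test entry in sys.modules with pytest, so the historical py.test API used throughout this test suite resolves to pytest:

    import py

    # py.test.raises is pytest.raises after the aliasing above
    excinfo = py.test.raises(ZeroDivisionError, lambda: 1 / 0)
    assert excinfo.type == ZeroDivisionError
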
diff --git a/testing/web-platform/tests/tools/py/setup.cfg b/testing/web-platform/tests/tools/py/setup.cfg
new file mode 100644
index 000000000..272e488f3
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/setup.cfg
@@ -0,0 +1,5 @@
+[wheel]
+universal = 1
+
+[devpi:upload]
+formats=sdist.tgz,bdist_wheel
diff --git a/testing/web-platform/tests/tools/py/setup.py b/testing/web-platform/tests/tools/py/setup.py
new file mode 100644
index 000000000..06f0885cd
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/setup.py
@@ -0,0 +1,38 @@
+import os, sys
+
+from setuptools import setup
+
+def main():
+ setup(
+ name='py',
+ description='library with cross-python path, ini-parsing, io, code, log facilities',
+ long_description = open('README.txt').read(),
+ version='1.4.31',
+ url='http://pylib.readthedocs.org/',
+ license='MIT license',
+ platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
+ author='holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others',
+ author_email='pytest-dev@python.org',
+ classifiers=['Development Status :: 6 - Mature',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: MIT License',
+ 'Operating System :: POSIX',
+ 'Operating System :: Microsoft :: Windows',
+ 'Operating System :: MacOS :: MacOS X',
+ 'Topic :: Software Development :: Testing',
+ 'Topic :: Software Development :: Libraries',
+ 'Topic :: Utilities',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 3'],
+ packages=['py',
+ 'py._code',
+ 'py._io',
+ 'py._log',
+ 'py._path',
+ 'py._process',
+ ],
+ zip_safe=False,
+ )
+
+if __name__ == '__main__':
+ main()
diff --git a/testing/web-platform/tests/tools/py/testing/code/test_assertion.py b/testing/web-platform/tests/tools/py/testing/code/test_assertion.py
new file mode 100644
index 000000000..e2154d0fc
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/code/test_assertion.py
@@ -0,0 +1,308 @@
+import pytest, py
+
+def exvalue():
+ return py.std.sys.exc_info()[1]
+
+def f():
+ return 2
+
+def test_assert():
+ try:
+ assert f() == 3
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 == 3\n')
+
+
+def test_assert_within_finally():
+ excinfo = py.test.raises(ZeroDivisionError, """
+ try:
+ 1/0
+ finally:
+ i = 42
+ """)
+ s = excinfo.exconly()
+ assert py.std.re.search("division.+by zero", s) is not None
+
+ #def g():
+ # A.f()
+ #excinfo = getexcinfo(TypeError, g)
+ #msg = getmsg(excinfo)
+ #assert msg.find("must be called with A") != -1
+
+
+def test_assert_multiline_1():
+ try:
+ assert (f() ==
+ 3)
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 == 3\n')
+
+def test_assert_multiline_2():
+ try:
+ assert (f() == (4,
+ 3)[-1])
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 ==')
+
+def test_in():
+ try:
+ assert "hi" in [1, 2]
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 'hi' in")
+
+def test_is():
+ try:
+ assert 1 is 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 is 2")
+
+
+@py.test.mark.skipif("sys.version_info < (2,6)")
+def test_attrib():
+ class Foo(object):
+ b = 1
+ i = Foo()
+ try:
+ assert i.b == 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 == 2")
+
+@py.test.mark.skipif("sys.version_info < (2,6)")
+def test_attrib_inst():
+ class Foo(object):
+ b = 1
+ try:
+ assert Foo().b == 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 == 2")
+
+def test_len():
+ l = list(range(42))
+ try:
+ assert len(l) == 100
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 42 == 100")
+ assert "where 42 = len([" in s
+
+
+def test_assert_keyword_arg():
+ def f(x=3):
+ return False
+ try:
+ assert f(x=5)
+ except AssertionError:
+ e = exvalue()
+ assert "x=5" in e.msg
+
+# These tests should both fail, but should fail nicely...
+class WeirdRepr:
+ def __repr__(self):
+ return '<WeirdRepr\nsecond line>'
+
+def bug_test_assert_repr():
+ v = WeirdRepr()
+ try:
+ assert v == 1
+ except AssertionError:
+ e = exvalue()
+ assert e.msg.find('WeirdRepr') != -1
+ assert e.msg.find('second line') != -1
+ assert 0
+
+def test_assert_non_string():
+ try:
+ assert 0, ['list']
+ except AssertionError:
+ e = exvalue()
+ assert e.msg.find("list") != -1
+
+def test_assert_implicit_multiline():
+ try:
+ x = [1,2,3]
+ assert x != [1,
+ 2, 3]
+ except AssertionError:
+ e = exvalue()
+ assert e.msg.find('assert [1, 2, 3] !=') != -1
+
+
+def test_assert_with_brokenrepr_arg():
+ class BrokenRepr:
+ def __repr__(self): 0 / 0
+ e = AssertionError(BrokenRepr())
+ if e.msg.find("broken __repr__") == -1:
+ py.test.fail("broken __repr__ not handled correctly")
+
+def test_multiple_statements_per_line():
+ try:
+ a = 1; assert a == 2
+ except AssertionError:
+ e = exvalue()
+ assert "assert 1 == 2" in e.msg
+
+def test_power():
+ try:
+ assert 2**3 == 7
+ except AssertionError:
+ e = exvalue()
+ assert "assert (2 ** 3) == 7" in e.msg
+
+
+class TestView:
+
+ def setup_class(cls):
+ cls.View = py.test.importorskip("py._code._assertionold").View
+
+ def test_class_dispatch(self):
+ ### Use a custom class hierarchy with existing instances
+
+ class Picklable(self.View):
+ pass
+
+ class Simple(Picklable):
+ __view__ = object
+ def pickle(self):
+ return repr(self.__obj__)
+
+ class Seq(Picklable):
+ __view__ = list, tuple, dict
+ def pickle(self):
+ return ';'.join(
+ [Picklable(item).pickle() for item in self.__obj__])
+
+ class Dict(Seq):
+ __view__ = dict
+ def pickle(self):
+ return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
+
+ assert Picklable(123).pickle() == '123'
+ assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
+ assert Picklable({1:2}).pickle() == '1!2'
+
+ def test_viewtype_class_hierarchy(self):
+ # Use a custom class hierarchy based on attributes of existing instances
+ class Operation:
+ "Existing class that I don't want to change."
+ def __init__(self, opname, *args):
+ self.opname = opname
+ self.args = args
+
+ existing = [Operation('+', 4, 5),
+ Operation('getitem', '', 'join'),
+ Operation('setattr', 'x', 'y', 3),
+ Operation('-', 12, 1)]
+
+ class PyOp(self.View):
+ def __viewkey__(self):
+ return self.opname
+ def generate(self):
+ return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
+
+ class PyBinaryOp(PyOp):
+ __view__ = ('+', '-', '*', '/')
+ def generate(self):
+ return '%s %s %s' % (self.args[0], self.opname, self.args[1])
+
+ codelines = [PyOp(op).generate() for op in existing]
+ assert codelines == ["4 + 5", "getitem('', 'join')",
+ "setattr('x', 'y', 3)", "12 - 1"]
+
+def test_underscore_api():
+ py.code._AssertionError
+ py.code._reinterpret_old # used by pypy
+ py.code._reinterpret
+
+@py.test.mark.skipif("sys.version_info < (2,6)")
+def test_assert_customizable_reprcompare(monkeypatch):
+ util = pytest.importorskip("_pytest.assertion.util")
+ monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
+ try:
+ assert 3 == 4
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert "hello" in s
+
+def test_assert_long_source_1():
+ try:
+ assert len == [
+ (None, ['somet text', 'more text']),
+ ]
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert 're-run' not in s
+ assert 'somet text' in s
+
+def test_assert_long_source_2():
+ try:
+ assert(len == [
+ (None, ['somet text', 'more text']),
+ ])
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert 're-run' not in s
+ assert 'somet text' in s
+
+def test_assert_raise_alias(testdir):
+ testdir.makepyfile("""
+ import sys
+ EX = AssertionError
+ def test_hello():
+ raise EX("hello"
+ "multi"
+ "line")
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*def test_hello*",
+ "*raise EX*",
+ "*1 failed*",
+ ])
+
+
+@pytest.mark.skipif("sys.version_info < (2,5)")
+def test_assert_raise_subclass():
+ class SomeEx(AssertionError):
+ def __init__(self, *args):
+ super(SomeEx, self).__init__()
+ try:
+ raise SomeEx("hello")
+ except AssertionError:
+ s = str(exvalue())
+ assert 're-run' not in s
+ assert 'could not determine' in s
+
+def test_assert_raises_in_nonzero_of_object_pytest_issue10():
+ class A(object):
+ def __nonzero__(self):
+ raise ValueError(42)
+ def __lt__(self, other):
+ return A()
+ def __repr__(self):
+ return "<MY42 object>"
+ def myany(x):
+ return True
+ try:
+ assert not(myany(A() < 0))
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert "<MY42 object> < 0" in s
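
The pattern these tests exercise, condensed into one sketch; the enriched message only appears while assertion reinterpretation is active (i.e. when the test is collected and run by py.test), since plain Python leaves str(e) empty:

    import py

    def answer():
        return 41

    def test_reinterpreted_message():
        try:
            assert answer() == 42
        except AssertionError:
            e = py.std.sys.exc_info()[1]
            # the failing expression is re-evaluated, so the message starts
            # with the reinterpreted form "assert 41 == 42"
            assert str(e).startswith("assert 41 == 42")
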
diff --git a/testing/web-platform/tests/tools/py/testing/code/test_code.py b/testing/web-platform/tests/tools/py/testing/code/test_code.py
new file mode 100644
index 000000000..28ec628b0
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/code/test_code.py
@@ -0,0 +1,159 @@
+import py
+import sys
+
+def test_ne():
+ code1 = py.code.Code(compile('foo = "bar"', '', 'exec'))
+ assert code1 == code1
+ code2 = py.code.Code(compile('foo = "baz"', '', 'exec'))
+ assert code2 != code1
+
+def test_code_gives_back_name_for_not_existing_file():
+ name = 'abc-123'
+ co_code = compile("pass\n", name, 'exec')
+ assert co_code.co_filename == name
+ code = py.code.Code(co_code)
+ assert str(code.path) == name
+ assert code.fullsource is None
+
+def test_code_with_class():
+ class A:
+ pass
+ py.test.raises(TypeError, "py.code.Code(A)")
+
+if True:
+ def x():
+ pass
+
+def test_code_fullsource():
+ code = py.code.Code(x)
+ full = code.fullsource
+ assert 'test_code_fullsource()' in str(full)
+
+def test_code_source():
+ code = py.code.Code(x)
+ src = code.source()
+ expected = """def x():
+ pass"""
+ assert str(src) == expected
+
+def test_frame_getsourcelineno_myself():
+ def func():
+ return sys._getframe(0)
+ f = func()
+ f = py.code.Frame(f)
+ source, lineno = f.code.fullsource, f.lineno
+ assert source[lineno].startswith(" return sys._getframe(0)")
+
+def test_getstatement_empty_fullsource():
+ def func():
+ return sys._getframe(0)
+ f = func()
+ f = py.code.Frame(f)
+ prop = f.code.__class__.fullsource
+ try:
+ f.code.__class__.fullsource = None
+ assert f.statement == py.code.Source("")
+ finally:
+ f.code.__class__.fullsource = prop
+
+def test_code_from_func():
+ co = py.code.Code(test_frame_getsourcelineno_myself)
+ assert co.firstlineno
+ assert co.path
+
+
+
+def test_builtin_patch_unpatch(monkeypatch):
+ cpy_builtin = py.builtin.builtins
+ comp = cpy_builtin.compile
+ def mycompile(*args, **kwargs):
+ return comp(*args, **kwargs)
+ class Sub(AssertionError):
+ pass
+ monkeypatch.setattr(cpy_builtin, 'AssertionError', Sub)
+ monkeypatch.setattr(cpy_builtin, 'compile', mycompile)
+ py.code.patch_builtins()
+ assert cpy_builtin.AssertionError != Sub
+ assert cpy_builtin.compile != mycompile
+ py.code.unpatch_builtins()
+ assert cpy_builtin.AssertionError is Sub
+ assert cpy_builtin.compile == mycompile
+
+
+def test_unicode_handling():
+ value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
+ def f():
+ raise Exception(value)
+ excinfo = py.test.raises(Exception, f)
+ s = str(excinfo)
+ if sys.version_info[0] < 3:
+ u = unicode(excinfo)
+
+def test_code_getargs():
+ def f1(x):
+ pass
+ c1 = py.code.Code(f1)
+ assert c1.getargs(var=True) == ('x',)
+
+ def f2(x, *y):
+ pass
+ c2 = py.code.Code(f2)
+ assert c2.getargs(var=True) == ('x', 'y')
+
+ def f3(x, **z):
+ pass
+ c3 = py.code.Code(f3)
+ assert c3.getargs(var=True) == ('x', 'z')
+
+ def f4(x, *y, **z):
+ pass
+ c4 = py.code.Code(f4)
+ assert c4.getargs(var=True) == ('x', 'y', 'z')
+
+
+def test_frame_getargs():
+ def f1(x):
+ return sys._getframe(0)
+ fr1 = py.code.Frame(f1('a'))
+ assert fr1.getargs(var=True) == [('x', 'a')]
+
+ def f2(x, *y):
+ return sys._getframe(0)
+ fr2 = py.code.Frame(f2('a', 'b', 'c'))
+ assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
+
+ def f3(x, **z):
+ return sys._getframe(0)
+ fr3 = py.code.Frame(f3('a', b='c'))
+ assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
+
+ def f4(x, *y, **z):
+ return sys._getframe(0)
+ fr4 = py.code.Frame(f4('a', 'b', c='d'))
+ assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
+ ('z', {'c': 'd'})]
+
+
+class TestExceptionInfo:
+
+ def test_bad_getsource(self):
+ try:
+ if False: pass
+ else: assert False
+ except AssertionError:
+ exci = py.code.ExceptionInfo()
+ assert exci.getrepr()
+
+
+class TestTracebackEntry:
+
+ def test_getsource(self):
+ try:
+ if False: pass
+ else: assert False
+ except AssertionError:
+ exci = py.code.ExceptionInfo()
+ entry = exci.traceback[0]
+ source = entry.getsource()
+ assert len(source) == 4
+ assert 'else: assert False' in source[3]
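
A compact sketch of the py.code.Code / py.code.Frame introspection API these tests cover; sample() exists only for illustration:

    import sys
    import py

    def sample(x, *rest):
        return sys._getframe(0)

    code = py.code.Code(sample)
    print(code.path, code.firstlineno)    # source path and starting line of sample()
    print(code.getargs(var=True))         # ('x', 'rest')

    frame = py.code.Frame(sample("a", 1, 2))
    print(frame.getargs(var=True))        # [('x', 'a'), ('rest', (1, 2))]
    print(str(frame.statement).strip())   # the 'return sys._getframe(0)' statement
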
diff --git a/testing/web-platform/tests/tools/py/testing/code/test_excinfo.py b/testing/web-platform/tests/tools/py/testing/code/test_excinfo.py
new file mode 100644
index 000000000..65742c6f6
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/code/test_excinfo.py
@@ -0,0 +1,909 @@
+# -*- coding: utf-8 -*-
+
+import py
+from py._code.code import FormattedExcinfo, ReprExceptionInfo
+queue = py.builtin._tryimport('queue', 'Queue')
+
+failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
+from test_source import astonly
+
+try:
+ import importlib
+except ImportError:
+ invalidate_import_caches = None
+else:
+ invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
+
+import pytest
+pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
+
+class TWMock:
+ def __init__(self):
+ self.lines = []
+ def sep(self, sep, line=None):
+ self.lines.append((sep, line))
+ def line(self, line, **kw):
+ self.lines.append(line)
+ def markup(self, text, **kw):
+ return text
+
+ fullwidth = 80
+
+def test_excinfo_simple():
+ try:
+ raise ValueError
+ except ValueError:
+ info = py.code.ExceptionInfo()
+ assert info.type == ValueError
+
+def test_excinfo_getstatement():
+ def g():
+ raise ValueError
+ def f():
+ g()
+ try:
+ f()
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ linenumbers = [py.code.getrawcode(f).co_firstlineno-1+3,
+ py.code.getrawcode(f).co_firstlineno-1+1,
+ py.code.getrawcode(g).co_firstlineno-1+1,]
+ l = list(excinfo.traceback)
+ foundlinenumbers = [x.lineno for x in l]
+ assert foundlinenumbers == linenumbers
+ #for x in info:
+ # print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
+ #xxx
+
+# helper call chain used by the traceback tests below
+def f():
+ #
+ raise ValueError
+ #
+def g():
+ #
+ __tracebackhide__ = True
+ f()
+ #
+def h():
+ #
+ g()
+ #
+
+class TestTraceback_f_g_h:
+ def setup_method(self, method):
+ try:
+ h()
+ except ValueError:
+ self.excinfo = py.code.ExceptionInfo()
+
+ def test_traceback_entries(self):
+ tb = self.excinfo.traceback
+ entries = list(tb)
+ assert len(tb) == 4 # maybe fragile test
+ assert len(entries) == 4 # maybe fragile test
+ names = ['f', 'g', 'h']
+ for entry in entries:
+ try:
+ names.remove(entry.frame.code.name)
+ except ValueError:
+ pass
+ assert not names
+
+ def test_traceback_entry_getsource(self):
+ tb = self.excinfo.traceback
+ s = str(tb[-1].getsource() )
+ assert s.startswith("def f():")
+ assert s.endswith("raise ValueError")
+
+ @astonly
+ @failsonjython
+ def test_traceback_entry_getsource_in_construct(self):
+ source = py.code.Source("""\
+ def xyz():
+ try:
+ raise ValueError
+ except somenoname:
+ pass
+ xyz()
+ """)
+ try:
+ exec (source.compile())
+ except NameError:
+ tb = py.code.ExceptionInfo().traceback
+ print (tb[-1].getsource())
+ s = str(tb[-1].getsource())
+ assert s.startswith("def xyz():\n try:")
+ assert s.strip().endswith("except somenoname:")
+
+ def test_traceback_cut(self):
+ co = py.code.Code(f)
+ path, firstlineno = co.path, co.firstlineno
+ traceback = self.excinfo.traceback
+ newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
+ assert len(newtraceback) == 1
+ newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
+ assert len(newtraceback) == 1
+
+ def test_traceback_cut_excludepath(self, testdir):
+ p = testdir.makepyfile("def f(): raise ValueError")
+ excinfo = py.test.raises(ValueError, "p.pyimport().f()")
+ basedir = py.path.local(py.test.__file__).dirpath()
+ newtraceback = excinfo.traceback.cut(excludepath=basedir)
+ for x in newtraceback:
+ if hasattr(x, 'path'):
+ assert not py.path.local(x.path).relto(basedir)
+ assert newtraceback[-1].frame.code.path == p
+
+ def test_traceback_filter(self):
+ traceback = self.excinfo.traceback
+ ntraceback = traceback.filter()
+ assert len(ntraceback) == len(traceback) - 1
+
+ def test_traceback_recursion_index(self):
+ def f(n):
+ if n < 10:
+ n += 1
+ f(n)
+ excinfo = py.test.raises(RuntimeError, f, 8)
+ traceback = excinfo.traceback
+ recindex = traceback.recursionindex()
+ assert recindex == 3
+
+ def test_traceback_only_specific_recursion_errors(self, monkeypatch):
+ def f(n):
+ if n == 0:
+ raise RuntimeError("hello")
+ f(n-1)
+
+ excinfo = pytest.raises(RuntimeError, f, 100)
+ monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
+ repr = excinfo.getrepr()
+ assert "RuntimeError: hello" in str(repr.reprcrash)
+
+ def test_traceback_no_recursion_index(self):
+ def do_stuff():
+ raise RuntimeError
+ def reraise_me():
+ import sys
+ exc, val, tb = sys.exc_info()
+ py.builtin._reraise(exc, val, tb)
+ def f(n):
+ try:
+ do_stuff()
+ except:
+ reraise_me()
+ excinfo = py.test.raises(RuntimeError, f, 8)
+ traceback = excinfo.traceback
+ recindex = traceback.recursionindex()
+ assert recindex is None
+
+ def test_traceback_messy_recursion(self):
+ #XXX: simplified locally testable version
+ decorator = py.test.importorskip('decorator').decorator
+
+ def log(f, *k, **kw):
+ print('%s %s' % (k, kw))
+ f(*k, **kw)
+ log = decorator(log)
+
+ def fail():
+ raise ValueError('')
+
+ fail = log(log(fail))
+
+ excinfo = py.test.raises(ValueError, fail)
+ assert excinfo.traceback.recursionindex() is None
+
+
+
+ def test_traceback_getcrashentry(self):
+ def i():
+ __tracebackhide__ = True
+ raise ValueError
+ def h():
+ i()
+ def g():
+ __tracebackhide__ = True
+ h()
+ def f():
+ g()
+
+ excinfo = py.test.raises(ValueError, f)
+ tb = excinfo.traceback
+ entry = tb.getcrashentry()
+ co = py.code.Code(h)
+ assert entry.frame.code.path == co.path
+ assert entry.lineno == co.firstlineno + 1
+ assert entry.frame.code.name == 'h'
+
+ def test_traceback_getcrashentry_empty(self):
+ def g():
+ __tracebackhide__ = True
+ raise ValueError
+ def f():
+ __tracebackhide__ = True
+ g()
+
+ excinfo = py.test.raises(ValueError, f)
+ tb = excinfo.traceback
+ entry = tb.getcrashentry()
+ co = py.code.Code(g)
+ assert entry.frame.code.path == co.path
+ assert entry.lineno == co.firstlineno + 2
+ assert entry.frame.code.name == 'g'
+
+def hello(x):
+ x + 5
+
+def test_tbentry_reinterpret():
+ try:
+ hello("hello")
+ except TypeError:
+ excinfo = py.code.ExceptionInfo()
+ tbentry = excinfo.traceback[-1]
+ msg = tbentry.reinterpret()
+ assert msg.startswith("TypeError: ('hello' + 5)")
+
+def test_excinfo_exconly():
+ excinfo = py.test.raises(ValueError, h)
+ assert excinfo.exconly().startswith('ValueError')
+ excinfo = py.test.raises(ValueError,
+ "raise ValueError('hello\\nworld')")
+ msg = excinfo.exconly(tryshort=True)
+ assert msg.startswith('ValueError')
+ assert msg.endswith("world")
+
+def test_excinfo_repr():
+ excinfo = py.test.raises(ValueError, h)
+ s = repr(excinfo)
+ assert s == "<ExceptionInfo ValueError tblen=4>"
+
+def test_excinfo_str():
+ excinfo = py.test.raises(ValueError, h)
+ s = str(excinfo)
+ assert s.startswith(__file__[:-9]) # pyc file and $py.class
+ assert s.endswith("ValueError")
+ assert len(s.split(":")) >= 3 # on windows it's 4
+
+def test_excinfo_errisinstance():
+ excinfo = py.test.raises(ValueError, h)
+ assert excinfo.errisinstance(ValueError)
+
+def test_excinfo_no_sourcecode():
+ try:
+ exec ("raise ValueError()")
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ s = str(excinfo.traceback[-1])
+ if py.std.sys.version_info < (2,5):
+ assert s == " File '<string>':1 in ?\n ???\n"
+ else:
+ assert s == " File '<string>':1 in <module>\n ???\n"
+
+def test_excinfo_no_python_sourcecode(tmpdir):
+ #XXX: simplified locally testable version
+ tmpdir.join('test.txt').write("{{ h()}}:")
+
+ jinja2 = py.test.importorskip('jinja2')
+ loader = jinja2.FileSystemLoader(str(tmpdir))
+ env = jinja2.Environment(loader=loader)
+ template = env.get_template('test.txt')
+ excinfo = py.test.raises(ValueError,
+ template.render, h=h)
+ for item in excinfo.traceback:
+ print(item) #XXX: for some reason jinja.Template.render is printed in full
+ item.source # shouldn't fail
+ if item.path.basename == 'test.txt':
+ assert str(item.source) == '{{ h()}}:'
+
+
+def test_entrysource_Queue_example():
+ try:
+ queue.Queue().get(timeout=0.001)
+ except queue.Empty:
+ excinfo = py.code.ExceptionInfo()
+ entry = excinfo.traceback[-1]
+ source = entry.getsource()
+ assert source is not None
+ s = str(source).strip()
+ assert s.startswith("def get")
+
+def test_codepath_Queue_example():
+ try:
+ queue.Queue().get(timeout=0.001)
+ except queue.Empty:
+ excinfo = py.code.ExceptionInfo()
+ entry = excinfo.traceback[-1]
+ path = entry.path
+ assert isinstance(path, py.path.local)
+ assert path.basename.lower() == "queue.py"
+ assert path.check()
+
+class TestFormattedExcinfo:
+ def pytest_funcarg__importasmod(self, request):
+ def importasmod(source):
+ source = py.code.Source(source)
+ tmpdir = request.getfuncargvalue("tmpdir")
+ modpath = tmpdir.join("mod.py")
+ tmpdir.ensure("__init__.py")
+ modpath.write(source)
+ if invalidate_import_caches is not None:
+ invalidate_import_caches()
+ return modpath.pyimport()
+ return importasmod
+
+ def excinfo_from_exec(self, source):
+ source = py.code.Source(source).strip()
+ try:
+ exec (source.compile())
+ except KeyboardInterrupt:
+ raise
+ except:
+ return py.code.ExceptionInfo()
+ assert 0, "did not raise"
+
+ def test_repr_source(self):
+ pr = FormattedExcinfo()
+ source = py.code.Source("""
+ def f(x):
+ pass
+ """).strip()
+ pr.flow_marker = "|"
+ lines = pr.get_source(source, 0)
+ assert len(lines) == 2
+ assert lines[0] == "| def f(x):"
+ assert lines[1] == " pass"
+
+ def test_repr_source_excinfo(self):
+ """ check if indentation is right """
+ pr = FormattedExcinfo()
+ excinfo = self.excinfo_from_exec("""
+ def f():
+ assert 0
+ f()
+ """)
+ pr = FormattedExcinfo()
+ source = pr._getentrysource(excinfo.traceback[-1])
+ lines = pr.get_source(source, 1, excinfo)
+ assert lines == [
+ ' def f():',
+ '> assert 0',
+ 'E assert 0'
+ ]
+
+
+ def test_repr_source_not_existing(self):
+ pr = FormattedExcinfo()
+ co = compile("raise ValueError()", "", "exec")
+ try:
+ exec (co)
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
+
+ def test_repr_many_line_source_not_existing(self):
+ pr = FormattedExcinfo()
+ co = compile("""
+a = 1
+raise ValueError()
+""", "", "exec")
+ try:
+ exec (co)
+ except ValueError:
+ excinfo = py.code.ExceptionInfo()
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
+
+ def test_repr_source_failing_fullsource(self):
+ pr = FormattedExcinfo()
+
+ class FakeCode(object):
+ class raw:
+ co_filename = '?'
+ path = '?'
+ firstlineno = 5
+
+ def fullsource(self):
+ return None
+ fullsource = property(fullsource)
+
+ class FakeFrame(object):
+ code = FakeCode()
+ f_locals = {}
+ f_globals = {}
+
+ class FakeTracebackEntry(py.code.Traceback.Entry):
+ def __init__(self, tb):
+ self.lineno = 5+3
+
+ @property
+ def frame(self):
+ return FakeFrame()
+
+ class Traceback(py.code.Traceback):
+ Entry = FakeTracebackEntry
+
+ class FakeExcinfo(py.code.ExceptionInfo):
+ typename = "Foo"
+ def __init__(self):
+ pass
+
+ def exconly(self, tryshort):
+ return "EXC"
+ def errisinstance(self, cls):
+ return False
+
+ excinfo = FakeExcinfo()
+ class FakeRawTB(object):
+ tb_next = None
+ tb = FakeRawTB()
+ excinfo.traceback = Traceback(tb)
+
+ fail = IOError()
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
+
+ fail = py.error.ENOENT
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
+
+
+ def test_repr_local(self):
+ p = FormattedExcinfo(showlocals=True)
+ loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
+ reprlocals = p.repr_locals(loc)
+ assert reprlocals.lines
+ assert reprlocals.lines[0] == '__builtins__ = <builtins>'
+ assert reprlocals.lines[1] == 'x = 3'
+ assert reprlocals.lines[2] == 'y = 5'
+ assert reprlocals.lines[3] == 'z = 7'
+
+ def test_repr_tracebackentry_lines(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = py.test.raises(ValueError, mod.func1)
+ excinfo.traceback = excinfo.traceback.filter()
+ p = FormattedExcinfo()
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
+
+ # test as intermittent entry
+ lines = reprtb.lines
+ assert lines[0] == ' def func1():'
+ assert lines[1] == '> raise ValueError("hello\\nworld")'
+
+ # test as last entry
+ p = FormattedExcinfo(showlocals=True)
+ repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = repr_entry.lines
+ assert lines[0] == ' def func1():'
+ assert lines[1] == '> raise ValueError("hello\\nworld")'
+ assert lines[2] == 'E ValueError: hello'
+ assert lines[3] == 'E world'
+ assert not lines[4:]
+
+ loc = repr_entry.reprlocals is not None
+ loc = repr_entry.reprfileloc
+ assert loc.path == mod.__file__
+ assert loc.lineno == 3
+ #assert loc.message == "ValueError: hello"
+
+ def test_repr_tracebackentry_lines(self, importasmod):
+ mod = importasmod("""
+ def func1(m, x, y, z):
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = py.test.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
+ excinfo.traceback = excinfo.traceback.filter()
+ entry = excinfo.traceback[-1]
+ p = FormattedExcinfo(funcargs=True)
+ reprfuncargs = p.repr_args(entry)
+ assert reprfuncargs.args[0] == ('m', repr("m"*90))
+ assert reprfuncargs.args[1] == ('x', '5')
+ assert reprfuncargs.args[2] == ('y', '13')
+ assert reprfuncargs.args[3] == ('z', repr("z" * 120))
+
+ p = FormattedExcinfo(funcargs=True)
+ repr_entry = p.repr_traceback_entry(entry)
+ assert repr_entry.reprfuncargs.args == reprfuncargs.args
+ tw = TWMock()
+ repr_entry.toterminal(tw)
+ assert tw.lines[0] == "m = " + repr('m' * 90)
+ assert tw.lines[1] == "x = 5, y = 13"
+ assert tw.lines[2] == "z = " + repr('z' * 120)
+
+ def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
+ mod = importasmod("""
+ def func1(x, *y, **z):
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = py.test.raises(ValueError, mod.func1, 'a', 'b', c='d')
+ excinfo.traceback = excinfo.traceback.filter()
+ entry = excinfo.traceback[-1]
+ p = FormattedExcinfo(funcargs=True)
+ reprfuncargs = p.repr_args(entry)
+ assert reprfuncargs.args[0] == ('x', repr('a'))
+ assert reprfuncargs.args[1] == ('y', repr(('b',)))
+ assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
+
+ p = FormattedExcinfo(funcargs=True)
+ repr_entry = p.repr_traceback_entry(entry)
+ assert repr_entry.reprfuncargs.args == reprfuncargs.args
+ tw = TWMock()
+ repr_entry.toterminal(tw)
+ assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
+
+ def test_repr_tracebackentry_short(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
+ lines = reprtb.lines
+ basename = py.path.local(mod.__file__).basename
+ assert lines[0] == ' func1()'
+ assert basename in str(reprtb.reprfileloc.path)
+ assert reprtb.reprfileloc.lineno == 5
+
+ # test last entry
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprtb.lines
+ assert lines[0] == ' raise ValueError("hello")'
+ assert lines[1] == 'E ValueError: hello'
+ assert basename in str(reprtb.reprfileloc.path)
+ assert reprtb.reprfileloc.lineno == 3
+
+ def test_repr_tracebackentry_no(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(style="no")
+ p.repr_traceback_entry(excinfo.traceback[-2])
+
+ p = FormattedExcinfo(style="no")
+ reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprentry.lines
+ assert lines[0] == 'E ValueError: hello'
+ assert not lines[1:]
+
+ def test_repr_traceback_tbfilter(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(tbfilter=True)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 2
+ p = FormattedExcinfo(tbfilter=False)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 3
+
+ def test_traceback_short_no_source(self, importasmod, monkeypatch):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ from py._code.code import Code
+ monkeypatch.setattr(Code, 'path', 'bogus')
+ excinfo.traceback[0].frame.code.path = "bogus"
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
+ lines = reprtb.lines
+ last_p = FormattedExcinfo(style="short")
+ last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ last_lines = last_reprtb.lines
+ monkeypatch.undo()
+ basename = py.path.local(mod.__file__).basename
+ assert lines[0] == ' func1()'
+
+ assert last_lines[0] == ' raise ValueError("hello")'
+ assert last_lines[1] == 'E ValueError: hello'
+
+ def test_repr_traceback_and_excinfo(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+
+ for style in ("long", "short"):
+ p = FormattedExcinfo(style=style)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 2
+ assert reprtb.style == style
+ assert not reprtb.extraline
+ repr = p.repr_excinfo(excinfo)
+ assert repr.reprtraceback
+ assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
+ assert repr.reprcrash.path.endswith("mod.py")
+ assert repr.reprcrash.message == "ValueError: 0"
+
+ def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+
+ p = FormattedExcinfo()
+ def raiseos():
+ raise OSError(2)
+ monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
+ assert p._makepath(__file__) == __file__
+ reprtb = p.repr_traceback(excinfo)
+
+ def test_repr_excinfo_addouterr(self, importasmod):
+ mod = importasmod("""
+ def entry():
+ raise ValueError()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ repr = excinfo.getrepr()
+ repr.addsection("title", "content")
+ twmock = TWMock()
+ repr.toterminal(twmock)
+ assert twmock.lines[-1] == "content"
+ assert twmock.lines[-2] == ("-", "title")
+
+ def test_repr_excinfo_reprcrash(self, importasmod):
+ mod = importasmod("""
+ def entry():
+ raise ValueError()
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+ repr = excinfo.getrepr()
+ assert repr.reprcrash.path.endswith("mod.py")
+ assert repr.reprcrash.lineno == 3
+ assert repr.reprcrash.message == "ValueError"
+ assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
+
+ def test_repr_traceback_recursion(self, importasmod):
+ mod = importasmod("""
+ def rec2(x):
+ return rec1(x+1)
+ def rec1(x):
+ return rec2(x-1)
+ def entry():
+ rec1(42)
+ """)
+ excinfo = py.test.raises(RuntimeError, mod.entry)
+
+ for style in ("short", "long", "no"):
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback(excinfo)
+ assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
+ assert str(reprtb)
+
+ def test_tb_entry_AssertionError(self, importasmod):
+ # probably this test is a bit redundant
+ # as py/magic/testing/test_assertion.py
+ # already tests correctness of
+ # assertion-reinterpretation logic
+ mod = importasmod("""
+ def somefunc():
+ x = 1
+ assert x == 2
+ """)
+ excinfo = py.test.raises(AssertionError, mod.somefunc)
+
+ p = FormattedExcinfo()
+ reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprentry.lines
+ assert lines[-1] == "E assert 1 == 2"
+
+ def test_reprexcinfo_getrepr(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = py.test.raises(ValueError, mod.entry)
+
+ for style in ("short", "long", "no"):
+ for showlocals in (True, False):
+ repr = excinfo.getrepr(style=style, showlocals=showlocals)
+ assert isinstance(repr, ReprExceptionInfo)
+ assert repr.reprtraceback.style == style
+
+ def test_reprexcinfo_unicode(self):
+ from py._code.code import TerminalRepr
+ class MyRepr(TerminalRepr):
+ def toterminal(self, tw):
+ tw.line(py.builtin._totext("Ñ", "utf-8"))
+ x = py.builtin._totext(MyRepr())
+ assert x == py.builtin._totext("Ñ", "utf-8")
+
+ def test_toterminal_long(self, importasmod):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == " def f():"
+ assert tw.lines[1] == "> g(3)"
+ assert tw.lines[2] == ""
+ assert tw.lines[3].endswith("mod.py:5: ")
+ assert tw.lines[4] == ("_ ", None)
+ assert tw.lines[5] == ""
+ assert tw.lines[6] == " def g(x):"
+ assert tw.lines[7] == "> raise ValueError(x)"
+ assert tw.lines[8] == "E ValueError: 3"
+ assert tw.lines[9] == ""
+ assert tw.lines[10].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_missing_source(self, importasmod, tmpdir):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ tmpdir.join('mod.py').remove()
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == "> ???"
+ assert tw.lines[1] == ""
+ assert tw.lines[2].endswith("mod.py:5: ")
+ assert tw.lines[3] == ("_ ", None)
+ assert tw.lines[4] == ""
+ assert tw.lines[5] == "> ???"
+ assert tw.lines[6] == "E ValueError: 3"
+ assert tw.lines[7] == ""
+ assert tw.lines[8].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ tmpdir.join('mod.py').write('asdf')
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == "> ???"
+ assert tw.lines[1] == ""
+ assert tw.lines[2].endswith("mod.py:5: ")
+ assert tw.lines[3] == ("_ ", None)
+ assert tw.lines[4] == ""
+ assert tw.lines[5] == "> ???"
+ assert tw.lines[6] == "E ValueError: 3"
+ assert tw.lines[7] == ""
+ assert tw.lines[8].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_filenames(self, importasmod):
+ mod = importasmod("""
+ def f():
+ raise ValueError()
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ tw = TWMock()
+ path = py.path.local(mod.__file__)
+ old = path.dirpath().chdir()
+ try:
+ repr = excinfo.getrepr(abspath=False)
+ repr.toterminal(tw)
+ line = tw.lines[-1]
+ x = py.path.local().bestrelpath(path)
+ if len(x) < len(str(path)):
+ assert line == "mod.py:3: ValueError"
+
+ repr = excinfo.getrepr(abspath=True)
+ repr.toterminal(tw)
+ line = tw.lines[-1]
+ assert line == "%s:3: ValueError" %(path,)
+ finally:
+ old.chdir()
+
+ @py.test.mark.multi(reproptions=[
+ {'style': style, 'showlocals': showlocals,
+ 'funcargs': funcargs, 'tbfilter': tbfilter
+ } for style in ("long", "short", "no")
+ for showlocals in (True, False)
+ for tbfilter in (True, False)
+ for funcargs in (True, False)])
+ def test_format_excinfo(self, importasmod, reproptions):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ tw = py.io.TerminalWriter(stringio=True)
+ repr = excinfo.getrepr(**reproptions)
+ repr.toterminal(tw)
+ assert tw.stringio.getvalue()
+
+
+ def test_native_style(self):
+ excinfo = self.excinfo_from_exec("""
+ assert 0
+ """)
+ repr = excinfo.getrepr(style='native')
+ assert "assert 0" in str(repr.reprcrash)
+ s = str(repr)
+ assert s.startswith('Traceback (most recent call last):\n File')
+ assert s.endswith('\nAssertionError: assert 0')
+ assert 'exec (source.compile())' in s
+ # python 2.4 fails to get the source line for the assert
+ if py.std.sys.version_info >= (2, 5):
+ assert s.count('assert 0') == 2
+
+ def test_traceback_repr_style(self, importasmod):
+ mod = importasmod("""
+ def f():
+ g()
+ def g():
+ h()
+ def h():
+ i()
+ def i():
+ raise ValueError()
+ """)
+ excinfo = py.test.raises(ValueError, mod.f)
+ excinfo.traceback = excinfo.traceback.filter()
+ excinfo.traceback[1].set_repr_style("short")
+ excinfo.traceback[2].set_repr_style("short")
+ r = excinfo.getrepr(style="long")
+ tw = TWMock()
+ r.toterminal(tw)
+ for line in tw.lines: print (line)
+ assert tw.lines[0] == ""
+ assert tw.lines[1] == " def f():"
+ assert tw.lines[2] == "> g()"
+ assert tw.lines[3] == ""
+ assert tw.lines[4].endswith("mod.py:3: ")
+ assert tw.lines[5] == ("_ ", None)
+ assert tw.lines[6].endswith("in g")
+ assert tw.lines[7] == " h()"
+ assert tw.lines[8].endswith("in h")
+ assert tw.lines[9] == " i()"
+ assert tw.lines[10] == ("_ ", None)
+ assert tw.lines[11] == ""
+ assert tw.lines[12] == " def i():"
+ assert tw.lines[13] == "> raise ValueError()"
+ assert tw.lines[14] == "E ValueError"
+ assert tw.lines[15] == ""
+ assert tw.lines[16].endswith("mod.py:9: ValueError")
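
The core ExceptionInfo workflow exercised above, in miniature; divide() is only an illustrative helper:

    import py

    def divide(a, b):
        return a / b

    try:
        divide(1, 0)
    except ZeroDivisionError:
        excinfo = py.code.ExceptionInfo()

    print(excinfo.exconly())                      # "ZeroDivisionError: ..."
    print(excinfo.traceback[-1].frame.code.name)  # 'divide'

    # getrepr() builds a ReprExceptionInfo that renders to a terminal writer
    rep = excinfo.getrepr(style="short", showlocals=True)
    tw = py.io.TerminalWriter(stringio=True)
    rep.toterminal(tw)
    print(tw.stringio.getvalue())
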
diff --git a/testing/web-platform/tests/tools/py/testing/code/test_source.py b/testing/web-platform/tests/tools/py/testing/code/test_source.py
new file mode 100644
index 000000000..830de2c95
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/code/test_source.py
@@ -0,0 +1,651 @@
+from py.code import Source
+import py
+import sys
+
+from py._code.source import _ast
+if _ast is not None:
+ astonly = py.test.mark.nothing
+else:
+ astonly = py.test.mark.xfail("True", reason="only works with AST-compile")
+
+failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
+
+def test_source_str_function():
+ x = Source("3")
+ assert str(x) == "3"
+
+ x = Source(" 3")
+ assert str(x) == "3"
+
+ x = Source("""
+ 3
+ """, rstrip=False)
+ assert str(x) == "\n3\n "
+
+ x = Source("""
+ 3
+ """, rstrip=True)
+ assert str(x) == "\n3"
+
+def test_unicode():
+ try:
+ unicode
+ except NameError:
+ return
+ x = Source(unicode("4"))
+ assert str(x) == "4"
+ co = py.code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
+ val = eval(co)
+ assert isinstance(val, unicode)
+
+def test_source_from_function():
+ source = py.code.Source(test_source_str_function)
+ assert str(source).startswith('def test_source_str_function():')
+
+def test_source_from_method():
+ class TestClass:
+ def test_method(self):
+ pass
+ source = py.code.Source(TestClass().test_method)
+ assert source.lines == ["def test_method(self):",
+ " pass"]
+
+def test_source_from_lines():
+ lines = ["a \n", "b\n", "c"]
+ source = py.code.Source(lines)
+ assert source.lines == ['a ', 'b', 'c']
+
+def test_source_from_inner_function():
+ def f():
+ pass
+ source = py.code.Source(f, deindent=False)
+ assert str(source).startswith(' def f():')
+ source = py.code.Source(f)
+ assert str(source).startswith('def f():')
+
+def test_source_putaround_simple():
+ source = Source("raise ValueError")
+ source = source.putaround(
+ "try:", """\
+ except ValueError:
+ x = 42
+ else:
+ x = 23""")
+ assert str(source)=="""\
+try:
+ raise ValueError
+except ValueError:
+ x = 42
+else:
+ x = 23"""
+
+def test_source_putaround():
+ source = Source()
+ source = source.putaround("""
+ if 1:
+ x=1
+ """)
+ assert str(source).strip() == "if 1:\n x=1"
+
+def test_source_strips():
+ source = Source("")
+ assert source == Source()
+ assert str(source) == ''
+ assert source.strip() == source
+
+def test_source_strip_multiline():
+ source = Source()
+ source.lines = ["", " hello", " "]
+ source2 = source.strip()
+ assert source2.lines == [" hello"]
+
+def test_syntaxerror_rerepresentation():
+ ex = py.test.raises(SyntaxError, py.code.compile, 'xyz xyz')
+ assert ex.value.lineno == 1
+ assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython?
+ assert ex.value.text.strip(), 'x x'
+
+def test_isparseable():
+ assert Source("hello").isparseable()
+ assert Source("if 1:\n pass").isparseable()
+ assert Source(" \nif 1:\n pass").isparseable()
+ assert not Source("if 1:\n").isparseable()
+ assert not Source(" \nif 1:\npass").isparseable()
+ assert not Source(chr(0)).isparseable()
+
+class TestAccesses:
+ source = Source("""\
+ def f(x):
+ pass
+ def g(x):
+ pass
+ """)
+ def test_getrange(self):
+ x = self.source[0:2]
+ assert x.isparseable()
+ assert len(x.lines) == 2
+ assert str(x) == "def f(x):\n pass"
+
+ def test_getline(self):
+ x = self.source[0]
+ assert x == "def f(x):"
+
+ def test_len(self):
+ assert len(self.source) == 4
+
+ def test_iter(self):
+ l = [x for x in self.source]
+ assert len(l) == 4
+
+class TestSourceParsingAndCompiling:
+ source = Source("""\
+ def f(x):
+ assert (x ==
+ 3 +
+ 4)
+ """).strip()
+
+ def test_compile(self):
+ co = py.code.compile("x=3")
+ d = {}
+ exec (co, d)
+ assert d['x'] == 3
+
+ def test_compile_and_getsource_simple(self):
+ co = py.code.compile("x=3")
+ exec (co)
+ source = py.code.Source(co)
+ assert str(source) == "x=3"
+
+ def test_compile_and_getsource_through_same_function(self):
+ def gensource(source):
+ return py.code.compile(source)
+ co1 = gensource("""
+ def f():
+ raise KeyError()
+ """)
+ co2 = gensource("""
+ def f():
+ raise ValueError()
+ """)
+ source1 = py.std.inspect.getsource(co1)
+ assert 'KeyError' in source1
+ source2 = py.std.inspect.getsource(co2)
+ assert 'ValueError' in source2
+
+ def test_getstatement(self):
+ #print str(self.source)
+ ass = str(self.source[1:])
+ for i in range(1, 4):
+ #print "trying start in line %r" % self.source[i]
+ s = self.source.getstatement(i)
+ #x = s.deindent()
+ assert str(s) == ass
+
+ def test_getstatementrange_triple_quoted(self):
+ #print str(self.source)
+ source = Source("""hello('''
+ ''')""")
+ s = source.getstatement(0)
+ assert s == str(source)
+ s = source.getstatement(1)
+ assert s == str(source)
+
+ @astonly
+ def test_getstatementrange_within_constructs(self):
+ source = Source("""\
+ try:
+ try:
+ raise ValueError
+ except SomeThing:
+ pass
+ finally:
+ 42
+ """)
+ assert len(source) == 7
+ # check all linenos that could occur in a traceback
+ #assert source.getstatementrange(0) == (0, 7)
+ #assert source.getstatementrange(1) == (1, 5)
+ assert source.getstatementrange(2) == (2, 3)
+ assert source.getstatementrange(3) == (3, 4)
+ assert source.getstatementrange(4) == (4, 5)
+ #assert source.getstatementrange(5) == (0, 7)
+ assert source.getstatementrange(6) == (6, 7)
+
+ def test_getstatementrange_bug(self):
+ source = Source("""\
+ try:
+ x = (
+ y +
+ z)
+ except:
+ pass
+ """)
+ assert len(source) == 6
+ assert source.getstatementrange(2) == (1, 4)
+
+ def test_getstatementrange_bug2(self):
+ source = Source("""\
+ assert (
+ 33
+ ==
+ [
+ X(3,
+ b=1, c=2
+ ),
+ ]
+ )
+ """)
+ assert len(source) == 9
+ assert source.getstatementrange(5) == (0, 9)
+
+ def test_getstatementrange_ast_issue58(self):
+ source = Source("""\
+
+ def test_some():
+ for a in [a for a in
+ CAUSE_ERROR]: pass
+
+ x = 3
+ """)
+ assert getstatement(2, source).lines == source.lines[2:3]
+ assert getstatement(3, source).lines == source.lines[3:4]
+
+ @py.test.mark.skipif("sys.version_info < (2,6)")
+ def test_getstatementrange_out_of_bounds_py3(self):
+ source = Source("if xxx:\n from .collections import something")
+ r = source.getstatementrange(1)
+ assert r == (1,2)
+
+ def test_getstatementrange_with_syntaxerror_issue7(self):
+ source = Source(":")
+ py.test.raises(SyntaxError, lambda: source.getstatementrange(0))
+
+ @py.test.mark.skipif("sys.version_info < (2,6)")
+ def test_compile_to_ast(self):
+ import ast
+ source = Source("x = 4")
+ mod = source.compile(flag=ast.PyCF_ONLY_AST)
+ assert isinstance(mod, ast.Module)
+ compile(mod, "<filename>", "exec")
+
+ def test_compile_and_getsource(self):
+ co = self.source.compile()
+ py.builtin.exec_(co, globals())
+ f(7)
+ excinfo = py.test.raises(AssertionError, "f(6)")
+ frame = excinfo.traceback[-1].frame
+ stmt = frame.code.fullsource.getstatement(frame.lineno)
+ #print "block", str(block)
+ assert str(stmt).strip().startswith('assert')
+
+ def test_compilefuncs_and_path_sanity(self):
+ def check(comp, name):
+ co = comp(self.source, name)
+ if not name:
+ expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
+ else:
+ expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
+ fn = co.co_filename
+ assert fn.endswith(expected)
+
+ mycode = py.code.Code(self.test_compilefuncs_and_path_sanity)
+ mylineno = mycode.firstlineno
+ mypath = mycode.path
+
+ for comp in py.code.compile, py.code.Source.compile:
+ for name in '', None, 'my':
+ yield check, comp, name
+
+ def test_offsetless_synerr(self):
+ py.test.raises(SyntaxError, py.code.compile, "lambda a,a: 0", mode='eval')
+
+def test_getstartingblock_singleline():
+ class A:
+ def __init__(self, *args):
+ frame = sys._getframe(1)
+ self.source = py.code.Frame(frame).statement
+
+ x = A('x', 'y')
+
+ l = [i for i in x.source.lines if i.strip()]
+ assert len(l) == 1
+
+def test_getstartingblock_multiline():
+ class A:
+ def __init__(self, *args):
+ frame = sys._getframe(1)
+ self.source = py.code.Frame(frame).statement
+
+ x = A('x',
+ 'y' \
+ ,
+ 'z')
+
+ l = [i for i in x.source.lines if i.strip()]
+ assert len(l) == 4
+
+def test_getline_finally():
+ def c(): pass
+ excinfo = py.test.raises(TypeError, """
+ teardown = None
+ try:
+ c(1)
+ finally:
+ if teardown:
+ teardown()
+ """)
+ source = excinfo.traceback[-1].statement
+ assert str(source).strip() == 'c(1)'
+
+def test_getfuncsource_dynamic():
+ source = """
+ def f():
+ raise ValueError
+
+ def g(): pass
+ """
+ co = py.code.compile(source)
+ py.builtin.exec_(co, globals())
+ assert str(py.code.Source(f)).strip() == 'def f():\n raise ValueError'
+ assert str(py.code.Source(g)).strip() == 'def g(): pass'
+
+
+def test_getfuncsource_with_multine_string():
+ def f():
+ c = '''while True:
+ pass
+'''
+ assert str(py.code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
+
+
+def test_deindent():
+ from py._code.source import deindent as deindent
+ assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
+
+ def f():
+ c = '''while True:
+ pass
+'''
+ import inspect
+ lines = deindent(inspect.getsource(f).splitlines())
+ assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
+
+ source = """
+ def f():
+ def g():
+ pass
+ """
+ lines = deindent(source.splitlines())
+ assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
+
+@py.test.mark.xfail("sys.version_info[:3] < (2,7,0) or "
+ "((3,0) <= sys.version_info[:2] < (3,2))")
+def test_source_of_class_at_eof_without_newline(tmpdir):
+ # this test fails because the implicit inspect.getsource(A) below
+ # does not return the "x = 1" last line.
+ source = py.code.Source('''
+ class A(object):
+ def method(self):
+ x = 1
+ ''')
+ path = tmpdir.join("a.py")
+ path.write(source)
+ s2 = py.code.Source(tmpdir.join("a.py").pyimport().A)
+ assert str(source).strip() == str(s2).strip()
+
+if True:
+ def x():
+ pass
+
+def test_getsource_fallback():
+ from py._code.source import getsource
+ expected = """def x():
+ pass"""
+ src = getsource(x)
+ assert src == expected
+
+def test_idem_compile_and_getsource():
+ from py._code.source import getsource
+ expected = "def x(): pass"
+ co = py.code.compile(expected)
+ src = getsource(co)
+ assert src == expected
+
+def test_findsource_fallback():
+ from py._code.source import findsource
+ src, lineno = findsource(x)
+ assert 'test_findsource_simple' in str(src)
+ assert src[lineno] == ' def x():'
+
+def test_findsource():
+ from py._code.source import findsource
+ co = py.code.compile("""if 1:
+ def x():
+ pass
+""")
+
+ src, lineno = findsource(co)
+ assert 'if 1:' in str(src)
+
+ d = {}
+ eval(co, d)
+ src, lineno = findsource(d['x'])
+ assert 'if 1:' in str(src)
+ assert src[lineno] == " def x():"
+
+
+def test_getfslineno():
+ from py.code import getfslineno
+
+ def f(x):
+ pass
+
+ fspath, lineno = getfslineno(f)
+
+ assert fspath.basename == "test_source.py"
+ assert lineno == py.code.getrawcode(f).co_firstlineno-1 # see findsource
+
+ class A(object):
+ pass
+
+ fspath, lineno = getfslineno(A)
+
+ _, A_lineno = py.std.inspect.findsource(A)
+ assert fspath.basename == "test_source.py"
+ assert lineno == A_lineno
+
+ assert getfslineno(3) == ("", -1)
+ class B:
+ pass
+ B.__name__ = "B2"
+ assert getfslineno(B)[1] == -1
+
+def test_code_of_object_instance_with_call():
+ class A:
+ pass
+ py.test.raises(TypeError, lambda: py.code.Source(A()))
+ class WithCall:
+ def __call__(self):
+ pass
+
+ code = py.code.Code(WithCall())
+ assert 'pass' in str(code.source())
+
+ class Hello(object):
+ def __call__(self):
+ pass
+ py.test.raises(TypeError, lambda: py.code.Code(Hello))
+
+
+def getstatement(lineno, source):
+ from py._code.source import getstatementrange_ast
+ source = py.code.Source(source, deindent=False)
+ ast, start, end = getstatementrange_ast(lineno, source)
+ return source[start:end]
+
+def test_oneline():
+ source = getstatement(0, "raise ValueError")
+ assert str(source) == "raise ValueError"
+
+def test_comment_and_no_newline_at_end():
+ from py._code.source import getstatementrange_ast
+ source = Source(['def test_basic_complex():',
+ ' assert 1 == 2',
+ '# vim: filetype=pyopencl:fdm=marker'])
+ ast, start, end = getstatementrange_ast(1, source)
+ assert end == 2
+
+def test_oneline_and_comment():
+ source = getstatement(0, "raise ValueError\n#hello")
+ assert str(source) == "raise ValueError"
+
+def test_comments():
+ source = '''def test():
+ "comment 1"
+ x = 1
+ # comment 2
+ # comment 3
+
+ assert False
+
+"""
+comment 4
+"""
+'''
+ for line in range(2,6):
+ assert str(getstatement(line, source)) == ' x = 1'
+ for line in range(6,10):
+ assert str(getstatement(line, source)) == ' assert False'
+ assert str(getstatement(10, source)) == '"""'
+
+def test_comment_in_statement():
+ source = '''test(foo=1,
+ # comment 1
+ bar=2)
+'''
+ for line in range(1,3):
+ assert str(getstatement(line, source)) == \
+ 'test(foo=1,\n # comment 1\n bar=2)'
+
+def test_single_line_else():
+ source = getstatement(1, "if False: 2\nelse: 3")
+ assert str(source) == "else: 3"
+
+def test_single_line_finally():
+ source = getstatement(1, "try: 1\nfinally: 3")
+ assert str(source) == "finally: 3"
+
+def test_issue55():
+ source = ('def round_trip(dinp):\n assert 1 == dinp\n'
+ 'def test_rt():\n round_trip("""\n""")\n')
+ s = getstatement(3, source)
+ assert str(s) == ' round_trip("""\n""")'
+
+
+def XXXtest_multiline():
+ source = getstatement(0, """\
+raise ValueError(
+ 23
+)
+x = 3
+""")
+ assert str(source) == "raise ValueError(\n 23\n)"
+
+class TestTry:
+ pytestmark = astonly
+ source = """\
+try:
+ raise ValueError
+except Something:
+ raise IndexError(1)
+else:
+ raise KeyError()
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " raise ValueError"
+
+ def test_except_line(self):
+ source = getstatement(2, self.source)
+ assert str(source) == "except Something:"
+
+ def test_except_body(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " raise IndexError(1)"
+
+ def test_else(self):
+ source = getstatement(5, self.source)
+ assert str(source) == " raise KeyError()"
+
+class TestTryFinally:
+ source = """\
+try:
+ raise ValueError
+finally:
+ raise IndexError(1)
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " raise ValueError"
+
+ def test_finally(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " raise IndexError(1)"
+
+
+
+class TestIf:
+ pytestmark = astonly
+ source = """\
+if 1:
+ y = 3
+elif False:
+ y = 5
+else:
+ y = 7
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " y = 3"
+
+ def test_elif_clause(self):
+ source = getstatement(2, self.source)
+ assert str(source) == "elif False:"
+
+ def test_elif(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " y = 5"
+
+ def test_else(self):
+ source = getstatement(5, self.source)
+ assert str(source) == " y = 7"
+
+def test_semicolon():
+ s = """\
+hello ; pytest.skip()
+"""
+ source = getstatement(0, s)
+ assert str(source) == s.strip()
+
+def test_def_online():
+ s = """\
+def func(): raise ValueError(42)
+
+def something():
+ pass
+"""
+ source = getstatement(0, s)
+ assert str(source) == "def func(): raise ValueError(42)"
+
+def XXX_test_expression_multiline():
+ source = """\
+something
+'''
+'''"""
+ result = getstatement(1, source)
+ assert str(result) == "'''\n'''"
+
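The tests above drive statement extraction through the small getstatement() helper built on py.code.Source and getstatementrange_ast. A minimal sketch of that flow, assuming the vendored py package is importable; the sample source string is illustrative only:

    import py
    from py._code.source import getstatementrange_ast

    # mirror the getstatement() helper above: wrap the text without deindenting
    source = py.code.Source("if True:\n    x = 1\n    y = 2\n", deindent=False)

    # find the full statement containing line 1 ("    x = 1", zero-based)
    astnode, start, end = getstatementrange_ast(1, source)

    # slicing a Source yields the statement as its own Source object
    assert str(source[start:end]) == "    x = 1"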
diff --git a/testing/web-platform/tests/tools/py/testing/conftest.py b/testing/web-platform/tests/tools/py/testing/conftest.py
new file mode 100644
index 000000000..0f956b3dd
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/conftest.py
@@ -0,0 +1,3 @@
+
+pytest_plugins = "pytester",
+
diff --git a/testing/web-platform/tests/tools/py/testing/io_/__init__.py b/testing/web-platform/tests/tools/py/testing/io_/__init__.py
new file mode 100644
index 000000000..792d60054
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/io_/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/testing/web-platform/tests/tools/py/testing/io_/test_capture.py b/testing/web-platform/tests/tools/py/testing/io_/test_capture.py
new file mode 100644
index 000000000..5745e12a1
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/io_/test_capture.py
@@ -0,0 +1,501 @@
+from __future__ import with_statement
+
+import os, sys
+import py
+
+needsdup = py.test.mark.skipif("not hasattr(os, 'dup')")
+
+from py.builtin import print_
+
+if sys.version_info >= (3,0):
+ def tobytes(obj):
+ if isinstance(obj, str):
+ obj = obj.encode('UTF-8')
+ assert isinstance(obj, bytes)
+ return obj
+ def totext(obj):
+ if isinstance(obj, bytes):
+ obj = str(obj, 'UTF-8')
+ assert isinstance(obj, str)
+ return obj
+else:
+ def tobytes(obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode('UTF-8')
+ assert isinstance(obj, str)
+ return obj
+ def totext(obj):
+ if isinstance(obj, str):
+ obj = unicode(obj, 'UTF-8')
+ assert isinstance(obj, unicode)
+ return obj
+
+def oswritebytes(fd, obj):
+ os.write(fd, tobytes(obj))
+
+class TestTextIO:
+ def test_text(self):
+ f = py.io.TextIO()
+ f.write("hello")
+ s = f.getvalue()
+ assert s == "hello"
+ f.close()
+
+ def test_unicode_and_str_mixture(self):
+ f = py.io.TextIO()
+ if sys.version_info >= (3,0):
+ f.write("\u00f6")
+ py.test.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
+ else:
+ f.write(unicode("\u00f6", 'UTF-8'))
+ f.write("hello") # bytes
+ s = f.getvalue()
+ f.close()
+ assert isinstance(s, unicode)
+
+def test_bytes_io():
+ f = py.io.BytesIO()
+ f.write(tobytes("hello"))
+ py.test.raises(TypeError, "f.write(totext('hello'))")
+ s = f.getvalue()
+ assert s == tobytes("hello")
+
+def test_dontreadfrominput():
+ from py._io.capture import DontReadFromInput
+ f = DontReadFromInput()
+ assert not f.isatty()
+ py.test.raises(IOError, f.read)
+ py.test.raises(IOError, f.readlines)
+ py.test.raises(IOError, iter, f)
+ py.test.raises(ValueError, f.fileno)
+ f.close() # just for completeness
+
+def pytest_funcarg__tmpfile(request):
+ testdir = request.getfuncargvalue("testdir")
+ f = testdir.makepyfile("").open('wb+')
+ request.addfinalizer(f.close)
+ return f
+
+@needsdup
+def test_dupfile(tmpfile):
+ flist = []
+ for i in range(5):
+ nf = py.io.dupfile(tmpfile, encoding="utf-8")
+ assert nf != tmpfile
+ assert nf.fileno() != tmpfile.fileno()
+ assert nf not in flist
+ print_(i, end="", file=nf)
+ flist.append(nf)
+ for i in range(5):
+ f = flist[i]
+ f.close()
+ tmpfile.seek(0)
+ s = tmpfile.read()
+ assert "01234" in repr(s)
+ tmpfile.close()
+
+def test_dupfile_no_mode():
+ """
+ dupfile should trap an AttributeError and return f if no mode is supplied.
+ """
+ class SomeFileWrapper(object):
+ "An object with a fileno method but no mode attribute"
+ def fileno(self):
+ return 1
+ tmpfile = SomeFileWrapper()
+ assert py.io.dupfile(tmpfile) is tmpfile
+ with py.test.raises(AttributeError):
+ py.io.dupfile(tmpfile, raising=True)
+
+def lsof_check(func):
+ pid = os.getpid()
+ try:
+ out = py.process.cmdexec("lsof -p %d" % pid)
+ except py.process.cmdexec.Error:
+ py.test.skip("could not run 'lsof'")
+ func()
+ out2 = py.process.cmdexec("lsof -p %d" % pid)
+ len1 = len([x for x in out.split("\n") if "REG" in x])
+ len2 = len([x for x in out2.split("\n") if "REG" in x])
+ assert len2 < len1 + 3, out2
+
+class TestFDCapture:
+ pytestmark = needsdup
+
+ def test_not_now(self, tmpfile):
+ fd = tmpfile.fileno()
+ cap = py.io.FDCapture(fd, now=False)
+ data = tobytes("hello")
+ os.write(fd, data)
+ f = cap.done()
+ s = f.read()
+ assert not s
+ cap = py.io.FDCapture(fd, now=False)
+ cap.start()
+ os.write(fd, data)
+ f = cap.done()
+ s = f.read()
+ assert s == "hello"
+
+ def test_simple(self, tmpfile):
+ fd = tmpfile.fileno()
+ cap = py.io.FDCapture(fd)
+ data = tobytes("hello")
+ os.write(fd, data)
+ f = cap.done()
+ s = f.read()
+ assert s == "hello"
+ f.close()
+
+ def test_simple_many(self, tmpfile):
+ for i in range(10):
+ self.test_simple(tmpfile)
+
+ def test_simple_many_check_open_files(self, tmpfile):
+ lsof_check(lambda: self.test_simple_many(tmpfile))
+
+ def test_simple_fail_second_start(self, tmpfile):
+ fd = tmpfile.fileno()
+ cap = py.io.FDCapture(fd)
+ f = cap.done()
+ py.test.raises(ValueError, cap.start)
+ f.close()
+
+ def test_stderr(self):
+ cap = py.io.FDCapture(2, patchsys=True)
+ print_("hello", file=sys.stderr)
+ f = cap.done()
+ s = f.read()
+ assert s == "hello\n"
+
+ def test_stdin(self, tmpfile):
+ tmpfile.write(tobytes("3"))
+ tmpfile.seek(0)
+ cap = py.io.FDCapture(0, tmpfile=tmpfile)
+ # check with os.read() directly instead of raw_input(), because
+ # sys.stdin itself may be redirected (as py.test now does by default)
+ x = os.read(0, 100).strip()
+ f = cap.done()
+ assert x == tobytes("3")
+
+ def test_writeorg(self, tmpfile):
+ data1, data2 = tobytes("foo"), tobytes("bar")
+ try:
+ cap = py.io.FDCapture(tmpfile.fileno())
+ tmpfile.write(data1)
+ cap.writeorg(data2)
+ finally:
+ tmpfile.close()
+ f = cap.done()
+ scap = f.read()
+ assert scap == totext(data1)
+ stmp = open(tmpfile.name, 'rb').read()
+ assert stmp == data2
+
+
+class TestStdCapture:
+ def getcapture(self, **kw):
+ return py.io.StdCapture(**kw)
+
+ def test_capturing_done_simple(self):
+ cap = self.getcapture()
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ outfile, errfile = cap.done()
+ s = outfile.read()
+ assert s == "hello"
+ s = errfile.read()
+ assert s == "world"
+
+ def test_capturing_reset_simple(self):
+ cap = self.getcapture()
+ print("hello world")
+ sys.stderr.write("hello error\n")
+ out, err = cap.reset()
+ assert out == "hello world\n"
+ assert err == "hello error\n"
+
+ def test_capturing_readouterr(self):
+ cap = self.getcapture()
+ try:
+ print ("hello world")
+ sys.stderr.write("hello error\n")
+ out, err = cap.readouterr()
+ assert out == "hello world\n"
+ assert err == "hello error\n"
+ sys.stderr.write("error2")
+ finally:
+ out, err = cap.reset()
+ assert err == "error2"
+
+ def test_capturing_readouterr_unicode(self):
+ cap = self.getcapture()
+ print ("hx\xc4\x85\xc4\x87")
+ out, err = cap.readouterr()
+ assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
+
+ @py.test.mark.skipif('sys.version_info >= (3,)',
+ reason='text output different for bytes on python3')
+ def test_capturing_readouterr_decode_error_handling(self):
+ cap = self.getcapture()
+        # triggered an internal error in pytest
+ print('\xa6')
+ out, err = cap.readouterr()
+ assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
+
+ def test_capturing_mixed(self):
+ cap = self.getcapture(mixed=True)
+ sys.stdout.write("hello ")
+ sys.stderr.write("world")
+ sys.stdout.write(".")
+ out, err = cap.reset()
+ assert out.strip() == "hello world."
+ assert not err
+
+ def test_reset_twice_error(self):
+ cap = self.getcapture()
+ print ("hello")
+ out, err = cap.reset()
+ py.test.raises(ValueError, cap.reset)
+ assert out == "hello\n"
+ assert not err
+
+ def test_capturing_modify_sysouterr_in_between(self):
+ oldout = sys.stdout
+ olderr = sys.stderr
+ cap = self.getcapture()
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ sys.stdout = py.io.TextIO()
+ sys.stderr = py.io.TextIO()
+ print ("not seen")
+ sys.stderr.write("not seen\n")
+ out, err = cap.reset()
+ assert out == "hello"
+ assert err == "world"
+ assert sys.stdout == oldout
+ assert sys.stderr == olderr
+
+ def test_capturing_error_recursive(self):
+ cap1 = self.getcapture()
+ print ("cap1")
+ cap2 = self.getcapture()
+ print ("cap2")
+ out2, err2 = cap2.reset()
+ out1, err1 = cap1.reset()
+ assert out1 == "cap1\n"
+ assert out2 == "cap2\n"
+
+ def test_just_out_capture(self):
+ cap = self.getcapture(out=True, err=False)
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ out, err = cap.reset()
+ assert out == "hello"
+ assert not err
+
+ def test_just_err_capture(self):
+ cap = self.getcapture(out=False, err=True)
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ out, err = cap.reset()
+ assert err == "world"
+ assert not out
+
+ def test_stdin_restored(self):
+ old = sys.stdin
+ cap = self.getcapture(in_=True)
+ newstdin = sys.stdin
+ out, err = cap.reset()
+ assert newstdin != sys.stdin
+ assert sys.stdin is old
+
+ def test_stdin_nulled_by_default(self):
+ print ("XXX this test may well hang instead of crashing")
+ print ("XXX which indicates an error in the underlying capturing")
+ print ("XXX mechanisms")
+ cap = self.getcapture()
+ py.test.raises(IOError, "sys.stdin.read()")
+ out, err = cap.reset()
+
+ def test_suspend_resume(self):
+ cap = self.getcapture(out=True, err=False, in_=False)
+ try:
+ print ("hello")
+ sys.stderr.write("error\n")
+ out, err = cap.suspend()
+ assert out == "hello\n"
+ assert not err
+ print ("in between")
+ sys.stderr.write("in between\n")
+ cap.resume()
+ print ("after")
+ sys.stderr.write("error_after\n")
+ finally:
+ out, err = cap.reset()
+ assert out == "after\n"
+ assert not err
+
+class TestStdCaptureNotNow(TestStdCapture):
+ def getcapture(self, **kw):
+ kw['now'] = False
+ cap = py.io.StdCapture(**kw)
+ cap.startall()
+ return cap
+
+class TestStdCaptureFD(TestStdCapture):
+ pytestmark = needsdup
+
+ def getcapture(self, **kw):
+ return py.io.StdCaptureFD(**kw)
+
+ def test_intermingling(self):
+ cap = self.getcapture()
+ oswritebytes(1, "1")
+ sys.stdout.write(str(2))
+ sys.stdout.flush()
+ oswritebytes(1, "3")
+ oswritebytes(2, "a")
+ sys.stderr.write("b")
+ sys.stderr.flush()
+ oswritebytes(2, "c")
+ out, err = cap.reset()
+ assert out == "123"
+ assert err == "abc"
+
+ def test_callcapture(self):
+ def func(x, y):
+ print (x)
+ py.std.sys.stderr.write(str(y))
+ return 42
+
+ res, out, err = py.io.StdCaptureFD.call(func, 3, y=4)
+ assert res == 42
+ assert out.startswith("3")
+ assert err.startswith("4")
+
+ def test_many(self, capfd):
+ def f():
+ for i in range(10):
+ cap = py.io.StdCaptureFD()
+ cap.reset()
+ lsof_check(f)
+
+class TestStdCaptureFDNotNow(TestStdCaptureFD):
+ pytestmark = needsdup
+
+ def getcapture(self, **kw):
+ kw['now'] = False
+ cap = py.io.StdCaptureFD(**kw)
+ cap.startall()
+ return cap
+
+@needsdup
+def test_stdcapture_fd_tmpfile(tmpfile):
+ capfd = py.io.StdCaptureFD(out=tmpfile)
+ os.write(1, "hello".encode("ascii"))
+ os.write(2, "world".encode("ascii"))
+ outf, errf = capfd.done()
+ assert outf == tmpfile
+
+class TestStdCaptureFDinvalidFD:
+ pytestmark = needsdup
+ def test_stdcapture_fd_invalid_fd(self, testdir):
+ testdir.makepyfile("""
+ import py, os
+ def test_stdout():
+ os.close(1)
+ cap = py.io.StdCaptureFD(out=True, err=False, in_=False)
+ cap.done()
+ def test_stderr():
+ os.close(2)
+ cap = py.io.StdCaptureFD(out=False, err=True, in_=False)
+ cap.done()
+ def test_stdin():
+ os.close(0)
+ cap = py.io.StdCaptureFD(out=False, err=False, in_=True)
+ cap.done()
+ """)
+ result = testdir.runpytest("--capture=fd")
+ assert result.ret == 0
+ assert result.parseoutcomes()['passed'] == 3
+
+def test_capture_not_started_but_reset():
+ capsys = py.io.StdCapture(now=False)
+ capsys.done()
+ capsys.done()
+ capsys.reset()
+
+@needsdup
+def test_capture_no_sys():
+ capsys = py.io.StdCapture()
+ try:
+ cap = py.io.StdCaptureFD(patchsys=False)
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ oswritebytes(1, "1")
+ oswritebytes(2, "2")
+ out, err = cap.reset()
+ assert out == "1"
+ assert err == "2"
+ finally:
+ capsys.reset()
+
+@needsdup
+def test_callcapture_nofd():
+ def func(x, y):
+ oswritebytes(1, "hello")
+ oswritebytes(2, "hello")
+ print (x)
+ sys.stderr.write(str(y))
+ return 42
+
+ capfd = py.io.StdCaptureFD(patchsys=False)
+ try:
+ res, out, err = py.io.StdCapture.call(func, 3, y=4)
+ finally:
+ capfd.reset()
+ assert res == 42
+ assert out.startswith("3")
+ assert err.startswith("4")
+
+@needsdup
+@py.test.mark.multi(use=[True, False])
+def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
+ if not use:
+ tmpfile = True
+ cap = py.io.StdCaptureFD(out=False, err=tmpfile, now=False)
+ cap.startall()
+ capfile = cap.err.tmpfile
+ cap.suspend()
+ cap.resume()
+ capfile2 = cap.err.tmpfile
+ assert capfile2 == capfile
+
+@py.test.mark.multi(method=['StdCapture', 'StdCaptureFD'])
+def test_capturing_and_logging_fundamentals(testdir, method):
+ if method == "StdCaptureFD" and not hasattr(os, 'dup'):
+ py.test.skip("need os.dup")
+ # here we check a fundamental feature
+ p = testdir.makepyfile("""
+ import sys, os
+ import py, logging
+ cap = py.io.%s(out=False, in_=False)
+
+ logging.warn("hello1")
+ outerr = cap.suspend()
+ print ("suspend, captured %%s" %%(outerr,))
+ logging.warn("hello2")
+
+ cap.resume()
+ logging.warn("hello3")
+
+ outerr = cap.suspend()
+ print ("suspend2, captured %%s" %% (outerr,))
+ """ % (method,))
+ result = testdir.runpython(p)
+ result.stdout.fnmatch_lines([
+ "suspend, captured*hello1*",
+ "suspend2, captured*hello2*WARNING:root:hello3*",
+ ])
+ assert "atexit" not in result.stderr.str()
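test_capture.py above exercises py.io.StdCapture (sys-level) and py.io.StdCaptureFD (file-descriptor level). A minimal sketch of the basic capture/reset cycle those tests rely on, assuming the vendored py package:

    import sys
    import py

    cap = py.io.StdCapture()        # starts intercepting sys.stdout / sys.stderr
    print("hello")
    sys.stderr.write("world\n")
    out, err = cap.reset()          # stop capturing and hand back the captured text
    assert out == "hello\n"
    assert err == "world\n"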
diff --git a/testing/web-platform/tests/tools/py/testing/io_/test_saferepr.py b/testing/web-platform/tests/tools/py/testing/io_/test_saferepr.py
new file mode 100644
index 000000000..1ed9c4faf
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/io_/test_saferepr.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import generators
+import py
+import sys
+
+saferepr = py.io.saferepr
+
+class TestSafeRepr:
+ def test_simple_repr(self):
+ assert saferepr(1) == '1'
+ assert saferepr(None) == 'None'
+
+ def test_maxsize(self):
+ s = saferepr('x'*50, maxsize=25)
+ assert len(s) == 25
+ expected = repr('x'*10 + '...' + 'x'*10)
+ assert s == expected
+
+ def test_maxsize_error_on_instance(self):
+ class A:
+ def __repr__(self):
+ raise ValueError('...')
+
+ s = saferepr(('*'*50, A()), maxsize=25)
+ assert len(s) == 25
+ assert s[0] == '(' and s[-1] == ')'
+
+ def test_exceptions(self):
+ class BrokenRepr:
+ def __init__(self, ex):
+ self.ex = ex
+ foo = 0
+ def __repr__(self):
+ raise self.ex
+ class BrokenReprException(Exception):
+ __str__ = None
+ __repr__ = None
+ assert 'Exception' in saferepr(BrokenRepr(Exception("broken")))
+ s = saferepr(BrokenReprException("really broken"))
+ assert 'TypeError' in s
+ if py.std.sys.version_info < (2,6):
+ assert 'unknown' in saferepr(BrokenRepr("string"))
+ else:
+ assert 'TypeError' in saferepr(BrokenRepr("string"))
+
+ s2 = saferepr(BrokenRepr(BrokenReprException('omg even worse')))
+ assert 'NameError' not in s2
+ assert 'unknown' in s2
+
+ def test_big_repr(self):
+ from py._io.saferepr import SafeRepr
+ assert len(saferepr(range(1000))) <= \
+ len('[' + SafeRepr().maxlist * "1000" + ']')
+
+ def test_repr_on_newstyle(self):
+ class Function(object):
+ def __repr__(self):
+ return "<%s>" %(self.name)
+ try:
+ s = saferepr(Function())
+ except Exception:
+ py.test.fail("saferepr failed for newstyle class")
+
+ def test_unicode(self):
+ val = py.builtin._totext('£€', 'utf-8')
+ reprval = py.builtin._totext("'£€'", 'utf-8')
+ assert saferepr(val) == reprval
+
+def test_unicode_handling():
+ value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
+ def f():
+ raise Exception(value)
+ excinfo = py.test.raises(Exception, f)
+ s = str(excinfo)
+ if sys.version_info[0] < 3:
+ u = unicode(excinfo)
+
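The saferepr tests above center on two guarantees: repr() failures are swallowed and long values are truncated. A short sketch, assuming the vendored py package; BadRepr is an illustrative class:

    import py

    class BadRepr(object):
        def __repr__(self):
            raise ValueError("broken repr")

    # a failing __repr__ is reported instead of propagating the exception
    assert "ValueError" in py.io.saferepr(BadRepr())

    # long values are cut down to at most maxsize characters
    assert len(py.io.saferepr("x" * 50, maxsize=25)) == 25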
diff --git a/testing/web-platform/tests/tools/py/testing/io_/test_terminalwriter.py b/testing/web-platform/tests/tools/py/testing/io_/test_terminalwriter.py
new file mode 100644
index 000000000..0a15541bd
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/io_/test_terminalwriter.py
@@ -0,0 +1,271 @@
+
+import py
+import os, sys
+from py._io import terminalwriter
+import codecs
+import pytest
+
+def test_get_terminal_width():
+ x = py.io.get_terminal_width
+ assert x == terminalwriter.get_terminal_width
+
+def test_getdimensions(monkeypatch):
+ fcntl = py.test.importorskip("fcntl")
+ import struct
+ l = []
+ monkeypatch.setattr(fcntl, 'ioctl', lambda *args: l.append(args))
+ try:
+ terminalwriter._getdimensions()
+ except (TypeError, struct.error):
+ pass
+ assert len(l) == 1
+ assert l[0][0] == 1
+
+def test_terminal_width_COLUMNS(monkeypatch):
+    """get_terminal_width falls back to the COLUMNS env var when ioctl fails.
+    """
+ fcntl = py.test.importorskip("fcntl")
+ monkeypatch.setattr(fcntl, 'ioctl', lambda *args: int('x'))
+ monkeypatch.setenv('COLUMNS', '42')
+ assert terminalwriter.get_terminal_width() == 42
+ monkeypatch.delenv('COLUMNS', raising=False)
+
+def test_terminalwriter_defaultwidth_80(monkeypatch):
+ monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: 0/0)
+ monkeypatch.delenv('COLUMNS', raising=False)
+ tw = py.io.TerminalWriter()
+ assert tw.fullwidth == 80
+
+def test_terminalwriter_getdimensions_bogus(monkeypatch):
+ monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (10,10))
+ monkeypatch.delenv('COLUMNS', raising=False)
+ tw = py.io.TerminalWriter()
+ assert tw.fullwidth == 80
+
+def test_terminalwriter_getdimensions_emacs(monkeypatch):
+    # emacs terminal returns (0,0) but sets COLUMNS properly
+ monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (0,0))
+ monkeypatch.setenv('COLUMNS', '42')
+ tw = py.io.TerminalWriter()
+ assert tw.fullwidth == 42
+
+def test_terminalwriter_computes_width(monkeypatch):
+ monkeypatch.setattr(terminalwriter, 'get_terminal_width', lambda: 42)
+ tw = py.io.TerminalWriter()
+ assert tw.fullwidth == 42
+
+def test_terminalwriter_default_instantiation():
+ tw = py.io.TerminalWriter(stringio=True)
+ assert hasattr(tw, 'stringio')
+
+def test_terminalwriter_dumb_term_no_markup(monkeypatch):
+ monkeypatch.setattr(os, 'environ', {'TERM': 'dumb', 'PATH': ''})
+ class MyFile:
+ closed = False
+ def isatty(self):
+ return True
+ monkeypatch.setattr(sys, 'stdout', MyFile())
+ try:
+ assert sys.stdout.isatty()
+ tw = py.io.TerminalWriter()
+ assert not tw.hasmarkup
+ finally:
+ monkeypatch.undo()
+
+def test_terminalwriter_file_unicode(tmpdir):
+ f = py.std.codecs.open(str(tmpdir.join("xyz")), "wb", "utf8")
+ tw = py.io.TerminalWriter(file=f)
+ assert tw.encoding == "utf8"
+
+def test_unicode_encoding():
+ msg = py.builtin._totext('b\u00f6y', 'utf8')
+ for encoding in 'utf8', 'latin1':
+ l = []
+ tw = py.io.TerminalWriter(l.append, encoding=encoding)
+ tw.line(msg)
+ assert l[0].strip() == msg.encode(encoding)
+
+@pytest.mark.parametrize("encoding", ["ascii"])
+def test_unicode_on_file_with_ascii_encoding(tmpdir, monkeypatch, encoding):
+ msg = py.builtin._totext('hell\xf6', "latin1")
+ #pytest.raises(UnicodeEncodeError, lambda: bytes(msg))
+ f = py.std.codecs.open(str(tmpdir.join("x")), "w", encoding)
+ tw = py.io.TerminalWriter(f)
+ tw.line(msg)
+ f.close()
+ s = tmpdir.join("x").open("rb").read().strip()
+ assert encoding == "ascii"
+ assert s == msg.encode("unicode-escape")
+
+
+win32 = int(sys.platform == "win32")
+class TestTerminalWriter:
+ def pytest_generate_tests(self, metafunc):
+ if "tw" in metafunc.funcargnames:
+ metafunc.addcall(id="path", param="path")
+ metafunc.addcall(id="stringio", param="stringio")
+ metafunc.addcall(id="callable", param="callable")
+ def pytest_funcarg__tw(self, request):
+ if request.param == "path":
+ tmpdir = request.getfuncargvalue("tmpdir")
+ p = tmpdir.join("tmpfile")
+ f = codecs.open(str(p), 'w+', encoding='utf8')
+ tw = py.io.TerminalWriter(f)
+ def getlines():
+ tw._file.flush()
+ return codecs.open(str(p), 'r',
+ encoding='utf8').readlines()
+ elif request.param == "stringio":
+ tw = py.io.TerminalWriter(stringio=True)
+ def getlines():
+ tw.stringio.seek(0)
+ return tw.stringio.readlines()
+ elif request.param == "callable":
+ writes = []
+ tw = py.io.TerminalWriter(writes.append)
+ def getlines():
+ io = py.io.TextIO()
+ io.write("".join(writes))
+ io.seek(0)
+ return io.readlines()
+ tw.getlines = getlines
+ tw.getvalue = lambda: "".join(getlines())
+ return tw
+
+ def test_line(self, tw):
+ tw.line("hello")
+ l = tw.getlines()
+ assert len(l) == 1
+ assert l[0] == "hello\n"
+
+ def test_line_unicode(self, tw):
+ for encoding in 'utf8', 'latin1':
+ tw._encoding = encoding
+ msg = py.builtin._totext('b\u00f6y', 'utf8')
+ tw.line(msg)
+ l = tw.getlines()
+ assert l[0] == msg + "\n"
+
+ def test_sep_no_title(self, tw):
+ tw.sep("-", fullwidth=60)
+ l = tw.getlines()
+ assert len(l) == 1
+ assert l[0] == "-" * (60-win32) + "\n"
+
+ def test_sep_with_title(self, tw):
+ tw.sep("-", "hello", fullwidth=60)
+ l = tw.getlines()
+ assert len(l) == 1
+ assert l[0] == "-" * 26 + " hello " + "-" * (27-win32) + "\n"
+
+ @py.test.mark.skipif("sys.platform == 'win32'")
+ def test__escaped(self, tw):
+ text2 = tw._escaped("hello", (31))
+ assert text2.find("hello") != -1
+
+ @py.test.mark.skipif("sys.platform == 'win32'")
+ def test_markup(self, tw):
+ for bold in (True, False):
+ for color in ("red", "green"):
+ text2 = tw.markup("hello", **{color: True, 'bold': bold})
+ assert text2.find("hello") != -1
+ py.test.raises(ValueError, "tw.markup('x', wronkw=3)")
+ py.test.raises(ValueError, "tw.markup('x', wronkw=0)")
+
+ def test_line_write_markup(self, tw):
+ tw.hasmarkup = True
+ tw.line("x", bold=True)
+ tw.write("x\n", red=True)
+ l = tw.getlines()
+ if sys.platform != "win32":
+ assert len(l[0]) >= 2, l
+ assert len(l[1]) >= 2, l
+
+ def test_attr_fullwidth(self, tw):
+ tw.sep("-", "hello", fullwidth=70)
+ tw.fullwidth = 70
+ tw.sep("-", "hello")
+ l = tw.getlines()
+ assert len(l[0]) == len(l[1])
+
+ def test_reline(self, tw):
+ tw.line("hello")
+ tw.hasmarkup = False
+ pytest.raises(ValueError, lambda: tw.reline("x"))
+ tw.hasmarkup = True
+ tw.reline("0 1 2")
+ tw.getlines()
+ l = tw.getvalue().split("\n")
+ assert len(l) == 2
+ tw.reline("0 1 3")
+ l = tw.getvalue().split("\n")
+ assert len(l) == 2
+ assert l[1].endswith("0 1 3\r")
+ tw.line("so")
+ l = tw.getvalue().split("\n")
+ assert len(l) == 3
+ assert l[-1] == ""
+ assert l[1] == ("0 1 2\r0 1 3\rso ")
+ assert l[0] == "hello"
+
+
+def test_terminal_with_callable_write_and_flush():
+ l = set()
+ class fil:
+ flush = lambda self: l.add("1")
+ write = lambda self, x: l.add("1")
+ __call__ = lambda self, x: l.add("2")
+
+ tw = py.io.TerminalWriter(fil())
+ tw.line("hello")
+ assert l == set(["1"])
+ del fil.flush
+ l.clear()
+ tw = py.io.TerminalWriter(fil())
+ tw.line("hello")
+ assert l == set(["2"])
+
+
+@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
+def test_attr_hasmarkup():
+ tw = py.io.TerminalWriter(stringio=True)
+ assert not tw.hasmarkup
+ tw.hasmarkup = True
+ tw.line("hello", bold=True)
+ s = tw.stringio.getvalue()
+ assert len(s) > len("hello\n")
+ assert '\x1b[1m' in s
+ assert '\x1b[0m' in s
+
+@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
+def test_ansi_print():
+ # we have no easy way to construct a file that
+ # represents a terminal
+ f = py.io.TextIO()
+ f.isatty = lambda: True
+ py.io.ansi_print("hello", 0x32, file=f)
+ text2 = f.getvalue()
+ assert text2.find("hello") != -1
+ assert len(text2) >= len("hello\n")
+ assert '\x1b[50m' in text2
+ assert '\x1b[0m' in text2
+
+def test_should_do_markup_PY_COLORS_eq_1(monkeypatch):
+ monkeypatch.setitem(os.environ, 'PY_COLORS', '1')
+ tw = py.io.TerminalWriter(stringio=True)
+ assert tw.hasmarkup
+ tw.line("hello", bold=True)
+ s = tw.stringio.getvalue()
+ assert len(s) > len("hello\n")
+ assert '\x1b[1m' in s
+ assert '\x1b[0m' in s
+
+def test_should_do_markup_PY_COLORS_eq_0(monkeypatch):
+ monkeypatch.setitem(os.environ, 'PY_COLORS', '0')
+ f = py.io.TextIO()
+ f.isatty = lambda: True
+ tw = py.io.TerminalWriter(file=f)
+ assert not tw.hasmarkup
+ tw.line("hello", bold=True)
+ s = f.getvalue()
+ assert s == "hello\n"
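test_terminalwriter.py above covers width detection, markup and the different output backends. A compact sketch of the stringio backend as the tests use it, assuming the vendored py package:

    import py

    tw = py.io.TerminalWriter(stringio=True)   # write into an in-memory buffer
    tw.hasmarkup = True                        # force ANSI markup regardless of tty
    tw.line("hello", bold=True)                # "hello" wrapped in \x1b[1m ... \x1b[0m
    tw.sep("-", "section", fullwidth=40)       # a "---- section ----" separator line
    output = tw.stringio.getvalue()
    assert "hello" in output and "section" in output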
diff --git a/testing/web-platform/tests/tools/py/testing/log/__init__.py b/testing/web-platform/tests/tools/py/testing/log/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/log/__init__.py
diff --git a/testing/web-platform/tests/tools/py/testing/log/test_log.py b/testing/web-platform/tests/tools/py/testing/log/test_log.py
new file mode 100644
index 000000000..b41bc3a58
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/log/test_log.py
@@ -0,0 +1,190 @@
+import py
+import sys
+
+from py._log.log import default_keywordmapper
+
+callcapture = py.io.StdCapture.call
+
+def setup_module(mod):
+ mod._oldstate = default_keywordmapper.getstate()
+
+def teardown_module(mod):
+ default_keywordmapper.setstate(mod._oldstate)
+
+class TestLogProducer:
+ def setup_method(self, meth):
+ default_keywordmapper.setstate(_oldstate)
+
+ def test_getstate_setstate(self):
+ state = py.log._getstate()
+ py.log.setconsumer("hello", [].append)
+ state2 = py.log._getstate()
+ assert state2 != state
+ py.log._setstate(state)
+ state3 = py.log._getstate()
+ assert state3 == state
+
+ def test_producer_repr(self):
+ d = py.log.Producer("default")
+ assert repr(d).find('default') != -1
+
+ def test_produce_one_keyword(self):
+ l = []
+ py.log.setconsumer('s1', l.append)
+ py.log.Producer('s1')("hello world")
+ assert len(l) == 1
+ msg = l[0]
+ assert msg.content().startswith('hello world')
+ assert msg.prefix() == '[s1] '
+ assert str(msg) == "[s1] hello world"
+
+ def test_producer_class(self):
+ p = py.log.Producer('x1')
+ l = []
+ py.log.setconsumer(p._keywords, l.append)
+ p("hello")
+ assert len(l) == 1
+ assert len(l[0].keywords) == 1
+ assert 'x1' == l[0].keywords[0]
+
+ def test_producer_caching(self):
+ p = py.log.Producer('x1')
+ x2 = p.x2
+ assert x2 is p.x2
+
+class TestLogConsumer:
+ def setup_method(self, meth):
+ default_keywordmapper.setstate(_oldstate)
+ def test_log_none(self):
+ log = py.log.Producer("XXX")
+ l = []
+ py.log.setconsumer('XXX', l.append)
+ log("1")
+ assert l
+ l[:] = []
+ py.log.setconsumer('XXX', None)
+ log("2")
+ assert not l
+
+ def test_log_default_stderr(self):
+ res, out, err = callcapture(py.log.Producer("default"), "hello")
+ assert err.strip() == "[default] hello"
+
+ def test_simple_consumer_match(self):
+ l = []
+ py.log.setconsumer("x1", l.append)
+ p = py.log.Producer("x1 x2")
+ p("hello")
+ assert l
+ assert l[0].content() == "hello"
+
+ def test_simple_consumer_match_2(self):
+ l = []
+ p = py.log.Producer("x1 x2")
+ py.log.setconsumer(p._keywords, l.append)
+ p("42")
+ assert l
+ assert l[0].content() == "42"
+
+ def test_no_auto_producer(self):
+ p = py.log.Producer('x')
+ py.test.raises(AttributeError, "p._x")
+ py.test.raises(AttributeError, "p.x_y")
+
+ def test_setconsumer_with_producer(self):
+ l = []
+ p = py.log.Producer("hello")
+ py.log.setconsumer(p, l.append)
+ p("world")
+ assert str(l[0]) == "[hello] world"
+
+ def test_multi_consumer(self):
+ l = []
+ py.log.setconsumer("x1", l.append)
+ py.log.setconsumer("x1 x2", None)
+ p = py.log.Producer("x1 x2")
+ p("hello")
+ assert not l
+ py.log.Producer("x1")("hello")
+ assert l
+ assert l[0].content() == "hello"
+
+ def test_log_stderr(self):
+ py.log.setconsumer("xyz", py.log.STDOUT)
+ res, out, err = callcapture(py.log.Producer("xyz"), "hello")
+ assert not err
+ assert out.strip() == '[xyz] hello'
+
+ def test_log_file(self, tmpdir):
+ customlog = tmpdir.join('log.out')
+ py.log.setconsumer("default", open(str(customlog), 'w', 1))
+ py.log.Producer("default")("hello world #1")
+ assert customlog.readlines() == ['[default] hello world #1\n']
+
+ py.log.setconsumer("default", py.log.Path(customlog, buffering=False))
+ py.log.Producer("default")("hello world #2")
+ res = customlog.readlines()
+ assert res == ['[default] hello world #2\n'] # no append by default!
+
+ def test_log_file_append_mode(self, tmpdir):
+ logfilefn = tmpdir.join('log_append.out')
+
+        # append=True keeps the existing contents of the log file across consumers
+ py.log.setconsumer("default", py.log.Path(logfilefn, append=True,
+ buffering=0))
+ assert logfilefn.check()
+ py.log.Producer("default")("hello world #1")
+ lines = logfilefn.readlines()
+ assert lines == ['[default] hello world #1\n']
+ py.log.setconsumer("default", py.log.Path(logfilefn, append=True,
+ buffering=0))
+ py.log.Producer("default")("hello world #1")
+ lines = logfilefn.readlines()
+ assert lines == ['[default] hello world #1\n',
+ '[default] hello world #1\n']
+
+ def test_log_file_delayed_create(self, tmpdir):
+ logfilefn = tmpdir.join('log_create.out')
+
+ py.log.setconsumer("default", py.log.Path(logfilefn,
+ delayed_create=True, buffering=0))
+ assert not logfilefn.check()
+ py.log.Producer("default")("hello world #1")
+ lines = logfilefn.readlines()
+ assert lines == ['[default] hello world #1\n']
+
+ def test_keyword_based_log_files(self, tmpdir):
+ logfiles = []
+ keywords = 'k1 k2 k3'.split()
+ for key in keywords:
+ path = tmpdir.join(key)
+ py.log.setconsumer(key, py.log.Path(path, buffering=0))
+
+ py.log.Producer('k1')('1')
+ py.log.Producer('k2')('2')
+ py.log.Producer('k3')('3')
+
+ for key in keywords:
+ path = tmpdir.join(key)
+ assert path.read().strip() == '[%s] %s' % (key, key[-1])
+
+ # disabled for now; the syslog log file can usually be read only by root
+ # I manually inspected /var/log/messages and the entries were there
+ def no_test_log_syslog(self):
+ py.log.setconsumer("default", py.log.Syslog())
+ py.log.default("hello world #1")
+
+ # disabled for now until I figure out how to read entries in the
+ # Event Logs on Windows
+ # I manually inspected the Application Log and the entries were there
+ def no_test_log_winevent(self):
+ py.log.setconsumer("default", py.log.WinEvent())
+ py.log.default("hello world #1")
+
+ # disabled for now until I figure out how to properly pass the parameters
+ def no_test_log_email(self):
+ py.log.setconsumer("default", py.log.Email(mailhost="gheorghiu.net",
+ fromaddr="grig",
+ toaddrs="grig",
+ subject = "py.log email"))
+ py.log.default("hello world #1")
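The log tests above build on keyword-based producers routed to consumers. A minimal sketch of that pattern, assuming the vendored py package; the keyword "demo" is illustrative:

    import py

    messages = []
    py.log.setconsumer("demo", messages.append)   # route the "demo" keyword to a list
    log = py.log.Producer("demo")
    log("hello world")

    assert str(messages[0]) == "[demo] hello world"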
diff --git a/testing/web-platform/tests/tools/py/testing/log/test_warning.py b/testing/web-platform/tests/tools/py/testing/log/test_warning.py
new file mode 100644
index 000000000..8c89cf8ad
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/log/test_warning.py
@@ -0,0 +1,76 @@
+import pytest
+import py
+
+mypath = py.path.local(__file__).new(ext=".py")
+
+@pytest.mark.xfail
+def test_forwarding_to_warnings_module():
+ pytest.deprecated_call(py.log._apiwarn, "1.3", "..")
+
+def test_apiwarn_functional(recwarn):
+ capture = py.io.StdCapture()
+ py.log._apiwarn("x.y.z", "something", stacklevel=1)
+ out, err = capture.reset()
+ py.builtin.print_("out", out)
+ py.builtin.print_("err", err)
+ assert err.find("x.y.z") != -1
+ lno = py.code.getrawcode(test_apiwarn_functional).co_firstlineno + 2
+ exp = "%s:%s" % (mypath, lno)
+ assert err.find(exp) != -1
+
+def test_stacklevel(recwarn):
+ def f():
+ py.log._apiwarn("x", "some", stacklevel=2)
+ # 3
+ # 4
+ capture = py.io.StdCapture()
+ f()
+ out, err = capture.reset()
+ lno = py.code.getrawcode(test_stacklevel).co_firstlineno + 6
+ warning = str(err)
+ assert warning.find(":%s" % lno) != -1
+
+def test_stacklevel_initpkg_with_resolve(testdir, recwarn):
+ testdir.makepyfile(modabc="""
+ import py
+ def f():
+ py.log._apiwarn("x", "some", stacklevel="apipkg123")
+ """)
+ testdir.makepyfile(apipkg123="""
+ def __getattr__():
+ import modabc
+ modabc.f()
+ """)
+ p = testdir.makepyfile("""
+ import apipkg123
+ apipkg123.__getattr__()
+ """)
+ capture = py.io.StdCapture()
+ p.pyimport()
+ out, err = capture.reset()
+ warning = str(err)
+ loc = 'test_stacklevel_initpkg_with_resolve.py:2'
+ assert warning.find(loc) != -1
+
+def test_stacklevel_initpkg_no_resolve(recwarn):
+ def f():
+ py.log._apiwarn("x", "some", stacklevel="apipkg")
+ capture = py.io.StdCapture()
+ f()
+ out, err = capture.reset()
+ lno = py.code.getrawcode(test_stacklevel_initpkg_no_resolve).co_firstlineno + 2
+ warning = str(err)
+ assert warning.find(":%s" % lno) != -1
+
+
+def test_function(recwarn):
+ capture = py.io.StdCapture()
+ py.log._apiwarn("x.y.z", "something", function=test_function)
+ out, err = capture.reset()
+ py.builtin.print_("out", out)
+ py.builtin.print_("err", err)
+ assert err.find("x.y.z") != -1
+ lno = py.code.getrawcode(test_function).co_firstlineno
+ exp = "%s:%s" % (mypath, lno)
+ assert err.find(exp) != -1
+
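py.log._apiwarn (an internal helper) is exercised above by capturing stderr rather than going through the warnings module. A sketch mirroring test_apiwarn_functional, assuming the vendored py package; the version string "1.0" and the message text are illustrative:

    import py

    capture = py.io.StdCapture()
    # the warning, including the version and the caller's file:line, lands on stderr
    py.log._apiwarn("1.0", "this api is deprecated", stacklevel=1)
    out, err = capture.reset()

    assert "1.0" in err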
diff --git a/testing/web-platform/tests/tools/py/testing/path/common.py b/testing/web-platform/tests/tools/py/testing/path/common.py
new file mode 100644
index 000000000..4834fba12
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/common.py
@@ -0,0 +1,470 @@
+import py
+import sys
+
+class CommonFSTests(object):
+ def test_constructor_equality(self, path1):
+ p = path1.__class__(path1)
+ assert p == path1
+
+ def test_eq_nonstring(self, path1):
+ p1 = path1.join('sampledir')
+ p2 = path1.join('sampledir')
+ assert p1 == p2
+
+ def test_new_identical(self, path1):
+ assert path1 == path1.new()
+
+ def test_join(self, path1):
+ p = path1.join('sampledir')
+ strp = str(p)
+ assert strp.endswith('sampledir')
+ assert strp.startswith(str(path1))
+
+ def test_join_normalized(self, path1):
+ newpath = path1.join(path1.sep+'sampledir')
+ strp = str(newpath)
+ assert strp.endswith('sampledir')
+ assert strp.startswith(str(path1))
+ newpath = path1.join((path1.sep*2) + 'sampledir')
+ strp = str(newpath)
+ assert strp.endswith('sampledir')
+ assert strp.startswith(str(path1))
+
+ def test_join_noargs(self, path1):
+ newpath = path1.join()
+ assert path1 == newpath
+
+ def test_add_something(self, path1):
+ p = path1.join('sample')
+ p = p + 'dir'
+ assert p.check()
+ assert p.exists()
+ assert p.isdir()
+ assert not p.isfile()
+
+ def test_parts(self, path1):
+ newpath = path1.join('sampledir', 'otherfile')
+ par = newpath.parts()[-3:]
+ assert par == [path1, path1.join('sampledir'), newpath]
+
+ revpar = newpath.parts(reverse=True)[:3]
+ assert revpar == [newpath, path1.join('sampledir'), path1]
+
+ def test_common(self, path1):
+ other = path1.join('sampledir')
+ x = other.common(path1)
+ assert x == path1
+
+ #def test_parents_nonexisting_file(self, path1):
+ # newpath = path1 / 'dirnoexist' / 'nonexisting file'
+ # par = list(newpath.parents())
+ # assert par[:2] == [path1 / 'dirnoexist', path1]
+
+ def test_basename_checks(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.check(basename='sampledir')
+ assert newpath.check(notbasename='xyz')
+ assert newpath.basename == 'sampledir'
+
+ def test_basename(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.check(basename='sampledir')
+        assert newpath.basename == 'sampledir'
+
+ def test_dirname(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.dirname == str(path1)
+
+ def test_dirpath(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.dirpath() == path1
+
+ def test_dirpath_with_args(self, path1):
+ newpath = path1.join('sampledir')
+ assert newpath.dirpath('x') == path1.join('x')
+
+ def test_newbasename(self, path1):
+ newpath = path1.join('samplefile')
+ newbase = newpath.new(basename="samplefile2")
+ assert newbase.basename == "samplefile2"
+ assert newbase.dirpath() == newpath.dirpath()
+
+ def test_not_exists(self, path1):
+ assert not path1.join('does_not_exist').check()
+ assert path1.join('does_not_exist').check(exists=0)
+
+ def test_exists(self, path1):
+ assert path1.join("samplefile").check()
+ assert path1.join("samplefile").check(exists=1)
+ assert path1.join("samplefile").exists()
+ assert path1.join("samplefile").isfile()
+ assert not path1.join("samplefile").isdir()
+
+ def test_dir(self, path1):
+ #print repr(path1.join("sampledir"))
+ assert path1.join("sampledir").check(dir=1)
+ assert path1.join('samplefile').check(notdir=1)
+ assert not path1.join("samplefile").check(dir=1)
+ assert path1.join("samplefile").exists()
+ assert not path1.join("samplefile").isdir()
+ assert path1.join("samplefile").isfile()
+
+ def test_fnmatch_file(self, path1):
+ assert path1.join("samplefile").check(fnmatch='s*e')
+ assert path1.join("samplefile").fnmatch('s*e')
+ assert not path1.join("samplefile").fnmatch('s*x')
+ assert not path1.join("samplefile").check(fnmatch='s*x')
+
+ #def test_fnmatch_dir(self, path1):
+
+ # pattern = path1.sep.join(['s*file'])
+ # sfile = path1.join("samplefile")
+ # assert sfile.check(fnmatch=pattern)
+
+ def test_relto(self, path1):
+ l=path1.join("sampledir", "otherfile")
+ assert l.relto(path1) == l.sep.join(["sampledir", "otherfile"])
+ assert l.check(relto=path1)
+ assert path1.check(notrelto=l)
+ assert not path1.check(relto=l)
+
+ def test_bestrelpath(self, path1):
+ curdir = path1
+ sep = curdir.sep
+ s = curdir.bestrelpath(curdir)
+ assert s == "."
+ s = curdir.bestrelpath(curdir.join("hello", "world"))
+ assert s == "hello" + sep + "world"
+
+ s = curdir.bestrelpath(curdir.dirpath().join("sister"))
+ assert s == ".." + sep + "sister"
+ assert curdir.bestrelpath(curdir.dirpath()) == ".."
+
+ assert curdir.bestrelpath("hello") == "hello"
+
+ def test_relto_not_relative(self, path1):
+ l1=path1.join("bcde")
+ l2=path1.join("b")
+ assert not l1.relto(l2)
+ assert not l2.relto(l1)
+
+ @py.test.mark.xfail("sys.platform.startswith('java')")
+ def test_listdir(self, path1):
+ l = path1.listdir()
+ assert path1.join('sampledir') in l
+ assert path1.join('samplefile') in l
+ py.test.raises(py.error.ENOTDIR,
+ "path1.join('samplefile').listdir()")
+
+ def test_listdir_fnmatchstring(self, path1):
+ l = path1.listdir('s*dir')
+ assert len(l)
+        assert l[0] == path1.join('sampledir')
+
+ def test_listdir_filter(self, path1):
+ l = path1.listdir(lambda x: x.check(dir=1))
+ assert path1.join('sampledir') in l
+ assert not path1.join('samplefile') in l
+
+ def test_listdir_sorted(self, path1):
+ l = path1.listdir(lambda x: x.check(basestarts="sample"), sort=True)
+ assert path1.join('sampledir') == l[0]
+ assert path1.join('samplefile') == l[1]
+ assert path1.join('samplepickle') == l[2]
+
+ def test_visit_nofilter(self, path1):
+ l = []
+ for i in path1.visit():
+ l.append(i.relto(path1))
+ assert "sampledir" in l
+ assert path1.sep.join(["sampledir", "otherfile"]) in l
+
+ def test_visit_norecurse(self, path1):
+ l = []
+ for i in path1.visit(None, lambda x: x.basename != "sampledir"):
+ l.append(i.relto(path1))
+ assert "sampledir" in l
+ assert not path1.sep.join(["sampledir", "otherfile"]) in l
+
+ def test_visit_filterfunc_is_string(self, path1):
+ l = []
+ for i in path1.visit('*dir'):
+ l.append(i.relto(path1))
+        assert len(l) == 2
+ assert "sampledir" in l
+ assert "otherdir" in l
+
+ @py.test.mark.xfail("sys.platform.startswith('java')")
+ def test_visit_ignore(self, path1):
+ p = path1.join('nonexisting')
+ assert list(p.visit(ignore=py.error.ENOENT)) == []
+
+ def test_visit_endswith(self, path1):
+ l = []
+ for i in path1.visit(lambda x: x.check(endswith="file")):
+ l.append(i.relto(path1))
+ assert path1.sep.join(["sampledir", "otherfile"]) in l
+ assert "samplefile" in l
+
+ def test_endswith(self, path1):
+ assert path1.check(notendswith='.py')
+ x = path1.join('samplefile')
+ assert x.check(endswith='file')
+
+ def test_cmp(self, path1):
+ path1 = path1.join('samplefile')
+ path2 = path1.join('samplefile2')
+ assert (path1 < path2) == ('samplefile' < 'samplefile2')
+ assert not (path1 < path1)
+
+ def test_simple_read(self, path1):
+ x = path1.join('samplefile').read('r')
+ assert x == 'samplefile\n'
+
+ def test_join_div_operator(self, path1):
+ newpath = path1 / '/sampledir' / '/test//'
+ newpath2 = path1.join('sampledir', 'test')
+ assert newpath == newpath2
+
+ def test_ext(self, path1):
+ newpath = path1.join('sampledir.ext')
+ assert newpath.ext == '.ext'
+ newpath = path1.join('sampledir')
+ assert not newpath.ext
+
+ def test_purebasename(self, path1):
+ newpath = path1.join('samplefile.py')
+ assert newpath.purebasename == 'samplefile'
+
+ def test_multiple_parts(self, path1):
+ newpath = path1.join('samplefile.py')
+ dirname, purebasename, basename, ext = newpath._getbyspec(
+ 'dirname,purebasename,basename,ext')
+ assert str(path1).endswith(dirname) # be careful with win32 'drive'
+ assert purebasename == 'samplefile'
+ assert basename == 'samplefile.py'
+ assert ext == '.py'
+
+ def test_dotted_name_ext(self, path1):
+ newpath = path1.join('a.b.c')
+ ext = newpath.ext
+ assert ext == '.c'
+ assert newpath.ext == '.c'
+
+ def test_newext(self, path1):
+ newpath = path1.join('samplefile.py')
+ newext = newpath.new(ext='.txt')
+ assert newext.basename == "samplefile.txt"
+ assert newext.purebasename == "samplefile"
+
+ def test_readlines(self, path1):
+ fn = path1.join('samplefile')
+ contents = fn.readlines()
+ assert contents == ['samplefile\n']
+
+ def test_readlines_nocr(self, path1):
+ fn = path1.join('samplefile')
+ contents = fn.readlines(cr=0)
+ assert contents == ['samplefile', '']
+
+ def test_file(self, path1):
+ assert path1.join('samplefile').check(file=1)
+
+ def test_not_file(self, path1):
+ assert not path1.join("sampledir").check(file=1)
+ assert path1.join("sampledir").check(file=0)
+
+ def test_non_existent(self, path1):
+ assert path1.join("sampledir.nothere").check(dir=0)
+ assert path1.join("sampledir.nothere").check(file=0)
+ assert path1.join("sampledir.nothere").check(notfile=1)
+ assert path1.join("sampledir.nothere").check(notdir=1)
+ assert path1.join("sampledir.nothere").check(notexists=1)
+ assert not path1.join("sampledir.nothere").check(notfile=0)
+
+ # pattern = path1.sep.join(['s*file'])
+ # sfile = path1.join("samplefile")
+ # assert sfile.check(fnmatch=pattern)
+
+ def test_size(self, path1):
+ url = path1.join("samplefile")
+ assert url.size() > len("samplefile")
+
+ def test_mtime(self, path1):
+ url = path1.join("samplefile")
+ assert url.mtime() > 0
+
+ def test_relto_wrong_type(self, path1):
+ py.test.raises(TypeError, "path1.relto(42)")
+
+ def test_load(self, path1):
+ p = path1.join('samplepickle')
+ obj = p.load()
+ assert type(obj) is dict
+ assert obj.get('answer',None) == 42
+
+ def test_visit_filesonly(self, path1):
+ l = []
+ for i in path1.visit(lambda x: x.check(file=1)):
+ l.append(i.relto(path1))
+ assert not "sampledir" in l
+ assert path1.sep.join(["sampledir", "otherfile"]) in l
+
+ def test_visit_nodotfiles(self, path1):
+ l = []
+ for i in path1.visit(lambda x: x.check(dotfile=0)):
+ l.append(i.relto(path1))
+ assert "sampledir" in l
+ assert path1.sep.join(["sampledir", "otherfile"]) in l
+ assert not ".dotfile" in l
+
+ def test_visit_breadthfirst(self, path1):
+ l = []
+ for i in path1.visit(bf=True):
+ l.append(i.relto(path1))
+ for i, p in enumerate(l):
+ if path1.sep in p:
+ for j in range(i, len(l)):
+ assert path1.sep in l[j]
+ break
+ else:
+ py.test.fail("huh")
+
+ def test_visit_sort(self, path1):
+ l = []
+ for i in path1.visit(bf=True, sort=True):
+ l.append(i.relto(path1))
+ for i, p in enumerate(l):
+ if path1.sep in p:
+ break
+ assert l[:i] == sorted(l[:i])
+ assert l[i:] == sorted(l[i:])
+
+ def test_endswith(self, path1):
+ def chk(p):
+ return p.check(endswith="pickle")
+ assert not chk(path1)
+ assert not chk(path1.join('samplefile'))
+ assert chk(path1.join('somepickle'))
+
+ def test_copy_file(self, path1):
+ otherdir = path1.join('otherdir')
+ initpy = otherdir.join('__init__.py')
+ copied = otherdir.join('copied')
+ initpy.copy(copied)
+ try:
+ assert copied.check()
+ s1 = initpy.read()
+ s2 = copied.read()
+ assert s1 == s2
+ finally:
+ if copied.check():
+ copied.remove()
+
+ def test_copy_dir(self, path1):
+ otherdir = path1.join('otherdir')
+ copied = path1.join('newdir')
+ try:
+ otherdir.copy(copied)
+ assert copied.check(dir=1)
+ assert copied.join('__init__.py').check(file=1)
+ s1 = otherdir.join('__init__.py').read()
+ s2 = copied.join('__init__.py').read()
+ assert s1 == s2
+ finally:
+ if copied.check(dir=1):
+ copied.remove(rec=1)
+
+ def test_remove_file(self, path1):
+ d = path1.ensure('todeleted')
+ assert d.check()
+ d.remove()
+ assert not d.check()
+
+ def test_remove_dir_recursive_by_default(self, path1):
+ d = path1.ensure('to', 'be', 'deleted')
+ assert d.check()
+ p = path1.join('to')
+ p.remove()
+ assert not p.check()
+
+ def test_ensure_dir(self, path1):
+ b = path1.ensure_dir("001", "002")
+ assert b.basename == "002"
+ assert b.isdir()
+
+ def test_mkdir_and_remove(self, path1):
+ tmpdir = path1
+ py.test.raises(py.error.EEXIST, tmpdir.mkdir, 'sampledir')
+ new = tmpdir.join('mktest1')
+ new.mkdir()
+ assert new.check(dir=1)
+ new.remove()
+
+ new = tmpdir.mkdir('mktest')
+ assert new.check(dir=1)
+ new.remove()
+ assert tmpdir.join('mktest') == new
+
+ def test_move_file(self, path1):
+ p = path1.join('samplefile')
+ newp = p.dirpath('moved_samplefile')
+ p.move(newp)
+ try:
+ assert newp.check(file=1)
+ assert not p.check()
+ finally:
+ dp = newp.dirpath()
+ if hasattr(dp, 'revert'):
+ dp.revert()
+ else:
+ newp.move(p)
+ assert p.check()
+
+ def test_move_dir(self, path1):
+ source = path1.join('sampledir')
+ dest = path1.join('moveddir')
+ source.move(dest)
+ assert dest.check(dir=1)
+ assert dest.join('otherfile').check(file=1)
+ assert not source.join('sampledir').check()
+
+def setuptestfs(path):
+ if path.join('samplefile').check():
+ return
+ #print "setting up test fs for", repr(path)
+ samplefile = path.ensure('samplefile')
+ samplefile.write('samplefile\n')
+
+ execfile = path.ensure('execfile')
+ execfile.write('x=42')
+
+ execfilepy = path.ensure('execfile.py')
+ execfilepy.write('x=42')
+
+ d = {1:2, 'hello': 'world', 'answer': 42}
+ path.ensure('samplepickle').dump(d)
+
+ sampledir = path.ensure('sampledir', dir=1)
+ sampledir.ensure('otherfile')
+
+ otherdir = path.ensure('otherdir', dir=1)
+ otherdir.ensure('__init__.py')
+
+ module_a = otherdir.ensure('a.py')
+ if sys.version_info >= (2,6):
+ module_a.write('from .b import stuff as result\n')
+ else:
+ module_a.write('from b import stuff as result\n')
+ module_b = otherdir.ensure('b.py')
+ module_b.write('stuff="got it"\n')
+ module_c = otherdir.ensure('c.py')
+ module_c.write('''import py;
+import otherdir.a
+value = otherdir.a.result
+''')
+ module_d = otherdir.ensure('d.py')
+ module_d.write('''import py;
+from otherdir import a
+value2 = a.result
+''')
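common.py defines CommonFSTests, which the path implementations (local and the svn flavours) are run against, plus the setuptestfs() fixture data. A small local-filesystem sketch of the join/check/visit idioms those tests use, assuming the vendored py package; the directory layout is illustrative:

    import py

    tmp = py.path.local.mkdtemp()
    try:
        f = tmp.ensure("sampledir", "otherfile")     # creates the dir and the file
        assert f.check(file=1)
        assert tmp.join("sampledir").check(dir=1)
        # visit() walks the tree; relto() strips the leading tmp path
        rel = [p.relto(tmp) for p in tmp.visit()]
        assert "sampledir" in rel
    finally:
        tmp.remove(rec=1)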
diff --git a/testing/web-platform/tests/tools/py/testing/path/conftest.py b/testing/web-platform/tests/tools/py/testing/path/conftest.py
new file mode 100644
index 000000000..a9711b2ce
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/conftest.py
@@ -0,0 +1,80 @@
+import py
+import sys
+from py._path import svnwc as svncommon
+
+svnbin = py.path.local.sysfind('svn')
+repodump = py.path.local(__file__).dirpath('repotest.dump')
+from py.builtin import print_
+
+def pytest_funcarg__repowc1(request):
+ if svnbin is None:
+ py.test.skip("svn binary not found")
+
+ tmpdir = request.getfuncargvalue("tmpdir")
+ repo, repourl, wc = request.cached_setup(
+ setup=lambda: getrepowc(tmpdir, "path1repo", "path1wc"),
+ scope="module",
+ )
+ for x in ('test_remove', 'test_move', 'test_status_deleted'):
+ if request.function.__name__.startswith(x):
+ #print >>sys.stderr, ("saving repo", repo, "for", request.function)
+ _savedrepowc = save_repowc(repo, wc)
+ request.addfinalizer(lambda: restore_repowc(_savedrepowc))
+ return repo, repourl, wc
+
+def pytest_funcarg__repowc2(request):
+ tmpdir = request.getfuncargvalue("tmpdir")
+ name = request.function.__name__
+ repo, url, wc = getrepowc(tmpdir, "%s-repo-2" % name, "%s-wc-2" % name)
+ return repo, url, wc
+
+def getsvnbin():
+ if svnbin is None:
+ py.test.skip("svn binary not found")
+ return svnbin
+
+# make a wc directory out of a given root url
+# cache previously obtained wcs!
+#
+def getrepowc(tmpdir, reponame='basetestrepo', wcname='wc'):
+ repo = tmpdir.mkdir(reponame)
+ wcdir = tmpdir.mkdir(wcname)
+ repo.ensure(dir=1)
+ py.process.cmdexec('svnadmin create "%s"' %
+ svncommon._escape_helper(repo))
+ py.process.cmdexec('svnadmin load -q "%s" <"%s"' %
+ (svncommon._escape_helper(repo), repodump))
+ print_("created svn repository", repo)
+ wcdir.ensure(dir=1)
+ wc = py.path.svnwc(wcdir)
+ if py.std.sys.platform == 'win32':
+ repourl = "file://" + '/' + str(repo).replace('\\', '/')
+ else:
+ repourl = "file://%s" % repo
+ wc.checkout(repourl)
+ print_("checked out new repo into", wc)
+ return (repo, repourl, wc)
+
+
+def save_repowc(repo, wc):
+ assert not str(repo).startswith("file://"), repo
+ assert repo.check()
+ savedrepo = repo.dirpath(repo.basename+".1")
+ savedwc = wc.dirpath(wc.basename+".1")
+ repo.copy(savedrepo)
+ wc.localpath.copy(savedwc.localpath)
+ return savedrepo, savedwc
+
+def restore_repowc(obj):
+ savedrepo, savedwc = obj
+ #print >>sys.stderr, ("restoring", savedrepo)
+ repo = savedrepo.new(basename=savedrepo.basename[:-2])
+ assert repo.check()
+ wc = savedwc.new(basename=savedwc.basename[:-2])
+ assert wc.check()
+ wc.localpath.remove()
+ repo.remove()
+ savedrepo.move(repo)
+ savedwc.localpath.move(wc.localpath)
+ py.path.svnurl._lsnorevcache.clear()
+ py.path.svnurl._lsrevcache.clear()
diff --git a/testing/web-platform/tests/tools/py/testing/path/repotest.dump b/testing/web-platform/tests/tools/py/testing/path/repotest.dump
new file mode 100644
index 000000000..c7819cad7
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/repotest.dump
@@ -0,0 +1,228 @@
+SVN-fs-dump-format-version: 2
+
+UUID: 876a30f4-1eed-0310-aeb7-ae314d1e5934
+
+Revision-number: 0
+Prop-content-length: 56
+Content-length: 56
+
+K 8
+svn:date
+V 27
+2005-01-07T23:55:31.755989Z
+PROPS-END
+
+Revision-number: 1
+Prop-content-length: 118
+Content-length: 118
+
+K 7
+svn:log
+V 20
+testrepo setup rev 1
+K 10
+svn:author
+V 3
+hpk
+K 8
+svn:date
+V 27
+2005-01-07T23:55:37.815386Z
+PROPS-END
+
+Node-path: execfile
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 4
+Text-content-md5: d4b5bc61e16310f08c5d11866eba0a22
+Content-length: 14
+
+PROPS-END
+x=42
+
+Node-path: otherdir
+Node-kind: dir
+Node-action: add
+Prop-content-length: 10
+Content-length: 10
+
+PROPS-END
+
+
+Node-path: otherdir/__init__.py
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 0
+Text-content-md5: d41d8cd98f00b204e9800998ecf8427e
+Content-length: 10
+
+PROPS-END
+
+
+Node-path: otherdir/a.py
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 30
+Text-content-md5: 247c7daeb2ee5dcab0aba7bd12bad665
+Content-length: 40
+
+PROPS-END
+from b import stuff as result
+
+
+Node-path: otherdir/b.py
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 15
+Text-content-md5: c1b13503469a7711306d03a4b0721bc6
+Content-length: 25
+
+PROPS-END
+stuff="got it"
+
+
+Node-path: otherdir/c.py
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 75
+Text-content-md5: 250cdb6b5df68536152c681f48297569
+Content-length: 85
+
+PROPS-END
+import py; py.magic.autopath()
+import otherdir.a
+value = otherdir.a.result
+
+
+Node-path: otherdir/d.py
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 72
+Text-content-md5: 940c9c621e7b198e081459642c37f5a7
+Content-length: 82
+
+PROPS-END
+import py; py.magic.autopath()
+from otherdir import a
+value2 = a.result
+
+
+Node-path: sampledir
+Node-kind: dir
+Node-action: add
+Prop-content-length: 10
+Content-length: 10
+
+PROPS-END
+
+
+Node-path: sampledir/otherfile
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 0
+Text-content-md5: d41d8cd98f00b204e9800998ecf8427e
+Content-length: 10
+
+PROPS-END
+
+
+Node-path: samplefile
+Node-kind: file
+Node-action: add
+Prop-content-length: 40
+Text-content-length: 11
+Text-content-md5: 9225ac28b32156979ab6482b8bb5fb8c
+Content-length: 51
+
+K 13
+svn:eol-style
+V 6
+native
+PROPS-END
+samplefile
+
+
+Node-path: samplepickle
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 56
+Text-content-md5: 719d85c1329a33134bb98f56b756c545
+Content-length: 66
+
+PROPS-END
+(dp1
+S'answer'
+p2
+I42
+sI1
+I2
+sS'hello'
+p3
+S'world'
+p4
+s.
+
+Revision-number: 2
+Prop-content-length: 108
+Content-length: 108
+
+K 7
+svn:log
+V 10
+second rev
+K 10
+svn:author
+V 3
+hpk
+K 8
+svn:date
+V 27
+2005-01-07T23:55:39.223202Z
+PROPS-END
+
+Node-path: anotherfile
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Text-content-length: 5
+Text-content-md5: 5d41402abc4b2a76b9719d911017c592
+Content-length: 15
+
+PROPS-END
+hello
+
+Revision-number: 3
+Prop-content-length: 106
+Content-length: 106
+
+K 7
+svn:log
+V 9
+third rev
+K 10
+svn:author
+V 3
+hpk
+K 8
+svn:date
+V 27
+2005-01-07T23:55:41.556642Z
+PROPS-END
+
+Node-path: anotherfile
+Node-kind: file
+Node-action: change
+Text-content-length: 5
+Text-content-md5: 7d793037a0760186574b0282f2f435e7
+Content-length: 5
+
+world
+
diff --git a/testing/web-platform/tests/tools/py/testing/path/svntestbase.py b/testing/web-platform/tests/tools/py/testing/path/svntestbase.py
new file mode 100644
index 000000000..8d94a9ca6
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/svntestbase.py
@@ -0,0 +1,31 @@
+import sys
+import py
+from py._path import svnwc as svncommon
+from common import CommonFSTests
+
+class CommonSvnTests(CommonFSTests):
+
+ def test_propget(self, path1):
+ url = path1.join("samplefile")
+ value = url.propget('svn:eol-style')
+ assert value == 'native'
+
+ def test_proplist(self, path1):
+ url = path1.join("samplefile")
+ res = url.proplist()
+ assert res['svn:eol-style'] == 'native'
+
+ def test_info(self, path1):
+ url = path1.join("samplefile")
+ res = url.info()
+ assert res.size > len("samplefile") and res.created_rev >= 0
+
+ def test_log_simple(self, path1):
+ url = path1.join("samplefile")
+ logentries = url.log()
+ for logentry in logentries:
+ assert logentry.rev == 1
+ assert hasattr(logentry, 'author')
+ assert hasattr(logentry, 'date')
+
+#cache.repositories.put(svnrepourl, 1200, 0)
diff --git a/testing/web-platform/tests/tools/py/testing/path/test_cacheutil.py b/testing/web-platform/tests/tools/py/testing/path/test_cacheutil.py
new file mode 100644
index 000000000..0b5cd3133
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/test_cacheutil.py
@@ -0,0 +1,84 @@
+import py
+from py._path import cacheutil
+
+class BasicCacheAPITest:
+ cache = None
+ def test_getorbuild(self):
+ val = self.cache.getorbuild(-42, lambda: 42)
+ assert val == 42
+ val = self.cache.getorbuild(-42, lambda: 23)
+ assert val == 42
+
+ def test_cache_get_key_error(self):
+ py.test.raises(KeyError, "self.cache._getentry(-23)")
+
+ def test_delentry_non_raising(self):
+ val = self.cache.getorbuild(100, lambda: 100)
+ self.cache.delentry(100)
+ py.test.raises(KeyError, "self.cache._getentry(100)")
+
+ def test_delentry_raising(self):
+ val = self.cache.getorbuild(100, lambda: 100)
+ self.cache.delentry(100)
+ py.test.raises(KeyError, "self.cache.delentry(100, raising=True)")
+
+ def test_clear(self):
+ self.cache.clear()
+
+class TestBuildcostAccess(BasicCacheAPITest):
+ cache = cacheutil.BuildcostAccessCache(maxentries=128)
+
+ def test_cache_works_somewhat_simple(self, monkeypatch):
+ cache = cacheutil.BuildcostAccessCache()
+        # with the default gettime, BuildcostAccessCache.build can
+        # end up with time()-time() == 0, which makes the test below
+        # fail randomly.  Let's use incrementing numbers instead.
+ l = [0]
+ def counter():
+ l[0] = l[0] + 1
+ return l[0]
+ monkeypatch.setattr(cacheutil, 'gettime', counter)
+ for x in range(cache.maxentries):
+ y = cache.getorbuild(x, lambda: x)
+ assert x == y
+ for x in range(cache.maxentries):
+ assert cache.getorbuild(x, None) == x
+ halfentries = int(cache.maxentries / 2)
+ for x in range(halfentries):
+ assert cache.getorbuild(x, None) == x
+ assert cache.getorbuild(x, None) == x
+ # evict one entry
+ val = cache.getorbuild(-1, lambda: 42)
+ assert val == 42
+ # check that recently used ones are still there
+ # and are not build again
+ for x in range(halfentries):
+ assert cache.getorbuild(x, None) == x
+ assert cache.getorbuild(-1, None) == 42
+
+
+class TestAging(BasicCacheAPITest):
+ maxsecs = 0.10
+ cache = cacheutil.AgingCache(maxentries=128, maxseconds=maxsecs)
+
+ def test_cache_eviction(self):
+ self.cache.getorbuild(17, lambda: 17)
+ endtime = py.std.time.time() + self.maxsecs * 10
+ while py.std.time.time() < endtime:
+ try:
+ self.cache._getentry(17)
+ except KeyError:
+ break
+ py.std.time.sleep(self.maxsecs*0.3)
+ else:
+ py.test.fail("waiting for cache eviction failed")
+
+def test_prune_lowestweight():
+ maxsecs = 0.05
+ cache = cacheutil.AgingCache(maxentries=10, maxseconds=maxsecs)
+ for x in range(cache.maxentries):
+ cache.getorbuild(x, lambda: x)
+ py.std.time.sleep(maxsecs*1.1)
+ cache.getorbuild(cache.maxentries+1, lambda: 42)
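The cache tests above pin down the getorbuild contract: the builder callable only runs on a miss, and later lookups with the same key return the cached value without calling the builder. A tiny usage sketch, assuming the vendored py._path.cacheutil module is importable as in the tests:

    from py._path import cacheutil

    cache = cacheutil.BuildcostAccessCache(maxentries=128)
    value = cache.getorbuild("answer", lambda: 42)   # miss: builder runs
    again = cache.getorbuild("answer", lambda: 23)   # hit: builder is ignored
    assert value == again == 42
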
diff --git a/testing/web-platform/tests/tools/py/testing/path/test_local.py b/testing/web-platform/tests/tools/py/testing/path/test_local.py
new file mode 100644
index 000000000..bcf131fd2
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/test_local.py
@@ -0,0 +1,860 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import with_statement
+import py
+import pytest
+import os, sys
+from py.path import local
+import common
+
+failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
+failsonjywin32 = py.test.mark.xfail("sys.platform.startswith('java') "
+ "and getattr(os, '_name', None) == 'nt'")
+win32only = py.test.mark.skipif(
+ "not (sys.platform == 'win32' or getattr(os, '_name', None) == 'nt')")
+skiponwin32 = py.test.mark.skipif(
+ "sys.platform == 'win32' or getattr(os, '_name', None) == 'nt'")
+
+
+def pytest_funcarg__path1(request):
+ def setup():
+ path1 = request.getfuncargvalue("tmpdir")
+ common.setuptestfs(path1)
+ return path1
+ def teardown(path1):
+ # post check
+ assert path1.join("samplefile").check()
+ return request.cached_setup(setup, teardown, scope="session")
+
+class TestLocalPath(common.CommonFSTests):
+ def test_join_normpath(self, tmpdir):
+ assert tmpdir.join(".") == tmpdir
+ p = tmpdir.join("../%s" % tmpdir.basename)
+ assert p == tmpdir
+ p = tmpdir.join("..//%s/" % tmpdir.basename)
+ assert p == tmpdir
+
+ @skiponwin32
+ def test_dirpath_abs_no_abs(self, tmpdir):
+ p = tmpdir.join('foo')
+ assert p.dirpath('/bar') == tmpdir.join('bar')
+ assert tmpdir.dirpath('/bar', abs=True) == py.path.local('/bar')
+
+ def test_gethash(self, tmpdir):
+ md5 = py.builtin._tryimport('md5', 'hashlib').md5
+ lib = py.builtin._tryimport('sha', 'hashlib')
+ sha = getattr(lib, 'sha1', getattr(lib, 'sha', None))
+ fn = tmpdir.join("testhashfile")
+ data = 'hello'.encode('ascii')
+ fn.write(data, mode="wb")
+ assert fn.computehash("md5") == md5(data).hexdigest()
+ assert fn.computehash("sha1") == sha(data).hexdigest()
+ py.test.raises(ValueError, fn.computehash, "asdasd")
+
+ def test_remove_removes_readonly_file(self, tmpdir):
+ readonly_file = tmpdir.join('readonly').ensure()
+ readonly_file.chmod(0)
+ readonly_file.remove()
+ assert not readonly_file.check(exists=1)
+
+ def test_remove_removes_readonly_dir(self, tmpdir):
+ readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
+ readonly_dir.chmod(int("500", 8))
+ readonly_dir.remove()
+ assert not readonly_dir.check(exists=1)
+
+ def test_remove_removes_dir_and_readonly_file(self, tmpdir):
+ readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
+ readonly_file = readonly_dir.join('readonlyfile').ensure()
+ readonly_file.chmod(0)
+ readonly_dir.remove()
+ assert not readonly_dir.check(exists=1)
+
+ def test_remove_routes_ignore_errors(self, tmpdir, monkeypatch):
+ l = []
+ monkeypatch.setattr(py.std.shutil, 'rmtree',
+ lambda *args, **kwargs: l.append(kwargs))
+ tmpdir.remove()
+ assert not l[0]['ignore_errors']
+ for val in (True, False):
+ l[:] = []
+ tmpdir.remove(ignore_errors=val)
+ assert l[0]['ignore_errors'] == val
+
+ def test_initialize_curdir(self):
+ assert str(local()) == py.std.os.getcwd()
+
+ @skiponwin32
+ def test_chdir_gone(self, path1):
+ p = path1.ensure("dir_to_be_removed", dir=1)
+ p.chdir()
+ p.remove()
+ pytest.raises(py.error.ENOENT, py.path.local)
+ assert path1.chdir() is None
+ assert os.getcwd() == str(path1)
+
+ def test_as_cwd(self, path1):
+ dir = path1.ensure("subdir", dir=1)
+ old = py.path.local()
+ with dir.as_cwd() as x:
+ assert x == old
+ assert py.path.local() == dir
+ assert os.getcwd() == str(old)
+
+ def test_as_cwd_exception(self, path1):
+ old = py.path.local()
+ dir = path1.ensure("subdir", dir=1)
+ with pytest.raises(ValueError):
+ with dir.as_cwd():
+ raise ValueError()
+ assert old == py.path.local()
+
+ def test_initialize_reldir(self, path1):
+ with path1.as_cwd():
+ p = local('samplefile')
+ assert p.check()
+
+ @pytest.mark.xfail("sys.version_info < (2,6) and sys.platform == 'win32'")
+ def test_tilde_expansion(self, monkeypatch, tmpdir):
+ monkeypatch.setenv("HOME", str(tmpdir))
+ p = py.path.local("~", expanduser=True)
+ assert p == os.path.expanduser("~")
+
+ def test_eq_with_strings(self, path1):
+ path1 = path1.join('sampledir')
+ path2 = str(path1)
+ assert path1 == path2
+ assert path2 == path1
+ path3 = path1.join('samplefile')
+ assert path3 != path2
+ assert path2 != path3
+
+ def test_eq_with_none(self, path1):
+ assert path1 != None
+
+ def test_gt_with_strings(self, path1):
+ path2 = path1.join('sampledir')
+ path3 = str(path1.join("ttt"))
+ assert path3 > path2
+ assert path2 < path3
+ assert path2 < "ttt"
+ assert "ttt" > path2
+ path4 = path1.join("aaa")
+ l = [path2, path4,path3]
+ assert sorted(l) == [path4, path2, path3]
+
+ def test_open_and_ensure(self, path1):
+ p = path1.join("sub1", "sub2", "file")
+ with p.open("w", ensure=1) as f:
+ f.write("hello")
+ assert p.read() == "hello"
+
+ def test_write_and_ensure(self, path1):
+ p = path1.join("sub1", "sub2", "file")
+ p.write("hello", ensure=1)
+ assert p.read() == "hello"
+
+ @py.test.mark.multi(bin=(False, True))
+ def test_dump(self, tmpdir, bin):
+ path = tmpdir.join("dumpfile%s" % int(bin))
+ try:
+ d = {'answer' : 42}
+ path.dump(d, bin=bin)
+ f = path.open('rb+')
+ dnew = py.std.pickle.load(f)
+ assert d == dnew
+ finally:
+ f.close()
+
+ @failsonjywin32
+ def test_setmtime(self):
+ import tempfile
+ import time
+ try:
+ fd, name = tempfile.mkstemp()
+ py.std.os.close(fd)
+ except AttributeError:
+ name = tempfile.mktemp()
+ open(name, 'w').close()
+ try:
+ mtime = int(time.time())-100
+ path = local(name)
+ assert path.mtime() != mtime
+ path.setmtime(mtime)
+ assert path.mtime() == mtime
+ path.setmtime()
+ assert path.mtime() != mtime
+ finally:
+ py.std.os.remove(name)
+
+ def test_normpath(self, path1):
+ new1 = path1.join("/otherdir")
+ new2 = path1.join("otherdir")
+ assert str(new1) == str(new2)
+
+ def test_mkdtemp_creation(self):
+ d = local.mkdtemp()
+ try:
+ assert d.check(dir=1)
+ finally:
+ d.remove(rec=1)
+
+ def test_tmproot(self):
+ d = local.mkdtemp()
+ tmproot = local.get_temproot()
+ try:
+ assert d.check(dir=1)
+ assert d.dirpath() == tmproot
+ finally:
+ d.remove(rec=1)
+
+ def test_chdir(self, tmpdir):
+ old = local()
+ try:
+ res = tmpdir.chdir()
+ assert str(res) == str(old)
+ assert py.std.os.getcwd() == str(tmpdir)
+ finally:
+ old.chdir()
+
+ def test_ensure_filepath_withdir(self, tmpdir):
+ newfile = tmpdir.join('test1','test')
+ newfile.ensure()
+ assert newfile.check(file=1)
+ newfile.write("42")
+ newfile.ensure()
+ s = newfile.read()
+ assert s == "42"
+
+ def test_ensure_filepath_withoutdir(self, tmpdir):
+ newfile = tmpdir.join('test1file')
+ t = newfile.ensure()
+ assert t == newfile
+ assert newfile.check(file=1)
+
+ def test_ensure_dirpath(self, tmpdir):
+ newfile = tmpdir.join('test1','testfile')
+ t = newfile.ensure(dir=1)
+ assert t == newfile
+ assert newfile.check(dir=1)
+
+ def test_init_from_path(self, tmpdir):
+ l = local()
+ l2 = local(l)
+ assert l2 == l
+
+ wc = py.path.svnwc('.')
+ l3 = local(wc)
+ assert l3 is not wc
+ assert l3.strpath == wc.strpath
+ assert not hasattr(l3, 'commit')
+
+    @py.test.mark.xfail(run=False, reason="unreliable test for long filenames")
+ def test_long_filenames(self, tmpdir):
+ if sys.platform == "win32":
+ py.test.skip("win32: work around needed for path length limit")
+ # see http://codespeak.net/pipermail/py-dev/2008q2/000922.html
+
+        # testing paths > 260 chars (Windows' limit, though it depends on how
+        # the paths are used) but < 4096 chars (the Linux limit) - the
+        # behaviour of paths with names > 4096 chars is undetermined
+ newfilename = '/test' * 60
+ l = tmpdir.join(newfilename)
+ l.ensure(file=True)
+ l.write('foo')
+ l2 = tmpdir.join(newfilename)
+ assert l2.read() == 'foo'
+
+ def test_visit_depth_first(self, tmpdir):
+ p1 = tmpdir.ensure("a","1")
+ p2 = tmpdir.ensure("b","2")
+ p3 = tmpdir.ensure("breadth")
+ l = list(tmpdir.visit(lambda x: x.check(file=1)))
+ assert len(l) == 3
+ # check that breadth comes last
+ assert l[2] == p3
+
+ def test_visit_rec_fnmatch(self, tmpdir):
+ p1 = tmpdir.ensure("a","123")
+ p2 = tmpdir.ensure(".b","345")
+ l = list(tmpdir.visit("???", rec="[!.]*"))
+ assert len(l) == 1
+        # only the file below the non-dot directory is matched
+ assert l[0] == p1
+
+ def test_fnmatch_file_abspath(self, tmpdir):
+ b = tmpdir.join("a", "b")
+ assert b.fnmatch(os.sep.join("ab"))
+ pattern = os.sep.join([str(tmpdir), "*", "b"])
+ assert b.fnmatch(pattern)
+
+ def test_sysfind(self):
+ name = sys.platform == "win32" and "cmd" or "test"
+ x = py.path.local.sysfind(name)
+ assert x.check(file=1)
+ assert py.path.local.sysfind('jaksdkasldqwe') is None
+ assert py.path.local.sysfind(name, paths=[]) is None
+ x2 = py.path.local.sysfind(name, paths=[x.dirpath()])
+ assert x2 == x
+
+
+class TestExecutionOnWindows:
+ pytestmark = win32only
+
+ def test_sysfind_bat_exe_before(self, tmpdir, monkeypatch):
+ monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep)
+ tmpdir.ensure("hello")
+ h = tmpdir.ensure("hello.bat")
+ x = py.path.local.sysfind("hello")
+ assert x == h
+
+
+class TestExecution:
+ pytestmark = skiponwin32
+
+ def test_sysfind_no_permisson_ignored(self, monkeypatch, tmpdir):
+ noperm = tmpdir.ensure('noperm', dir=True)
+ monkeypatch.setenv("PATH", noperm, prepend=":")
+ noperm.chmod(0)
+ assert py.path.local.sysfind('jaksdkasldqwe') is None
+
+ def test_sysfind_absolute(self):
+ x = py.path.local.sysfind('test')
+ assert x.check(file=1)
+ y = py.path.local.sysfind(str(x))
+ assert y.check(file=1)
+ assert y == x
+
+ def test_sysfind_multiple(self, tmpdir, monkeypatch):
+ monkeypatch.setenv('PATH',
+ "%s:%s" % (tmpdir.ensure('a'),
+ tmpdir.join('b')),
+ prepend=":")
+ tmpdir.ensure('b', 'a')
+ checker = lambda x: x.dirpath().basename == 'b'
+ x = py.path.local.sysfind('a', checker=checker)
+ assert x.basename == 'a'
+ assert x.dirpath().basename == 'b'
+ checker = lambda x: None
+ assert py.path.local.sysfind('a', checker=checker) is None
+
+ def test_sysexec(self):
+ x = py.path.local.sysfind('ls')
+ out = x.sysexec('-a')
+ for x in py.path.local().listdir():
+ assert out.find(x.basename) != -1
+
+ def test_sysexec_failing(self):
+ x = py.path.local.sysfind('false')
+ py.test.raises(py.process.cmdexec.Error, """
+ x.sysexec('aksjdkasjd')
+ """)
+
+ def test_make_numbered_dir(self, tmpdir):
+ tmpdir.ensure('base.not_an_int', dir=1)
+ for i in range(10):
+ numdir = local.make_numbered_dir(prefix='base.', rootdir=tmpdir,
+ keep=2, lock_timeout=0)
+ assert numdir.check()
+ assert numdir.basename == 'base.%d' %i
+ if i>=1:
+ assert numdir.new(ext=str(i-1)).check()
+ if i>=2:
+ assert numdir.new(ext=str(i-2)).check()
+ if i>=3:
+ assert not numdir.new(ext=str(i-3)).check()
+
+ def test_make_numbered_dir_NotImplemented_Error(self, tmpdir, monkeypatch):
+ def notimpl(x, y):
+ raise NotImplementedError(42)
+ monkeypatch.setattr(py.std.os, 'symlink', notimpl)
+ x = tmpdir.make_numbered_dir(rootdir=tmpdir, lock_timeout=0)
+ assert x.relto(tmpdir)
+ assert x.check()
+
+ def test_locked_make_numbered_dir(self, tmpdir):
+ for i in range(10):
+ numdir = local.make_numbered_dir(prefix='base2.', rootdir=tmpdir,
+ keep=2)
+ assert numdir.check()
+ assert numdir.basename == 'base2.%d' %i
+ for j in range(i):
+ assert numdir.new(ext=str(j)).check()
+
+ def test_error_preservation(self, path1):
+ py.test.raises (EnvironmentError, path1.join('qwoeqiwe').mtime)
+ py.test.raises (EnvironmentError, path1.join('qwoeqiwe').read)
+
+ #def test_parentdirmatch(self):
+ # local.parentdirmatch('std', startmodule=__name__)
+ #
+
+
+class TestImport:
+ def test_pyimport(self, path1):
+ obj = path1.join('execfile.py').pyimport()
+ assert obj.x == 42
+ assert obj.__name__ == 'execfile'
+
+ def test_pyimport_renamed_dir_creates_mismatch(self, tmpdir):
+ p = tmpdir.ensure("a", "test_x123.py")
+ p.pyimport()
+ tmpdir.join("a").move(tmpdir.join("b"))
+ pytest.raises(tmpdir.ImportMismatchError,
+ lambda: tmpdir.join("b", "test_x123.py").pyimport())
+
+ def test_pyimport_messy_name(self, tmpdir):
+ # http://bitbucket.org/hpk42/py-trunk/issue/129
+ path = tmpdir.ensure('foo__init__.py')
+ obj = path.pyimport()
+
+ def test_pyimport_dir(self, tmpdir):
+ p = tmpdir.join("hello_123")
+ p_init = p.ensure("__init__.py")
+ m = p.pyimport()
+ assert m.__name__ == "hello_123"
+ m = p_init.pyimport()
+ assert m.__name__ == "hello_123"
+
+ def test_pyimport_execfile_different_name(self, path1):
+ obj = path1.join('execfile.py').pyimport(modname="0x.y.z")
+ assert obj.x == 42
+ assert obj.__name__ == '0x.y.z'
+
+ def test_pyimport_a(self, path1):
+ otherdir = path1.join('otherdir')
+ mod = otherdir.join('a.py').pyimport()
+ assert mod.result == "got it"
+ assert mod.__name__ == 'otherdir.a'
+
+ def test_pyimport_b(self, path1):
+ otherdir = path1.join('otherdir')
+ mod = otherdir.join('b.py').pyimport()
+ assert mod.stuff == "got it"
+ assert mod.__name__ == 'otherdir.b'
+
+ def test_pyimport_c(self, path1):
+ otherdir = path1.join('otherdir')
+ mod = otherdir.join('c.py').pyimport()
+ assert mod.value == "got it"
+
+ def test_pyimport_d(self, path1):
+ otherdir = path1.join('otherdir')
+ mod = otherdir.join('d.py').pyimport()
+ assert mod.value2 == "got it"
+
+ def test_pyimport_and_import(self, tmpdir):
+ tmpdir.ensure('xxxpackage', '__init__.py')
+ mod1path = tmpdir.ensure('xxxpackage', 'module1.py')
+ mod1 = mod1path.pyimport()
+ assert mod1.__name__ == 'xxxpackage.module1'
+ from xxxpackage import module1
+ assert module1 is mod1
+
+ def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir):
+ name = 'pointsback123'
+ ModuleType = type(py.std.os)
+ p = tmpdir.ensure(name + '.py')
+ for ending in ('.pyc', '$py.class', '.pyo'):
+ mod = ModuleType(name)
+ pseudopath = tmpdir.ensure(name+ending)
+ mod.__file__ = str(pseudopath)
+ monkeypatch.setitem(sys.modules, name, mod)
+ newmod = p.pyimport()
+ assert mod == newmod
+ monkeypatch.undo()
+ mod = ModuleType(name)
+ pseudopath = tmpdir.ensure(name+"123.py")
+ mod.__file__ = str(pseudopath)
+ monkeypatch.setitem(sys.modules, name, mod)
+ excinfo = py.test.raises(pseudopath.ImportMismatchError,
+ "p.pyimport()")
+ modname, modfile, orig = excinfo.value.args
+ assert modname == name
+ assert modfile == pseudopath
+ assert orig == p
+ assert issubclass(pseudopath.ImportMismatchError, ImportError)
+
+ def test_issue131_pyimport_on__init__(self, tmpdir):
+ # __init__.py files may be namespace packages, and thus the
+ # __file__ of an imported module may not be ourselves
+ # see issue
+ p1 = tmpdir.ensure("proja", "__init__.py")
+ p2 = tmpdir.ensure("sub", "proja", "__init__.py")
+ m1 = p1.pyimport()
+ m2 = p2.pyimport()
+ assert m1 == m2
+
+ def test_ensuresyspath_append(self, tmpdir):
+ root1 = tmpdir.mkdir("root1")
+ file1 = root1.ensure("x123.py")
+ assert str(root1) not in sys.path
+ file1.pyimport(ensuresyspath="append")
+ assert str(root1) == sys.path[-1]
+ assert str(root1) not in sys.path[:-1]
+
+
+def test_pypkgdir(tmpdir):
+ pkg = tmpdir.ensure('pkg1', dir=1)
+ pkg.ensure("__init__.py")
+ pkg.ensure("subdir/__init__.py")
+ assert pkg.pypkgpath() == pkg
+ assert pkg.join('subdir', '__init__.py').pypkgpath() == pkg
+
+def test_pypkgdir_unimportable(tmpdir):
+ pkg = tmpdir.ensure('pkg1-1', dir=1) # unimportable
+ pkg.ensure("__init__.py")
+ subdir = pkg.ensure("subdir/__init__.py").dirpath()
+ assert subdir.pypkgpath() == subdir
+ assert subdir.ensure("xyz.py").pypkgpath() == subdir
+ assert not pkg.pypkgpath()
+
+def test_isimportable():
+ from py._path.local import isimportable
+ assert not isimportable("")
+ assert isimportable("x")
+ assert isimportable("x1")
+ assert isimportable("x_1")
+ assert isimportable("_")
+ assert isimportable("_1")
+ assert not isimportable("x-1")
+ assert not isimportable("x:1")
+
+def test_homedir_from_HOME(monkeypatch):
+ path = os.getcwd()
+ monkeypatch.setenv("HOME", path)
+ assert py.path.local._gethomedir() == py.path.local(path)
+
+def test_homedir_not_exists(monkeypatch):
+ monkeypatch.delenv("HOME", raising=False)
+ monkeypatch.delenv("HOMEDRIVE", raising=False)
+ homedir = py.path.local._gethomedir()
+ assert homedir is None
+
+def test_samefile(tmpdir):
+ assert tmpdir.samefile(tmpdir)
+ p = tmpdir.ensure("hello")
+ assert p.samefile(p)
+ with p.dirpath().as_cwd():
+ assert p.samefile(p.basename)
+ if sys.platform == "win32":
+ p1 = p.__class__(str(p).lower())
+ p2 = p.__class__(str(p).upper())
+ assert p1.samefile(p2)
+
+def test_listdir_single_arg(tmpdir):
+ tmpdir.ensure("hello")
+ assert tmpdir.listdir("hello")[0].basename == "hello"
+
+def test_mkdtemp_rootdir(tmpdir):
+ dtmp = local.mkdtemp(rootdir=tmpdir)
+ assert tmpdir.listdir() == [dtmp]
+
+class TestWINLocalPath:
+ pytestmark = win32only
+
+ def test_owner_group_not_implemented(self, path1):
+ py.test.raises(NotImplementedError, "path1.stat().owner")
+ py.test.raises(NotImplementedError, "path1.stat().group")
+
+ def test_chmod_simple_int(self, path1):
+ py.builtin.print_("path1 is", path1)
+ mode = path1.stat().mode
+ # Ensure that we actually change the mode to something different.
+ path1.chmod(mode == 0 and 1 or 0)
+ try:
+ print(path1.stat().mode)
+ print(mode)
+ assert path1.stat().mode != mode
+ finally:
+ path1.chmod(mode)
+ assert path1.stat().mode == mode
+
+ def test_path_comparison_lowercase_mixed(self, path1):
+ t1 = path1.join("a_path")
+ t2 = path1.join("A_path")
+ assert t1 == t1
+ assert t1 == t2
+
+ def test_relto_with_mixed_case(self, path1):
+ t1 = path1.join("a_path", "fiLe")
+ t2 = path1.join("A_path")
+ assert t1.relto(t2) == "fiLe"
+
+ def test_allow_unix_style_paths(self, path1):
+ t1 = path1.join('a_path')
+ assert t1 == str(path1) + '\\a_path'
+ t1 = path1.join('a_path/')
+ assert t1 == str(path1) + '\\a_path'
+ t1 = path1.join('dir/a_path')
+ assert t1 == str(path1) + '\\dir\\a_path'
+
+ def test_sysfind_in_currentdir(self, path1):
+ cmd = py.path.local.sysfind('cmd')
+ root = cmd.new(dirname='', basename='') # c:\ in most installations
+ with root.as_cwd():
+ x = py.path.local.sysfind(cmd.relto(root))
+ assert x.check(file=1)
+
+ def test_fnmatch_file_abspath_posix_pattern_on_win32(self, tmpdir):
+ # path-matching patterns might contain a posix path separator '/'
+ # Test that we can match that pattern on windows.
+ import posixpath
+ b = tmpdir.join("a", "b")
+ assert b.fnmatch(posixpath.sep.join("ab"))
+ pattern = posixpath.sep.join([str(tmpdir), "*", "b"])
+ assert b.fnmatch(pattern)
+
+class TestPOSIXLocalPath:
+ pytestmark = skiponwin32
+
+ def test_hardlink(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ filepath = tmpdir.join('file')
+ filepath.write("Hello")
+ nlink = filepath.stat().nlink
+ linkpath.mklinkto(filepath)
+ assert filepath.stat().nlink == nlink + 1
+
+ def test_symlink_are_identical(self, tmpdir):
+ filepath = tmpdir.join('file')
+ filepath.write("Hello")
+ linkpath = tmpdir.join('test')
+ linkpath.mksymlinkto(filepath)
+ assert linkpath.readlink() == str(filepath)
+
+ def test_symlink_isfile(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ filepath = tmpdir.join('file')
+ filepath.write("")
+ linkpath.mksymlinkto(filepath)
+ assert linkpath.check(file=1)
+ assert not linkpath.check(link=0, file=1)
+ assert linkpath.islink()
+
+ def test_symlink_relative(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ filepath = tmpdir.join('file')
+ filepath.write("Hello")
+ linkpath.mksymlinkto(filepath, absolute=False)
+ assert linkpath.readlink() == "file"
+ assert filepath.read() == linkpath.read()
+
+ def test_symlink_not_existing(self, tmpdir):
+ linkpath = tmpdir.join('testnotexisting')
+ assert not linkpath.check(link=1)
+ assert linkpath.check(link=0)
+
+ def test_relto_with_root(self, path1, tmpdir):
+ y = path1.join('x').relto(py.path.local('/'))
+ assert y[0] == str(path1)[1]
+
+ def test_visit_recursive_symlink(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ linkpath.mksymlinkto(tmpdir)
+ visitor = tmpdir.visit(None, lambda x: x.check(link=0))
+ assert list(visitor) == [linkpath]
+
+ def test_symlink_isdir(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ linkpath.mksymlinkto(tmpdir)
+ assert linkpath.check(dir=1)
+ assert not linkpath.check(link=0, dir=1)
+
+ def test_symlink_remove(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ linkpath.mksymlinkto(linkpath) # point to itself
+ assert linkpath.check(link=1)
+ linkpath.remove()
+ assert not linkpath.check()
+
+ def test_realpath_file(self, tmpdir):
+ linkpath = tmpdir.join('test')
+ filepath = tmpdir.join('file')
+ filepath.write("")
+ linkpath.mksymlinkto(filepath)
+ realpath = linkpath.realpath()
+ assert realpath.basename == 'file'
+
+ def test_owner(self, path1, tmpdir):
+ from pwd import getpwuid
+ from grp import getgrgid
+ stat = path1.stat()
+ assert stat.path == path1
+
+ uid = stat.uid
+ gid = stat.gid
+ owner = getpwuid(uid)[0]
+ group = getgrgid(gid)[0]
+
+ assert uid == stat.uid
+ assert owner == stat.owner
+ assert gid == stat.gid
+ assert group == stat.group
+
+ def test_stat_helpers(self, tmpdir, monkeypatch):
+ path1 = tmpdir.ensure("file")
+ stat1 = path1.stat()
+ stat2 = tmpdir.stat()
+ assert stat1.isfile()
+ assert stat2.isdir()
+ assert not stat1.islink()
+ assert not stat2.islink()
+
+ def test_stat_non_raising(self, tmpdir):
+ path1 = tmpdir.join("file")
+ pytest.raises(py.error.ENOENT, lambda: path1.stat())
+ res = path1.stat(raising=False)
+ assert res is None
+
+ def test_atime(self, tmpdir):
+ import time
+ path = tmpdir.ensure('samplefile')
+ now = time.time()
+ atime1 = path.atime()
+ # we could wait here but timer resolution is very
+ # system dependent
+ path.read()
+ time.sleep(0.01)
+ atime2 = path.atime()
+ time.sleep(0.01)
+ duration = time.time() - now
+ assert (atime2-atime1) <= duration
+
+ def test_commondir(self, path1):
+ # XXX This is here in local until we find a way to implement this
+ # using the subversion command line api.
+ p1 = path1.join('something')
+ p2 = path1.join('otherthing')
+ assert p1.common(p2) == path1
+ assert p2.common(p1) == path1
+
+ def test_commondir_nocommon(self, path1):
+ # XXX This is here in local until we find a way to implement this
+ # using the subversion command line api.
+ p1 = path1.join('something')
+ p2 = py.path.local(path1.sep+'blabla')
+ assert p1.common(p2) == '/'
+
+ def test_join_to_root(self, path1):
+ root = path1.parts()[0]
+ assert len(str(root)) == 1
+ assert str(root.join('a')) == '//a' # posix allows two slashes
+
+ def test_join_root_to_root_with_no_abs(self, path1):
+ nroot = path1.join('/')
+ assert str(path1) == str(nroot)
+ assert path1 == nroot
+
+ def test_chmod_simple_int(self, path1):
+ mode = path1.stat().mode
+ path1.chmod(int(mode/2))
+ try:
+ assert path1.stat().mode != mode
+ finally:
+ path1.chmod(mode)
+ assert path1.stat().mode == mode
+
+ def test_chmod_rec_int(self, path1):
+ # XXX fragile test
+ recfilter = lambda x: x.check(dotfile=0, link=0)
+ oldmodes = {}
+ for x in path1.visit(rec=recfilter):
+ oldmodes[x] = x.stat().mode
+ path1.chmod(int("772", 8), rec=recfilter)
+ try:
+ for x in path1.visit(rec=recfilter):
+ assert x.stat().mode & int("777", 8) == int("772", 8)
+ finally:
+ for x,y in oldmodes.items():
+ x.chmod(y)
+
+ def test_copy_archiving(self, tmpdir):
+ unicode_fn = u"something-\342\200\223.txt"
+ f = tmpdir.ensure("a", unicode_fn)
+ a = f.dirpath()
+ oldmode = f.stat().mode
+ newmode = oldmode ^ 1
+ f.chmod(newmode)
+ b = tmpdir.join("b")
+ a.copy(b, mode=True)
+ assert b.join(f.basename).stat().mode == newmode
+
+ @failsonjython
+ def test_chown_identity(self, path1):
+ owner = path1.stat().owner
+ group = path1.stat().group
+ path1.chown(owner, group)
+
+ @failsonjython
+ def test_chown_dangling_link(self, path1):
+ owner = path1.stat().owner
+ group = path1.stat().group
+ x = path1.join('hello')
+ x.mksymlinkto('qlwkejqwlek')
+ try:
+ path1.chown(owner, group, rec=1)
+ finally:
+ x.remove(rec=0)
+
+ @failsonjython
+ def test_chown_identity_rec_mayfail(self, path1):
+ owner = path1.stat().owner
+ group = path1.stat().group
+ path1.chown(owner, group)
+
+
+class TestUnicodePy2Py3:
+ def test_join_ensure(self, tmpdir, monkeypatch):
+ if sys.version_info >= (3,0) and "LANG" not in os.environ:
+ pytest.skip("cannot run test without locale")
+ x = py.path.local(tmpdir.strpath)
+ part = "hällo"
+ y = x.ensure(part)
+ assert x.join(part) == y
+
+ def test_listdir(self, tmpdir):
+ if sys.version_info >= (3,0) and "LANG" not in os.environ:
+ pytest.skip("cannot run test without locale")
+ x = py.path.local(tmpdir.strpath)
+ part = "hällo"
+ y = x.ensure(part)
+ assert x.listdir(part)[0] == y
+
+ @pytest.mark.xfail(reason="changing read/write might break existing usages")
+ def test_read_write(self, tmpdir):
+ x = tmpdir.join("hello")
+ part = py.builtin._totext("hällo", "utf8")
+ x.write(part)
+ assert x.read() == part
+ x.write(part.encode(sys.getdefaultencoding()))
+ assert x.read() == part.encode(sys.getdefaultencoding())
+
+class TestBinaryAndTextMethods:
+ def test_read_binwrite(self, tmpdir):
+ x = tmpdir.join("hello")
+ part = py.builtin._totext("hällo", "utf8")
+ part_utf8 = part.encode("utf8")
+ x.write_binary(part_utf8)
+ assert x.read_binary() == part_utf8
+ s = x.read_text(encoding="utf8")
+ assert s == part
+ assert py.builtin._istext(s)
+
+ def test_read_textwrite(self, tmpdir):
+ x = tmpdir.join("hello")
+ part = py.builtin._totext("hällo", "utf8")
+ part_utf8 = part.encode("utf8")
+ x.write_text(part, encoding="utf8")
+ assert x.read_binary() == part_utf8
+ assert x.read_text(encoding="utf8") == part
+
+ def test_default_encoding(self, tmpdir):
+ x = tmpdir.join("hello")
+ # Can't use UTF8 as the default encoding (ASCII) doesn't support it
+ part = py.builtin._totext("hello", "ascii")
+ x.write_text(part, "ascii")
+ s = x.read_text("ascii")
+ assert s == part
+ assert type(s) == type(part)
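test_local.py exercises the py.path.local API end to end: joining and normalizing, ensure/write/read, hashing, chdir and as_cwd, numbered directories, sysfind/sysexec, pyimport, and the binary/text helpers. A condensed sketch of the core calls the tests rely on, using a throwaway temporary directory:

    import py

    tmp = py.path.local.mkdtemp()              # cf. test_mkdtemp_creation
    try:
        f = tmp.join("sub", "hello.txt")
        f.write("hi", ensure=1)                # cf. test_write_and_ensure
        assert f.read() == "hi"
        assert f.check(file=1)
    finally:
        tmp.remove(rec=1)
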
diff --git a/testing/web-platform/tests/tools/py/testing/path/test_svnauth.py b/testing/web-platform/tests/tools/py/testing/path/test_svnauth.py
new file mode 100644
index 000000000..b3f366561
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/test_svnauth.py
@@ -0,0 +1,454 @@
+import py
+import svntestbase
+from py.path import SvnAuth
+import time
+import sys
+
+svnbin = py.path.local.sysfind('svn')
+
+def make_repo_auth(repo, userdata):
+ """ write config to repo
+
+ user information in userdata is used for auth
+ userdata has user names as keys, and a tuple (password, readwrite) as
+ values, where 'readwrite' is either 'r' or 'rw'
+ """
+ confdir = py.path.local(repo).join('conf')
+ confdir.join('svnserve.conf').write('''\
+[general]
+anon-access = none
+password-db = passwd
+authz-db = authz
+realm = TestRepo
+''')
+ authzdata = '[/]\n'
+ passwddata = '[users]\n'
+ for user in userdata:
+ authzdata += '%s = %s\n' % (user, userdata[user][1])
+ passwddata += '%s = %s\n' % (user, userdata[user][0])
+ confdir.join('authz').write(authzdata)
+ confdir.join('passwd').write(passwddata)
+
+def serve_bg(repopath):
+ pidfile = py.path.local(repopath).join('pid')
+ port = 10000
+ e = None
+ while port < 10010:
+ cmd = 'svnserve -d -T --listen-port=%d --pid-file=%s -r %s' % (
+ port, pidfile, repopath)
+ print(cmd)
+ try:
+ py.process.cmdexec(cmd)
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ else:
+ # XXX we assume here that the pid file gets written somewhere, I
+ # guess this should be relatively safe... (I hope, at least?)
+ counter = pid = 0
+ while counter < 10:
+ counter += 1
+ try:
+ pid = pidfile.read()
+ except py.error.ENOENT:
+ pass
+ if pid:
+ break
+ time.sleep(0.2)
+ return port, int(pid)
+ port += 1
+ raise IOError('could not start svnserve: %s' % (e,))
+
+class TestSvnAuth(object):
+ def test_basic(self):
+ auth = SvnAuth('foo', 'bar')
+ assert auth.username == 'foo'
+ assert auth.password == 'bar'
+ assert str(auth)
+
+ def test_makecmdoptions_uname_pw_makestr(self):
+ auth = SvnAuth('foo', 'bar')
+ assert auth.makecmdoptions() == '--username="foo" --password="bar"'
+
+ def test_makecmdoptions_quote_escape(self):
+ auth = SvnAuth('fo"o', '"ba\'r"')
+ assert auth.makecmdoptions() == '--username="fo\\"o" --password="\\"ba\'r\\""'
+
+ def test_makecmdoptions_no_cache_auth(self):
+ auth = SvnAuth('foo', 'bar', cache_auth=False)
+ assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
+ '--no-auth-cache')
+
+ def test_makecmdoptions_no_interactive(self):
+ auth = SvnAuth('foo', 'bar', interactive=False)
+ assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
+ '--non-interactive')
+
+ def test_makecmdoptions_no_interactive_no_cache_auth(self):
+ auth = SvnAuth('foo', 'bar', cache_auth=False,
+ interactive=False)
+ assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
+ '--no-auth-cache --non-interactive')
+
+class svnwc_no_svn(py.path.svnwc):
+ def __new__(cls, *args, **kwargs):
+ self = super(svnwc_no_svn, cls).__new__(cls, *args, **kwargs)
+ self.commands = []
+ return self
+
+ def _svn(self, *args):
+ self.commands.append(args)
+
+class TestSvnWCAuth(object):
+ def setup_method(self, meth):
+ if not svnbin:
+ py.test.skip("svn binary required")
+ self.auth = SvnAuth('user', 'pass', cache_auth=False)
+
+ def test_checkout(self):
+ wc = svnwc_no_svn('foo', auth=self.auth)
+ wc.checkout('url')
+ assert wc.commands[0][-1] == ('--username="user" --password="pass" '
+ '--no-auth-cache')
+
+ def test_commit(self):
+ wc = svnwc_no_svn('foo', auth=self.auth)
+ wc.commit('msg')
+ assert wc.commands[0][-1] == ('--username="user" --password="pass" '
+ '--no-auth-cache')
+
+ def test_checkout_no_cache_auth(self):
+ wc = svnwc_no_svn('foo', auth=self.auth)
+ wc.checkout('url')
+ assert wc.commands[0][-1] == ('--username="user" --password="pass" '
+ '--no-auth-cache')
+
+ def test_checkout_auth_from_constructor(self):
+ wc = svnwc_no_svn('foo', auth=self.auth)
+ wc.checkout('url')
+ assert wc.commands[0][-1] == ('--username="user" --password="pass" '
+ '--no-auth-cache')
+
+class svnurl_no_svn(py.path.svnurl):
+ cmdexec_output = 'test'
+ popen_output = 'test'
+ def __new__(cls, *args, **kwargs):
+ self = super(svnurl_no_svn, cls).__new__(cls, *args, **kwargs)
+ self.commands = []
+ return self
+
+ def _cmdexec(self, cmd):
+ self.commands.append(cmd)
+ return self.cmdexec_output
+
+ def _popen(self, cmd):
+ self.commands.append(cmd)
+ return self.popen_output
+
+class TestSvnURLAuth(object):
+ def setup_method(self, meth):
+ self.auth = SvnAuth('foo', 'bar')
+
+ def test_init(self):
+ u = svnurl_no_svn('http://foo.bar/svn')
+ assert u.auth is None
+
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ assert u.auth is self.auth
+
+ def test_new(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ new = u.new(basename='bar')
+ assert new.auth is self.auth
+ assert new.url == 'http://foo.bar/svn/bar'
+
+ def test_join(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ new = u.join('foo')
+ assert new.auth is self.auth
+ assert new.url == 'http://foo.bar/svn/foo'
+
+ def test_listdir(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ u.cmdexec_output = '''\
+ 1717 johnny 1529 Nov 04 14:32 LICENSE.txt
+ 1716 johnny 5352 Nov 04 14:28 README.txt
+'''
+ paths = u.listdir()
+ assert paths[0].auth is self.auth
+ assert paths[1].auth is self.auth
+ assert paths[0].basename == 'LICENSE.txt'
+
+ def test_info(self):
+ u = svnurl_no_svn('http://foo.bar/svn/LICENSE.txt', auth=self.auth)
+ def dirpath(self):
+ return self
+ u.cmdexec_output = '''\
+ 1717 johnny 1529 Nov 04 14:32 LICENSE.txt
+ 1716 johnny 5352 Nov 04 14:28 README.txt
+'''
+ org_dp = u.__class__.dirpath
+ u.__class__.dirpath = dirpath
+ try:
+ info = u.info()
+ finally:
+ u.dirpath = org_dp
+ assert info.size == 1529
+
+ def test_open(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ foo = u.join('foo')
+ foo.check = lambda *args, **kwargs: True
+ ret = foo.open()
+ assert ret == 'test'
+ assert '--username="foo" --password="bar"' in foo.commands[0]
+
+ def test_dirpath(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ parent = u.dirpath()
+ assert parent.auth is self.auth
+
+ def test_mkdir(self):
+ u = svnurl_no_svn('http://foo.bar/svn/qweqwe', auth=self.auth)
+ assert not u.commands
+ u.mkdir(msg='created dir foo')
+ assert u.commands
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_copy(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ u2 = svnurl_no_svn('http://foo.bar/svn2')
+ u.copy(u2, 'copied dir')
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_rename(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ u.rename('http://foo.bar/svn/bar', 'moved foo to bar')
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_remove(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ u.remove(msg='removing foo')
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_export(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ target = py.path.local('/foo')
+ u.export(target)
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+ def test_log(self):
+ u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
+ u.popen_output = py.io.TextIO(py.builtin._totext('''\
+<?xml version="1.0"?>
+<log>
+<logentry revision="51381">
+<author>guido</author>
+<date>2008-02-11T12:12:18.476481Z</date>
+<msg>Creating branch to work on auth support for py.path.svn*.
+</msg>
+</logentry>
+</log>
+''', 'ascii'))
+ u.check = lambda *args, **kwargs: True
+ ret = u.log(10, 20, verbose=True)
+ assert '--username="foo" --password="bar"' in u.commands[0]
+ assert len(ret) == 1
+ assert int(ret[0].rev) == 51381
+ assert ret[0].author == 'guido'
+
+ def test_propget(self):
+ u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
+ u.propget('foo')
+ assert '--username="foo" --password="bar"' in u.commands[0]
+
+def pytest_funcarg__setup(request):
+ return Setup(request)
+
+class Setup:
+ def __init__(self, request):
+ if not svnbin:
+ py.test.skip("svn binary required")
+ if not request.config.option.runslowtests:
+ py.test.skip('use --runslowtests to run these tests')
+
+ tmpdir = request.getfuncargvalue("tmpdir")
+ repodir = tmpdir.join("repo")
+ py.process.cmdexec('svnadmin create %s' % repodir)
+ if sys.platform == 'win32':
+ repodir = '/' + str(repodir).replace('\\', '/')
+ self.repo = py.path.svnurl("file://%s" % repodir)
+ if py.std.sys.platform == 'win32':
+            # strip the leading slash added above for the file:// url
+ repodir = repodir[1:]
+ self.repopath = py.path.local(repodir)
+ self.temppath = tmpdir.mkdir("temppath")
+ self.auth = SvnAuth('johnny', 'foo', cache_auth=False,
+ interactive=False)
+ make_repo_auth(self.repopath, {'johnny': ('foo', 'rw')})
+ self.port, self.pid = serve_bg(self.repopath.dirpath())
+ # XXX caching is too global
+ py.path.svnurl._lsnorevcache._dict.clear()
+ request.addfinalizer(lambda: py.process.kill(self.pid))
+
+class TestSvnWCAuthFunctional:
+ def test_checkout_constructor_arg(self, setup):
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ assert wc.join('.svn').check()
+
+ def test_checkout_function_arg(self, setup):
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ assert wc.join('.svn').check()
+
+ def test_checkout_failing_non_interactive(self, setup):
+ auth = SvnAuth('johnny', 'bar', cache_auth=False,
+ interactive=False)
+ wc = py.path.svnwc(setup.temppath, auth)
+ py.test.raises(Exception,
+ ("wc.checkout('svn://localhost:%(port)s/%(repopath)s')" %
+ setup.__dict__))
+
+ def test_log(self, setup):
+ wc = py.path.svnwc(setup.temppath, setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ foo = wc.ensure('foo.txt')
+ wc.commit('added foo.txt')
+ log = foo.log()
+ assert len(log) == 1
+ assert log[0].msg == 'added foo.txt'
+
+ def test_switch(self, setup):
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ svnurl = 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename)
+ wc.checkout(svnurl)
+ wc.ensure('foo', dir=True).ensure('foo.txt').write('foo')
+ wc.commit('added foo dir with foo.txt file')
+ wc.ensure('bar', dir=True)
+ wc.commit('added bar dir')
+ bar = wc.join('bar')
+ bar.switch(svnurl + '/foo')
+ assert bar.join('foo.txt')
+
+ def test_update(self, setup):
+ wc1 = py.path.svnwc(setup.temppath.ensure('wc1', dir=True),
+ auth=setup.auth)
+ wc2 = py.path.svnwc(setup.temppath.ensure('wc2', dir=True),
+ auth=setup.auth)
+ wc1.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ wc2.checkout(
+ 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
+ wc1.ensure('foo', dir=True)
+ wc1.commit('added foo dir')
+ wc2.update()
+ assert wc2.join('foo').check()
+
+ auth = SvnAuth('unknown', 'unknown', interactive=False)
+ wc2.auth = auth
+ py.test.raises(Exception, 'wc2.update()')
+
+ def test_lock_unlock_status(self, setup):
+ port = setup.port
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
+ wc.ensure('foo', file=True)
+ wc.commit('added foo file')
+ foo = wc.join('foo')
+ foo.lock()
+ status = foo.status()
+ assert status.locked
+ foo.unlock()
+ status = foo.status()
+ assert not status.locked
+
+ auth = SvnAuth('unknown', 'unknown', interactive=False)
+ foo.auth = auth
+ py.test.raises(Exception, 'foo.lock()')
+ py.test.raises(Exception, 'foo.unlock()')
+
+ def test_diff(self, setup):
+ port = setup.port
+ wc = py.path.svnwc(setup.temppath, auth=setup.auth)
+ wc.checkout(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
+ wc.ensure('foo', file=True)
+ wc.commit('added foo file')
+ wc.update()
+ rev = int(wc.status().rev)
+ foo = wc.join('foo')
+ foo.write('bar')
+ diff = foo.diff()
+ assert '\n+bar\n' in diff
+ foo.commit('added some content')
+ diff = foo.diff()
+ assert not diff
+ diff = foo.diff(rev=rev)
+ assert '\n+bar\n' in diff
+
+ auth = SvnAuth('unknown', 'unknown', interactive=False)
+ foo.auth = auth
+ py.test.raises(Exception, 'foo.diff(rev=rev)')
+
+class TestSvnURLAuthFunctional:
+ def test_listdir(self, setup):
+ port = setup.port
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=setup.auth)
+ u.ensure('foo')
+ paths = u.listdir()
+ assert len(paths) == 1
+ assert paths[0].auth is setup.auth
+
+ auth = SvnAuth('foo', 'bar', interactive=False)
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=auth)
+ py.test.raises(Exception, 'u.listdir()')
+
+ def test_copy(self, setup):
+ port = setup.port
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=setup.auth)
+ foo = u.mkdir('foo')
+ assert foo.check()
+ bar = u.join('bar')
+ foo.copy(bar)
+ assert bar.check()
+ assert bar.auth is setup.auth
+
+ auth = SvnAuth('foo', 'bar', interactive=False)
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=auth)
+ foo = u.join('foo')
+ bar = u.join('bar')
+ py.test.raises(Exception, 'foo.copy(bar)')
+
+ def test_write_read(self, setup):
+ port = setup.port
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=setup.auth)
+ foo = u.ensure('foo')
+ fp = foo.open()
+ try:
+ data = fp.read()
+ finally:
+ fp.close()
+ assert data == ''
+
+ auth = SvnAuth('foo', 'bar', interactive=False)
+ u = py.path.svnurl(
+ 'svn://localhost:%s/%s' % (port, setup.repopath.basename),
+ auth=auth)
+ foo = u.join('foo')
+ py.test.raises(Exception, 'foo.open()')
+
+ # XXX rinse, repeat... :|
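The pattern the auth tests above establish: construct an SvnAuth once and pass it to svnwc/svnurl, and every svn command issued through those paths then carries the --username/--password options (plus --no-auth-cache and --non-interactive when requested), as asserted in TestSvnAuth. A hedged sketch; the working-copy path and port below are placeholders, and a running svnserve plus a repository prepared with make_repo_auth are assumed:

    import py
    from py.path import SvnAuth

    auth = SvnAuth('johnny', 'foo', cache_auth=False, interactive=False)
    wc = py.path.svnwc('/tmp/wc', auth=auth)                       # placeholder path
    url = py.path.svnurl('svn://localhost:10000/repo', auth=auth)  # placeholder url
    # wc.checkout(str(url)), wc.commit('msg'), url.listdir(), ... all reuse auth
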
diff --git a/testing/web-platform/tests/tools/py/testing/path/test_svnurl.py b/testing/web-platform/tests/tools/py/testing/path/test_svnurl.py
new file mode 100644
index 000000000..15fbea504
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/test_svnurl.py
@@ -0,0 +1,95 @@
+import py
+from py._path.svnurl import InfoSvnCommand
+import datetime
+import time
+from svntestbase import CommonSvnTests
+
+def pytest_funcarg__path1(request):
+ repo, repourl, wc = request.getfuncargvalue("repowc1")
+ return py.path.svnurl(repourl)
+
+class TestSvnURLCommandPath(CommonSvnTests):
+ @py.test.mark.xfail
+ def test_load(self, path1):
+ super(TestSvnURLCommandPath, self).test_load(path1)
+
+ # the following two work on jython but not in local/svnwc
+ def test_listdir(self, path1):
+ super(TestSvnURLCommandPath, self).test_listdir(path1)
+ def test_visit_ignore(self, path1):
+ super(TestSvnURLCommandPath, self).test_visit_ignore(path1)
+
+ def test_svnurl_needs_arg(self, path1):
+ py.test.raises(TypeError, "py.path.svnurl()")
+
+ def test_svnurl_does_not_accept_None_either(self, path1):
+ py.test.raises(Exception, "py.path.svnurl(None)")
+
+ def test_svnurl_characters_simple(self, path1):
+ py.path.svnurl("svn+ssh://hello/world")
+
+ def test_svnurl_characters_at_user(self, path1):
+ py.path.svnurl("http://user@host.com/some/dir")
+
+ def test_svnurl_characters_at_path(self, path1):
+ py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo@bar")')
+
+ def test_svnurl_characters_colon_port(self, path1):
+ py.path.svnurl("http://host.com:8080/some/dir")
+
+ def test_svnurl_characters_tilde_end(self, path1):
+ py.path.svnurl("http://host.com/some/file~")
+
+ @py.test.mark.xfail("sys.platform == 'win32'")
+ def test_svnurl_characters_colon_path(self, path1):
+ # colons are allowed on win32, because they're part of the drive
+ # part of an absolute path... however, they shouldn't be allowed in
+ # other parts, I think
+ py.test.raises(ValueError, 'py.path.svnurl("http://host.com/foo:bar")')
+
+ def test_export(self, path1, tmpdir):
+ tmpdir = tmpdir.join("empty")
+ p = path1.export(tmpdir)
+ assert p == tmpdir # XXX should return None
+ n1 = [x.basename for x in tmpdir.listdir()]
+ n2 = [x.basename for x in path1.listdir()]
+ n1.sort()
+ n2.sort()
+ assert n1 == n2
+ assert not p.join('.svn').check()
+ rev = path1.mkdir("newdir")
+ tmpdir.remove()
+ assert not tmpdir.check()
+ path1.new(rev=1).export(tmpdir)
+ for p in tmpdir.listdir():
+ assert p.basename in n2
+
+class TestSvnInfoCommand:
+
+ def test_svn_1_2(self):
+ line = " 2256 hpk 165 Nov 24 17:55 __init__.py"
+ info = InfoSvnCommand(line)
+ now = datetime.datetime.now()
+ assert info.last_author == 'hpk'
+ assert info.created_rev == 2256
+ assert info.kind == 'file'
+ # we don't check for the year (2006), because that depends
+ # on the clock correctly being setup
+ assert time.gmtime(info.mtime)[1:6] == (11, 24, 17, 55, 0)
+ assert info.size == 165
+ assert info.time == info.mtime * 1000000
+
+ def test_svn_1_3(self):
+ line =" 4784 hpk 2 Jun 01 2004 __init__.py"
+ info = InfoSvnCommand(line)
+ assert info.last_author == 'hpk'
+ assert info.kind == 'file'
+
+ def test_svn_1_3_b(self):
+ line =" 74 autoadmi Oct 06 23:59 plonesolutions.com/"
+ info = InfoSvnCommand(line)
+ assert info.last_author == 'autoadmi'
+ assert info.kind == 'dir'
+
+def test_badchars():
+ py.test.raises(ValueError, "py.path.svnurl('http://host/tmp/@@@:')")
diff --git a/testing/web-platform/tests/tools/py/testing/path/test_svnwc.py b/testing/web-platform/tests/tools/py/testing/path/test_svnwc.py
new file mode 100644
index 000000000..9e6b524ac
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/path/test_svnwc.py
@@ -0,0 +1,549 @@
+import py
+import os, sys
+import pytest
+from py._path.svnwc import InfoSvnWCCommand, XMLWCStatus, parse_wcinfotime
+from py._path import svnwc as svncommon
+from svntestbase import CommonSvnTests
+
+def test_make_repo(path1, tmpdir):
+ repo = tmpdir.join("repo")
+ py.process.cmdexec('svnadmin create %s' % repo)
+ if sys.platform == 'win32':
+ repo = '/' + str(repo).replace('\\', '/')
+ repo = py.path.svnurl("file://%s" % repo)
+ wc = py.path.svnwc(tmpdir.join("wc"))
+ wc.checkout(repo)
+ assert wc.rev == 0
+ assert len(wc.listdir()) == 0
+ p = wc.join("a_file")
+ p.write("test file")
+ p.add()
+ rev = wc.commit("some test")
+ assert p.info().rev == 1
+ assert rev == 1
+ rev = wc.commit()
+ assert rev is None
+
+def pytest_funcarg__path1(request):
+ repo, repourl, wc = request.getfuncargvalue("repowc1")
+ return wc
+
+class TestWCSvnCommandPath(CommonSvnTests):
+ def test_status_attributes_simple(self, path1):
+ def assert_nochange(p):
+ s = p.status()
+ assert not s.modified
+ assert not s.prop_modified
+ assert not s.added
+ assert not s.deleted
+ assert not s.replaced
+
+ dpath = path1.join('sampledir')
+ assert_nochange(path1.join('sampledir'))
+ assert_nochange(path1.join('samplefile'))
+
+ def test_status_added(self, path1):
+ nf = path1.join('newfile')
+ nf.write('hello')
+ nf.add()
+ try:
+ s = nf.status()
+ assert s.added
+ assert not s.modified
+ assert not s.prop_modified
+ assert not s.replaced
+ finally:
+ nf.revert()
+
+ def test_status_change(self, path1):
+ nf = path1.join('samplefile')
+ try:
+ nf.write(nf.read() + 'change')
+ s = nf.status()
+ assert not s.added
+ assert s.modified
+ assert not s.prop_modified
+ assert not s.replaced
+ finally:
+ nf.revert()
+
+ def test_status_added_ondirectory(self, path1):
+ sampledir = path1.join('sampledir')
+ try:
+ t2 = sampledir.mkdir('t2')
+ t1 = t2.join('t1')
+ t1.write('test')
+ t1.add()
+ s = sampledir.status(rec=1)
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert t1.basename in [item.basename for item in s.added]
+ assert t2.basename in [item.basename for item in s.added]
+ finally:
+ t2.revert(rec=1)
+ t2.localpath.remove(rec=1)
+
+ def test_status_unknown(self, path1):
+ t1 = path1.join('un1')
+ try:
+ t1.write('test')
+ s = path1.status()
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert t1.basename in [item.basename for item in s.unknown]
+ finally:
+ t1.localpath.remove()
+
+ def test_status_unchanged(self, path1):
+ r = path1
+ s = path1.status(rec=1)
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert r.join('samplefile').basename in [item.basename
+ for item in s.unchanged]
+ assert r.join('sampledir').basename in [item.basename
+ for item in s.unchanged]
+ assert r.join('sampledir/otherfile').basename in [item.basename
+ for item in s.unchanged]
+
+ @pytest.mark.xfail(reason="svn-1.7 has buggy 'status --xml' output")
+ def test_status_update(self, path1):
+ r = path1
+ try:
+ r.update(rev=1)
+ s = r.status(updates=1, rec=1)
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ py.std.pprint.pprint(s.allpath())
+ assert r.join('anotherfile').basename in [item.basename for
+ item in s.update_available]
+ #assert len(s.update_available) == 1
+ finally:
+ r.update()
+
+ def test_status_replaced(self, path1):
+ p = path1.join("samplefile")
+ p.remove()
+ p.ensure(dir=0)
+ try:
+ s = path1.status()
+ assert p.basename in [item.basename for item in s.replaced]
+ finally:
+ path1.revert(rec=1)
+
+ def test_status_ignored(self, path1):
+ try:
+ d = path1.join('sampledir')
+ p = py.path.local(d).join('ignoredfile')
+ p.ensure(file=True)
+ s = d.status()
+ assert [x.basename for x in s.unknown] == ['ignoredfile']
+ assert [x.basename for x in s.ignored] == []
+ d.propset('svn:ignore', 'ignoredfile')
+ s = d.status()
+ assert [x.basename for x in s.unknown] == []
+ assert [x.basename for x in s.ignored] == ['ignoredfile']
+ finally:
+ path1.revert(rec=1)
+
+ def test_status_conflict(self, path1, tmpdir):
+ wc = path1
+ wccopy = py.path.svnwc(tmpdir.join("conflict_copy"))
+ wccopy.checkout(wc.url)
+ p = wc.ensure('conflictsamplefile', file=1)
+ p.write('foo')
+ wc.commit('added conflictsamplefile')
+ wccopy.update()
+ assert wccopy.join('conflictsamplefile').check()
+ p.write('bar')
+ wc.commit('wrote some data')
+ wccopy.join('conflictsamplefile').write('baz')
+ wccopy.update(interactive=False)
+ s = wccopy.status()
+ assert [x.basename for x in s.conflict] == ['conflictsamplefile']
+
+ def test_status_external(self, path1, repowc2):
+ otherrepo, otherrepourl, otherwc = repowc2
+ d = path1.ensure('sampledir', dir=1)
+ try:
+ d.update()
+ d.propset('svn:externals', 'otherwc %s' % (otherwc.url,))
+ d.update()
+ s = d.status()
+ assert [x.basename for x in s.external] == ['otherwc']
+ assert 'otherwc' not in [x.basename for x in s.unchanged]
+ s = d.status(rec=1)
+ assert [x.basename for x in s.external] == ['otherwc']
+ assert 'otherwc' in [x.basename for x in s.unchanged]
+ finally:
+ path1.revert(rec=1)
+
+ def test_status_deleted(self, path1):
+ d = path1.ensure('sampledir', dir=1)
+ d.remove()
+ d.ensure(dir=1)
+ path1.commit()
+ d.ensure('deletefile', dir=0)
+ d.commit()
+ s = d.status()
+ assert 'deletefile' in [x.basename for x in s.unchanged]
+ assert not s.deleted
+ p = d.join('deletefile')
+ p.remove()
+ s = d.status()
+ assert 'deletefile' not in s.unchanged
+ assert [x.basename for x in s.deleted] == ['deletefile']
+
+ def test_status_noauthor(self, path1):
+ # testing for XML without author - this used to raise an exception
+ xml = '''\
+ <entry path="/tmp/pytest-23/wc">
+ <wc-status item="normal" props="none" revision="0">
+ <commit revision="0">
+ <date>2008-08-19T16:50:53.400198Z</date>
+ </commit>
+ </wc-status>
+ </entry>
+ '''
+ XMLWCStatus.fromstring(xml, path1)
+
+ def test_status_wrong_xml(self, path1):
+ # testing for XML without author - this used to raise an exception
+ xml = '<entry path="/home/jean/zope/venv/projectdb/parts/development-products/DataGridField">\n<wc-status item="incomplete" props="none" revision="784">\n</wc-status>\n</entry>'
+ st = XMLWCStatus.fromstring(xml, path1)
+ assert len(st.incomplete) == 1
+
+ def test_diff(self, path1):
+ p = path1 / 'anotherfile'
+ out = p.diff(rev=2)
+ assert out.find('hello') != -1
+
+ def test_blame(self, path1):
+ p = path1.join('samplepickle')
+ lines = p.blame()
+ assert sum([l[0] for l in lines]) == len(lines)
+ for l1, l2 in zip(p.readlines(), [l[2] for l in lines]):
+ assert l1 == l2
+ assert [l[1] for l in lines] == ['hpk'] * len(lines)
+ p = path1.join('samplefile')
+ lines = p.blame()
+ assert sum([l[0] for l in lines]) == len(lines)
+ for l1, l2 in zip(p.readlines(), [l[2] for l in lines]):
+ assert l1 == l2
+ assert [l[1] for l in lines] == ['hpk'] * len(lines)
+
+ def test_join_abs(self, path1):
+ s = str(path1.localpath)
+ n = path1.join(s, abs=1)
+ assert path1 == n
+
+ def test_join_abs2(self, path1):
+ assert path1.join('samplefile', abs=1) == path1.join('samplefile')
+
+ def test_str_gives_localpath(self, path1):
+ assert str(path1) == str(path1.localpath)
+
+ def test_versioned(self, path1):
+ assert path1.check(versioned=1)
+ # TODO: Why does my copy of svn think .svn is versioned?
+ #assert path1.join('.svn').check(versioned=0)
+ assert path1.join('samplefile').check(versioned=1)
+ assert not path1.join('notexisting').check(versioned=1)
+ notexisting = path1.join('hello').localpath
+ try:
+ notexisting.write("")
+ assert path1.join('hello').check(versioned=0)
+ finally:
+ notexisting.remove()
+
+ def test_listdir_versioned(self, path1):
+ assert path1.check(versioned=1)
+ p = path1.localpath.ensure("not_a_versioned_file")
+ l = [x.localpath
+ for x in path1.listdir(lambda x: x.check(versioned=True))]
+ assert p not in l
+
+ def test_nonversioned_remove(self, path1):
+ assert path1.check(versioned=1)
+ somefile = path1.join('nonversioned/somefile')
+ nonwc = py.path.local(somefile)
+ nonwc.ensure()
+ assert somefile.check()
+ assert not somefile.check(versioned=True)
+ somefile.remove() # this used to fail because it tried to 'svn rm'
+
+ def test_properties(self, path1):
+ try:
+ path1.propset('gaga', 'this')
+ assert path1.propget('gaga') == 'this'
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert path1.basename in [item.basename for item in
+ path1.status().prop_modified]
+ assert 'gaga' in path1.proplist()
+ assert path1.proplist()['gaga'] == 'this'
+
+ finally:
+ path1.propdel('gaga')
+
+ def test_proplist_recursive(self, path1):
+ s = path1.join('samplefile')
+ s.propset('gugu', 'that')
+ try:
+ p = path1.proplist(rec=1)
+ # Comparing just the file names, because paths are unpredictable
+ # on Windows. (long vs. 8.3 paths)
+ assert (path1 / 'samplefile').basename in [item.basename
+ for item in p]
+ finally:
+ s.propdel('gugu')
+
+ def test_long_properties(self, path1):
+ value = """
+ vadm:posix : root root 0100755
+ Properties on 'chroot/dns/var/bind/db.net.xots':
+ """
+ try:
+ path1.propset('gaga', value)
+ backvalue = path1.propget('gaga')
+ assert backvalue == value
+ #assert len(backvalue.split('\n')) == 1
+ finally:
+ path1.propdel('gaga')
+
+
+ def test_ensure(self, path1):
+ newpath = path1.ensure('a', 'b', 'c')
+ try:
+ assert newpath.check(exists=1, versioned=1)
+ newpath.write("hello")
+ newpath.ensure()
+ assert newpath.read() == "hello"
+ finally:
+ path1.join('a').remove(force=1)
+
+ def test_not_versioned(self, path1):
+ p = path1.localpath.mkdir('whatever')
+ f = path1.localpath.ensure('testcreatedfile')
+ try:
+ assert path1.join('whatever').check(versioned=0)
+ assert path1.join('testcreatedfile').check(versioned=0)
+ assert not path1.join('testcreatedfile').check(versioned=1)
+ finally:
+ p.remove(rec=1)
+ f.remove()
+
+ def test_lock_unlock(self, path1):
+ root = path1
+ somefile = root.join('somefile')
+ somefile.ensure(file=True)
+ # not yet added to repo
+ py.test.raises(Exception, 'somefile.lock()')
+ somefile.write('foo')
+ somefile.commit('test')
+ assert somefile.check(versioned=True)
+ somefile.lock()
+ try:
+ locked = root.status().locked
+ assert len(locked) == 1
+ assert locked[0].basename == somefile.basename
+ assert locked[0].dirpath().basename == somefile.dirpath().basename
+ #assert somefile.locked()
+ py.test.raises(Exception, 'somefile.lock()')
+ finally:
+ somefile.unlock()
+ #assert not somefile.locked()
+ locked = root.status().locked
+ assert locked == []
+        py.test.raises(Exception, 'somefile.unlock()')
+ somefile.remove()
+
+ def test_commit_nonrecursive(self, path1):
+ somedir = path1.join('sampledir')
+ somedir.mkdir("subsubdir")
+ somedir.propset('foo', 'bar')
+ status = somedir.status()
+ assert len(status.prop_modified) == 1
+ assert len(status.added) == 1
+
+ somedir.commit('non-recursive commit', rec=0)
+ status = somedir.status()
+ assert len(status.prop_modified) == 0
+ assert len(status.added) == 1
+
+ somedir.commit('recursive commit')
+ status = somedir.status()
+ assert len(status.prop_modified) == 0
+ assert len(status.added) == 0
+
+ def test_commit_return_value(self, path1):
+ testfile = path1.join('test.txt').ensure(file=True)
+ testfile.write('test')
+ rev = path1.commit('testing')
+ assert type(rev) == int
+
+ anotherfile = path1.join('another.txt').ensure(file=True)
+ anotherfile.write('test')
+ rev2 = path1.commit('testing more')
+ assert type(rev2) == int
+ assert rev2 == rev + 1
+
+ #def test_log(self, path1):
+ # l = path1.log()
+ # assert len(l) == 3 # might need to be upped if more tests are added
+
+class XTestWCSvnCommandPathSpecial:
+
+ rooturl = 'http://codespeak.net/svn/py.path/trunk/dist/py.path/test/data'
+ #def test_update_none_rev(self, path1):
+ # path = tmpdir.join('checkouttest')
+ # wcpath = newpath(xsvnwc=str(path), url=path1url)
+ # try:
+ # wcpath.checkout(rev=2100)
+ # wcpath.update()
+ # assert wcpath.info().rev > 2100
+ # finally:
+ # wcpath.localpath.remove(rec=1)
+
+def test_parse_wcinfotime():
+ assert (parse_wcinfotime('2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)') ==
+ 1149021926)
+ assert (parse_wcinfotime('2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)') ==
+ 1067287394)
+
+class TestInfoSvnWCCommand:
+
+ def test_svn_1_2(self, path1):
+ output = """
+ Path: test_svnwc.py
+ Name: test_svnwc.py
+ URL: http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py
+ Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
+ Revision: 28137
+ Node Kind: file
+ Schedule: normal
+ Last Changed Author: jan
+ Last Changed Rev: 27939
+ Last Changed Date: 2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)
+ Text Last Updated: 2006-06-01 00:42:53 +0200 (Thu, 01 Jun 2006)
+ Properties Last Updated: 2006-05-23 11:54:59 +0200 (Tue, 23 May 2006)
+ Checksum: 357e44880e5d80157cc5fbc3ce9822e3
+ """
+ path = py.path.local(__file__).dirpath().chdir()
+ try:
+ info = InfoSvnWCCommand(output)
+ finally:
+ path.chdir()
+ assert info.last_author == 'jan'
+ assert info.kind == 'file'
+ assert info.mtime == 1149021926.0
+ assert info.url == 'http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py'
+ assert info.time == 1149021926000000.0
+ assert info.rev == 28137
+
+
+ def test_svn_1_3(self, path1):
+ output = """
+ Path: test_svnwc.py
+ Name: test_svnwc.py
+ URL: http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py
+ Repository Root: http://codespeak.net/svn
+ Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
+ Revision: 28124
+ Node Kind: file
+ Schedule: normal
+ Last Changed Author: jan
+ Last Changed Rev: 27939
+ Last Changed Date: 2006-05-30 20:45:26 +0200 (Tue, 30 May 2006)
+ Text Last Updated: 2006-06-02 23:46:11 +0200 (Fri, 02 Jun 2006)
+ Properties Last Updated: 2006-06-02 23:45:28 +0200 (Fri, 02 Jun 2006)
+ Checksum: 357e44880e5d80157cc5fbc3ce9822e3
+ """
+ path = py.path.local(__file__).dirpath().chdir()
+ try:
+ info = InfoSvnWCCommand(output)
+ finally:
+ path.chdir()
+ assert info.last_author == 'jan'
+ assert info.kind == 'file'
+ assert info.mtime == 1149021926.0
+ assert info.url == 'http://codespeak.net/svn/py/dist/py/path/svn/wccommand.py'
+ assert info.rev == 28124
+ assert info.time == 1149021926000000.0
+
+
+def test_characters_at():
+ py.test.raises(ValueError, "py.path.svnwc('/tmp/@@@:')")
+
+def test_characters_tilde():
+ py.path.svnwc('/tmp/test~')
+
+
+class TestRepo:
+ def test_trailing_slash_is_stripped(self, path1):
+ # XXX we need to test more normalizing properties
+ url = path1.join("/")
+ assert path1 == url
+
+ #def test_different_revs_compare_unequal(self, path1):
+ # newpath = path1.new(rev=1199)
+ # assert newpath != path1
+
+ def test_exists_svn_root(self, path1):
+ assert path1.check()
+
+ #def test_not_exists_rev(self, path1):
+ # url = path1.__class__(path1url, rev=500)
+ # assert url.check(exists=0)
+
+ #def test_nonexisting_listdir_rev(self, path1):
+ # url = path1.__class__(path1url, rev=500)
+ # raises(py.error.ENOENT, url.listdir)
+
+ #def test_newrev(self, path1):
+ # url = path1.new(rev=None)
+ # assert url.rev == None
+ # assert url.strpath == path1.strpath
+ # url = path1.new(rev=10)
+ # assert url.rev == 10
+
+ #def test_info_rev(self, path1):
+ # url = path1.__class__(path1url, rev=1155)
+ # url = url.join("samplefile")
+ # res = url.info()
+ # assert res.size > len("samplefile") and res.created_rev == 1155
+
+ # the following tests are easier if we have a path class
+ def test_repocache_simple(self, path1):
+ repocache = svncommon.RepoCache()
+ repocache.put(path1.strpath, 42)
+ url, rev = repocache.get(path1.join('test').strpath)
+ assert rev == 42
+ assert url == path1.strpath
+
+ def test_repocache_notimeout(self, path1):
+ repocache = svncommon.RepoCache()
+ repocache.timeout = 0
+ repocache.put(path1.strpath, path1.rev)
+ url, rev = repocache.get(path1.strpath)
+ assert rev == -1
+ assert url == path1.strpath
+
+ def test_repocache_outdated(self, path1):
+ repocache = svncommon.RepoCache()
+ repocache.put(path1.strpath, 42, timestamp=0)
+ url, rev = repocache.get(path1.join('test').strpath)
+ assert rev == -1
+ assert url == path1.strpath
+
+ def _test_getreporev(self):
+ """ this test runs so slow it's usually disabled """
+ old = svncommon.repositories.repos
+ try:
+ _repocache.clear()
+ root = path1.new(rev=-1)
+ url, rev = cache.repocache.get(root.strpath)
+ assert rev>=0
+ assert url == svnrepourl
+ finally:
+ repositories.repos = old
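An aside on the API exercised above: the svn working-copy path object is built with
py.path.svnwc() and distinguishes tracked from untracked entries via check(versioned=...).
A minimal sketch, assuming /path/to/checkout is an existing svn working copy (the
location is hypothetical, not part of this test suite):

    import py

    wc = py.path.svnwc("/path/to/checkout")   # hypothetical checkout location
    # check(versioned=1) is true only for entries known to svn
    tracked = [p.basename for p in wc.listdir() if p.check(versioned=1)]
    untracked = [p.basename for p in wc.listdir() if not p.check(versioned=1)]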
diff --git a/testing/web-platform/tests/tools/py/testing/process/__init__.py b/testing/web-platform/tests/tools/py/testing/process/__init__.py
new file mode 100644
index 000000000..792d60054
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/process/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/testing/web-platform/tests/tools/py/testing/process/test_cmdexec.py b/testing/web-platform/tests/tools/py/testing/process/test_cmdexec.py
new file mode 100644
index 000000000..b539e0af3
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/process/test_cmdexec.py
@@ -0,0 +1,39 @@
+import py
+from py.process import cmdexec
+
+def exvalue():
+ return py.std.sys.exc_info()[1]
+
+class Test_exec_cmd:
+ def test_simple(self):
+ out = cmdexec('echo hallo')
+ assert out.strip() == 'hallo'
+ assert py.builtin._istext(out)
+
+ def test_simple_newline(self):
+ import sys
+ out = cmdexec(r"""%s -c "print ('hello')" """ % sys.executable)
+ assert out == 'hello\n'
+ assert py.builtin._istext(out)
+
+ def test_simple_error(self):
+ py.test.raises (cmdexec.Error, cmdexec, 'exit 1')
+
+ def test_simple_error_exact_status(self):
+ try:
+ cmdexec('exit 1')
+ except cmdexec.Error:
+ e = exvalue()
+ assert e.status == 1
+ assert py.builtin._istext(e.out)
+ assert py.builtin._istext(e.err)
+
+ def test_err(self):
+ try:
+ cmdexec('echoqweqwe123 hallo')
+ raise AssertionError("command succeeded but shouldn't")
+ except cmdexec.Error:
+ e = exvalue()
+ assert hasattr(e, 'err')
+ assert hasattr(e, 'out')
+ assert e.err or e.out
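The contract pinned down by these tests is small: cmdexec() returns the command's
stdout as text on success and raises cmdexec.Error, carrying status/out/err, on a
non-zero exit. A short usage sketch (the commands are illustrative only):

    import py
    from py.process import cmdexec

    try:
        out = cmdexec("echo hello")          # captured stdout, returned as text
        print(out.strip())
    except cmdexec.Error as exc:
        # raised for a non-zero exit status; exit code and captured streams attached
        print(exc.status, exc.out, exc.err)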
diff --git a/testing/web-platform/tests/tools/py/testing/process/test_forkedfunc.py b/testing/web-platform/tests/tools/py/testing/process/test_forkedfunc.py
new file mode 100644
index 000000000..d4f9f985e
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/process/test_forkedfunc.py
@@ -0,0 +1,177 @@
+import pytest
+import py, sys, os
+
+pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
+
+
+def test_waitfinish_removes_tempdir():
+ ff = py.process.ForkedFunc(boxf1)
+ assert ff.tempdir.check()
+ ff.waitfinish()
+ assert not ff.tempdir.check()
+
+def test_tempdir_gets_gc_collected(monkeypatch):
+ monkeypatch.setattr(os, 'fork', lambda: os.getpid())
+ ff = py.process.ForkedFunc(boxf1)
+ assert ff.tempdir.check()
+ ff.__del__()
+ assert not ff.tempdir.check()
+
+def test_basic_forkedfunc():
+ result = py.process.ForkedFunc(boxf1).waitfinish()
+ assert result.out == "some out\n"
+ assert result.err == "some err\n"
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 1
+
+def test_exitstatus():
+ def func():
+ os._exit(4)
+ result = py.process.ForkedFunc(func).waitfinish()
+ assert result.exitstatus == 4
+ assert result.signal == 0
+ assert not result.out
+ assert not result.err
+
+def test_exception_in_func():
+ def fun():
+ raise ValueError(42)
+ ff = py.process.ForkedFunc(fun)
+ result = ff.waitfinish()
+ assert result.exitstatus == ff.EXITSTATUS_EXCEPTION
+ assert result.err.find("ValueError: 42") != -1
+ assert result.signal == 0
+ assert not result.retval
+
+def test_forkedfunc_on_fds():
+ result = py.process.ForkedFunc(boxf2).waitfinish()
+ assert result.out == "someout"
+ assert result.err == "someerr"
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 2
+
+def test_forkedfunc_on_fds_output():
+ result = py.process.ForkedFunc(boxf3).waitfinish()
+ assert result.signal == 11
+ assert result.out == "s"
+
+
+def test_forkedfunc_on_stdout():
+ def boxf3():
+ import sys
+ sys.stdout.write("hello\n")
+ os.kill(os.getpid(), 11)
+ result = py.process.ForkedFunc(boxf3).waitfinish()
+ assert result.signal == 11
+ assert result.out == "hello\n"
+
+def test_forkedfunc_signal():
+ result = py.process.ForkedFunc(boxseg).waitfinish()
+ assert result.retval is None
+ if sys.version_info < (2,4):
+ py.test.skip("signal detection does not work with python prior 2.4")
+ assert result.signal == 11
+
+def test_forkedfunc_huge_data():
+ result = py.process.ForkedFunc(boxhuge).waitfinish()
+ assert result.out
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 3
+
+def test_box_seq():
+ # we run many boxes with huge data, just one after another
+ for i in range(50):
+ result = py.process.ForkedFunc(boxhuge).waitfinish()
+ assert result.out
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 3
+
+def test_box_in_a_box():
+ def boxfun():
+ result = py.process.ForkedFunc(boxf2).waitfinish()
+ print (result.out)
+ sys.stderr.write(result.err + "\n")
+ return result.retval
+
+ result = py.process.ForkedFunc(boxfun).waitfinish()
+ assert result.out == "someout\n"
+ assert result.err == "someerr\n"
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 2
+
+def test_kill_func_forked():
+ class A:
+ pass
+ info = A()
+ import time
+
+ def box_fun():
+ time.sleep(10) # we don't want to last forever here
+
+ ff = py.process.ForkedFunc(box_fun)
+ os.kill(ff.pid, 15)
+ result = ff.waitfinish()
+ if py.std.sys.version_info < (2,4):
+ py.test.skip("signal detection does not work with python prior 2.4")
+ assert result.signal == 15
+
+
+def test_hooks(monkeypatch):
+ def _boxed():
+ return 1
+
+ def _on_start():
+ sys.stdout.write("some out\n")
+ sys.stdout.flush()
+
+ def _on_exit():
+ sys.stderr.write("some err\n")
+ sys.stderr.flush()
+
+ result = py.process.ForkedFunc(_boxed, child_on_start=_on_start,
+ child_on_exit=_on_exit).waitfinish()
+ assert result.out == "some out\n"
+ assert result.err == "some err\n"
+ assert result.exitstatus == 0
+ assert result.signal == 0
+ assert result.retval == 1
+
+
+# ======================================================================
+# examples
+# ======================================================================
+#
+
+def boxf1():
+ sys.stdout.write("some out\n")
+ sys.stderr.write("some err\n")
+ return 1
+
+def boxf2():
+ os.write(1, "someout".encode('ascii'))
+ os.write(2, "someerr".encode('ascii'))
+ return 2
+
+def boxf3():
+ os.write(1, "s".encode('ascii'))
+ os.kill(os.getpid(), 11)
+
+def boxseg():
+ os.kill(os.getpid(), 11)
+
+def boxhuge():
+ s = " ".encode('ascii')
+ os.write(1, s * 10000)
+ os.write(2, s * 10000)
+ os.write(1, s * 10000)
+
+ os.write(1, s * 10000)
+ os.write(2, s * 10000)
+ os.write(2, s * 10000)
+ os.write(1, s * 10000)
+ return 3
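These tests document the result object returned by ForkedFunc.waitfinish(); a brief
sketch of the fields they rely on (POSIX only, since ForkedFunc needs os.fork):

    import py

    def child():
        print("hi from the child")
        return 7

    res = py.process.ForkedFunc(child).waitfinish()
    # res.retval      -> 7, the function's return value passed back from the child
    # res.out/res.err -> captured stdout/stderr text ("hi from the child\n" / "")
    # res.exitstatus  -> 0 on success, EXITSTATUS_EXCEPTION if the child raised
    # res.signal      -> signal number if the child was killed, otherwise 0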
diff --git a/testing/web-platform/tests/tools/py/testing/process/test_killproc.py b/testing/web-platform/tests/tools/py/testing/process/test_killproc.py
new file mode 100644
index 000000000..57088e1db
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/process/test_killproc.py
@@ -0,0 +1,16 @@
+
+import py, sys
+
+@py.test.mark.skipif("sys.platform.startswith('java')")
+def test_kill(tmpdir):
+ subprocess = py.test.importorskip("subprocess")
+ t = tmpdir.join("t.py")
+ t.write("import time ; time.sleep(100)")
+ proc = py.std.subprocess.Popen([sys.executable, str(t)])
+ assert proc.poll() is None # no return value yet
+ py.process.kill(proc.pid)
+ ret = proc.wait()
+ if sys.platform == "win32" and ret == 0:
+ py.test.skip("XXX on win32, subprocess.Popen().wait() on a killed "
+ "process does not yield return value != 0")
+ assert ret != 0
diff --git a/testing/web-platform/tests/tools/py/testing/root/__init__.py b/testing/web-platform/tests/tools/py/testing/root/__init__.py
new file mode 100644
index 000000000..792d60054
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/root/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/testing/web-platform/tests/tools/py/testing/root/test_builtin.py b/testing/web-platform/tests/tools/py/testing/root/test_builtin.py
new file mode 100644
index 000000000..a6f1a3c73
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/root/test_builtin.py
@@ -0,0 +1,179 @@
+import sys
+import types
+import py
+from py.builtin import set, frozenset, reversed, sorted
+
+def test_enumerate():
+ l = [0,1,2]
+ for i,x in enumerate(l):
+ assert i == x
+
+def test_any():
+ assert not py.builtin.any([0,False, None])
+ assert py.builtin.any([0,False, None,1])
+
+def test_all():
+ assert not py.builtin.all([True, 1, False])
+ assert py.builtin.all([True, 1, object])
+
+def test_BaseException():
+ assert issubclass(IndexError, py.builtin.BaseException)
+ assert issubclass(Exception, py.builtin.BaseException)
+ assert issubclass(KeyboardInterrupt, py.builtin.BaseException)
+
+ class MyRandomClass(object):
+ pass
+ assert not issubclass(MyRandomClass, py.builtin.BaseException)
+
+ assert py.builtin.BaseException.__module__ in ('exceptions', 'builtins')
+ assert Exception.__name__ == 'Exception'
+
+
+def test_GeneratorExit():
+ assert py.builtin.GeneratorExit.__module__ in ('exceptions', 'builtins')
+ assert issubclass(py.builtin.GeneratorExit, py.builtin.BaseException)
+
+def test_reversed():
+ reversed = py.builtin.reversed
+ r = reversed("hello")
+ assert iter(r) is r
+ s = "".join(list(r))
+ assert s == "olleh"
+ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o']
+ py.test.raises(TypeError, reversed, reversed("hello"))
+
+def test_simple():
+ s = set([1, 2, 3, 4])
+ assert s == set([3, 4, 2, 1])
+ s1 = s.union(set([5, 6]))
+ assert 5 in s1
+ assert 1 in s1
+
+def test_frozenset():
+ s = set([frozenset([0, 1]), frozenset([1, 0])])
+ assert len(s) == 1
+
+def test_sorted():
+ if sorted == py.builtin.sorted:
+ return # don't test a real builtin
+ for s in [py.builtin.sorted]:
+ def test():
+ assert s([3, 2, 1]) == [1, 2, 3]
+ assert s([1, 2, 3], reverse=True) == [3, 2, 1]
+ l = s([1, 2, 3, 4, 5, 6], key=lambda x: x % 2)
+ assert l == [2, 4, 6, 1, 3, 5]
+ l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y))
+ assert l == [4, 3, 2, 1]
+ l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y),
+ key=lambda x: x % 2)
+ assert l == [1, 3, 2, 4]
+
+ def compare(x, y):
+ assert type(x) == str
+ assert type(y) == str
+ return cmp(x, y)
+ data = 'The quick Brown fox Jumped over The lazy Dog'.split()
+ s(data, cmp=compare, key=str.lower)
+ yield test
+
+
+def test_print_simple():
+ from py.builtin import print_
+ py.test.raises(TypeError, "print_(hello=3)")
+ f = py.io.TextIO()
+ print_("hello", "world", file=f)
+ s = f.getvalue()
+ assert s == "hello world\n"
+
+ f = py.io.TextIO()
+ print_("hello", end="", file=f)
+ s = f.getvalue()
+ assert s == "hello"
+
+ f = py.io.TextIO()
+ print_("xyz", "abc", sep="", end="", file=f)
+ s = f.getvalue()
+ assert s == "xyzabc"
+
+ class X:
+ def __repr__(self): return "rep"
+ f = py.io.TextIO()
+ print_(X(), file=f)
+ assert f.getvalue() == "rep\n"
+
+def test_execfile(tmpdir):
+ test_file = tmpdir.join("test.py")
+ test_file.write("x = y\ndef f(): pass")
+ ns = {"y" : 42}
+ py.builtin.execfile(str(test_file), ns)
+ assert ns["x"] == 42
+ assert py.code.getrawcode(ns["f"]).co_filename == str(test_file)
+ class A:
+ y = 3
+ x = 4
+ py.builtin.execfile(str(test_file))
+ assert A.x == 3
+
+def test_getfuncdict():
+ def f():
+ pass
+ f.x = 4
+ assert py.builtin._getfuncdict(f)["x"] == 4
+ assert py.builtin._getfuncdict(2) is None
+
+def test_callable():
+ class A: pass
+ assert py.builtin.callable(test_callable)
+ assert py.builtin.callable(A)
+ assert py.builtin.callable(list)
+ assert py.builtin.callable(id)
+ assert not py.builtin.callable(4)
+ assert not py.builtin.callable("hi")
+
+def test_totext():
+ py.builtin._totext("hello", "UTF-8")
+
+def test_bytes_text():
+ if sys.version_info[0] < 3:
+ assert py.builtin.text == unicode
+ assert py.builtin.bytes == str
+ else:
+ assert py.builtin.text == str
+ assert py.builtin.bytes == bytes
+
+def test_totext_badutf8():
+ # this was in printouts within the pytest testsuite
+ # totext would fail
+ if sys.version_info >= (3,):
+ errors = 'surrogateescape'
+ else: # old python has crappy error handlers
+ errors = 'replace'
+ py.builtin._totext("\xa6", "UTF-8", errors)
+
+def test_reraise():
+ from py.builtin import _reraise
+ try:
+ raise Exception()
+ except Exception:
+ cls, val, tb = sys.exc_info()
+ excinfo = py.test.raises(Exception, "_reraise(cls, val, tb)")
+
+def test_exec():
+ l = []
+ py.builtin.exec_("l.append(1)")
+ assert l == [1]
+ d = {}
+ py.builtin.exec_("x=4", d)
+ assert d['x'] == 4
+
+def test_tryimport():
+ py.test.raises(ImportError, py.builtin._tryimport, 'xqwe123')
+ x = py.builtin._tryimport('asldkajsdl', 'py')
+ assert x == py
+ x = py.builtin._tryimport('asldkajsdl', 'py.path')
+ assert x == py.path
+
+def test_getcode():
+ code = py.builtin._getcode(test_getcode)
+ assert isinstance(code, types.CodeType)
+ assert py.builtin._getcode(4) is None
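The helpers covered here exist to paper over Python 2/3 differences; a compact sketch
of the ones these tests lean on most (the underscore-prefixed helpers are internal,
used here only because the tests above use them too):

    import py

    f = py.io.TextIO()
    py.builtin.print_("hello", "world", sep=", ", file=f)   # version-agnostic print
    assert f.getvalue() == "hello, world\n"

    ns = {}
    py.builtin.exec_("x = 2 + 2", ns)                       # version-agnostic exec
    assert ns["x"] == 4

    text = py.builtin._totext(b"caf\xc3\xa9", "UTF-8")      # bytes -> text
    assert isinstance(text, py.builtin.text)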
diff --git a/testing/web-platform/tests/tools/py/testing/root/test_error.py b/testing/web-platform/tests/tools/py/testing/root/test_error.py
new file mode 100644
index 000000000..a34e0068d
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/root/test_error.py
@@ -0,0 +1,37 @@
+
+import py
+
+import errno
+
+def test_error_classes():
+ for name in errno.errorcode.values():
+ x = getattr(py.error, name)
+ assert issubclass(x, py.error.Error)
+ assert issubclass(x, EnvironmentError)
+
+def test_picklability_issue1():
+ e1 = py.error.ENOENT()
+ s = py.std.pickle.dumps(e1)
+ e2 = py.std.pickle.loads(s)
+ assert isinstance(e2, py.error.ENOENT)
+
+def test_unknown_error():
+ num = 3999
+ cls = py.error._geterrnoclass(num)
+ assert cls.__name__ == 'UnknownErrno%d' % (num,)
+ assert issubclass(cls, py.error.Error)
+ assert issubclass(cls, EnvironmentError)
+ cls2 = py.error._geterrnoclass(num)
+ assert cls is cls2
+
+def test_error_conversion_ENOTDIR(testdir):
+ p = testdir.makepyfile("")
+ excinfo = py.test.raises(py.error.Error, py.error.checked_call, p.listdir)
+ assert isinstance(excinfo.value, EnvironmentError)
+ assert isinstance(excinfo.value, py.error.Error)
+ assert "ENOTDIR" in repr(excinfo.value)
+
+
+def test_checked_call_supports_kwargs(tmpdir):
+ import tempfile
+ py.error.checked_call(tempfile.mkdtemp, dir=str(tmpdir))
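py.error maps errno values to exception classes, and checked_call converts the raw
OSError from a failing call into the matching class; a minimal sketch of that round
trip:

    import os
    import py

    try:
        py.error.checked_call(os.stat, "/no/such/path")
    except py.error.ENOENT as exc:
        # the generated class subclasses both py.error.Error and EnvironmentError
        assert isinstance(exc, py.error.Error)
        assert isinstance(exc, EnvironmentError)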
diff --git a/testing/web-platform/tests/tools/py/testing/root/test_py_imports.py b/testing/web-platform/tests/tools/py/testing/root/test_py_imports.py
new file mode 100644
index 000000000..5f5954e99
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/root/test_py_imports.py
@@ -0,0 +1,68 @@
+import py
+import types
+import sys
+
+def checksubpackage(name):
+ obj = getattr(py, name)
+ if hasattr(obj, '__map__'): # isinstance(obj, Module):
+ keys = dir(obj)
+ assert len(keys) > 0
+ print (obj.__map__)
+ for name in list(obj.__map__):
+ assert hasattr(obj, name), (obj, name)
+
+def test_dir():
+ for name in dir(py):
+ if not name.startswith('_'):
+ yield checksubpackage, name
+
+def test_virtual_module_identity():
+ from py import path as path1
+ from py import path as path2
+ assert path1 is path2
+ from py.path import local as local1
+ from py.path import local as local2
+ assert local1 is local2
+
+def test_importall():
+ base = py._pydir
+ nodirs = [
+ ]
+ if sys.version_info >= (3,0):
+ nodirs.append(base.join('_code', '_assertionold.py'))
+ else:
+ nodirs.append(base.join('_code', '_assertionnew.py'))
+
+ def recurse(p):
+ return p.check(dotfile=0) and p.basename != "attic"
+
+ for p in base.visit('*.py', recurse):
+ if p.basename == '__init__.py':
+ continue
+ relpath = p.new(ext='').relto(base)
+ if base.sep in relpath: # not py/*.py itself
+ for x in nodirs:
+ if p == x or p.relto(x):
+ break
+ else:
+ relpath = relpath.replace(base.sep, '.')
+ modpath = 'py.%s' % relpath
+ try:
+ check_import(modpath)
+ except py.test.skip.Exception:
+ pass
+
+def check_import(modpath):
+ py.builtin.print_("checking import", modpath)
+ assert __import__(modpath)
+
+def test_all_resolves():
+ seen = py.builtin.set([py])
+ lastlength = None
+ while len(seen) != lastlength:
+ lastlength = len(seen)
+ for item in py.builtin.frozenset(seen):
+ for value in item.__dict__.values():
+ if isinstance(value, type(py.test)):
+ seen.add(value)
+
diff --git a/testing/web-platform/tests/tools/py/testing/root/test_std.py b/testing/web-platform/tests/tools/py/testing/root/test_std.py
new file mode 100644
index 000000000..143556a05
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/root/test_std.py
@@ -0,0 +1,13 @@
+
+import py
+
+def test_os():
+ import os
+ assert py.std.os is os
+
+def test_import_error_converts_to_attributeerror():
+ py.test.raises(AttributeError, "py.std.xyzalskdj")
+
+def test_std_gets_it():
+ for x in py.std.sys.modules:
+ assert x in py.std.__dict__
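For reference, the lazy py.std namespace used throughout these test files resolves
attribute access to an import; unknown names surface as AttributeError rather than
ImportError, as asserted above. A tiny sketch:

    import os
    import sys
    import py

    assert py.std.os is os        # same module object, resolved lazily on access
    assert py.std.sys is sys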
diff --git a/testing/web-platform/tests/tools/py/testing/root/test_xmlgen.py b/testing/web-platform/tests/tools/py/testing/root/test_xmlgen.py
new file mode 100644
index 000000000..704d1492c
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/root/test_xmlgen.py
@@ -0,0 +1,145 @@
+
+import py
+from py._xmlgen import unicode, html, raw
+
+class ns(py.xml.Namespace):
+ pass
+
+def test_escape():
+ uvalue = py.builtin._totext('\xc4\x85\xc4\x87\n\xe2\x82\xac\n', 'utf-8')
+ class A:
+ def __unicode__(self):
+ return uvalue
+ def __str__(self):
+ x = self.__unicode__()
+ if py.std.sys.version_info[0] < 3:
+ return x.encode('utf-8')
+ return x
+ y = py.xml.escape(uvalue)
+ assert y == uvalue
+ x = py.xml.escape(A())
+ assert x == uvalue
+ if py.std.sys.version_info[0] < 3:
+ assert isinstance(x, unicode)
+ assert isinstance(y, unicode)
+ y = py.xml.escape(uvalue.encode('utf-8'))
+ assert y == uvalue
+
+
+def test_tag_with_text():
+ x = ns.hello("world")
+ u = unicode(x)
+ assert u == "<hello>world</hello>"
+
+def test_class_identity():
+ assert ns.hello is ns.hello
+
+def test_tag_with_text_and_attributes():
+ x = ns.some(name="hello", value="world")
+ assert x.attr.name == 'hello'
+ assert x.attr.value == 'world'
+ u = unicode(x)
+ assert u == '<some name="hello" value="world"/>'
+
+def test_tag_with_subclassed_attr_simple():
+ class my(ns.hello):
+ class Attr(ns.hello.Attr):
+ hello="world"
+ x = my()
+ assert x.attr.hello == 'world'
+ assert unicode(x) == '<my hello="world"/>'
+
+def test_tag_with_raw_attr():
+ x = html.object(data=raw('&'))
+ assert unicode(x) == '<object data="&"></object>'
+
+def test_tag_nested():
+ x = ns.hello(ns.world())
+ unicode(x) # triggers parentifying
+ assert x[0].parent is x
+ u = unicode(x)
+ assert u == '<hello><world/></hello>'
+
+def test_list_nested():
+ x = ns.hello([ns.world()]) #pass in a list here
+ u = unicode(x)
+ assert u == '<hello><world/></hello>'
+
+def test_tag_xmlname():
+ class my(ns.hello):
+ xmlname = 'world'
+ u = unicode(my())
+ assert u == '<world/>'
+
+def test_tag_with_text_entity():
+ x = ns.hello('world & rest')
+ u = unicode(x)
+ assert u == "<hello>world &amp; rest</hello>"
+
+def test_tag_with_text_and_attributes_entity():
+ x = ns.some(name="hello & world")
+ assert x.attr.name == "hello & world"
+ u = unicode(x)
+ assert u == '<some name="hello &amp; world"/>'
+
+def test_raw():
+ x = ns.some(py.xml.raw("<p>literal</p>"))
+ u = unicode(x)
+ assert u == "<some><p>literal</p></some>"
+
+
+def test_html_name_stickyness():
+ class my(html.p):
+ pass
+ x = my("hello")
+ assert unicode(x) == '<p>hello</p>'
+
+def test_stylenames():
+ class my:
+ class body(html.body):
+ style = html.Style(font_size = "12pt")
+ u = unicode(my.body())
+ assert u == '<body style="font-size: 12pt"></body>'
+
+def test_class_None():
+ t = html.body(class_=None)
+ u = unicode(t)
+ assert u == '<body></body>'
+
+def test_alternating_style():
+ alternating = (
+ html.Style(background="white"),
+ html.Style(background="grey"),
+ )
+ class my(html):
+ class li(html.li):
+ def style(self):
+ i = self.parent.index(self)
+ return alternating[i%2]
+ style = property(style)
+
+ x = my.ul(
+ my.li("hello"),
+ my.li("world"),
+ my.li("42"))
+ u = unicode(x)
+ assert u == ('<ul><li style="background: white">hello</li>'
+ '<li style="background: grey">world</li>'
+ '<li style="background: white">42</li>'
+ '</ul>')
+
+def test_singleton():
+ h = html.head(html.link(href="foo"))
+ assert unicode(h) == '<head><link href="foo"/></head>'
+
+ h = html.head(html.script(src="foo"))
+ assert unicode(h) == '<head><script src="foo"></script></head>'
+
+def test_inline():
+ h = html.div(html.span('foo'), html.span('bar'))
+ assert (h.unicode(indent=2) ==
+ '<div><span>foo</span><span>bar</span></div>')
+
+def test_object_tags():
+ o = html.object(html.object())
+ assert o.unicode(indent=0) == '<object><object></object></object>'
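The tag classes exercised above come from py.xml's Namespace machinery: nested tag
objects render to markup, text content is escaped automatically, and py.xml.raw()
opts out of escaping. A short sketch (tag contents are arbitrary):

    import py
    from py.xml import html

    doc = html.div(
        html.h1("demo"),
        html.p("hello ", html.em("world"), class_="greeting"))
    print(doc.unicode(indent=2))
    # '&' and '<' in text are escaped; wrap markup in py.xml.raw() to keep it literal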
diff --git a/testing/web-platform/tests/tools/py/testing/test_iniconfig.py b/testing/web-platform/tests/tools/py/testing/test_iniconfig.py
new file mode 100644
index 000000000..9a7f72c11
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/testing/test_iniconfig.py
@@ -0,0 +1,299 @@
+import py
+import pytest
+from py._iniconfig import IniConfig, ParseError, __all__ as ALL
+from py._iniconfig import iscommentline
+from textwrap import dedent
+
+def pytest_generate_tests(metafunc):
+ if 'input' in metafunc.funcargnames:
+ for name, (input, expected) in check_tokens.items():
+ metafunc.addcall(id=name, funcargs={
+ 'input': input,
+ 'expected': expected,
+ })
+ elif hasattr(metafunc.function, 'multi'):
+ kwargs = metafunc.function.multi.kwargs
+ names, values = zip(*kwargs.items())
+ values = cartesian_product(*values)
+ for p in values:
+ metafunc.addcall(funcargs=dict(zip(names, p)))
+
+def cartesian_product(L,*lists):
+ # copied from http://bit.ly/cyIXjn
+ if not lists:
+ for x in L:
+ yield (x,)
+ else:
+ for x in L:
+ for y in cartesian_product(lists[0],*lists[1:]):
+ yield (x,)+y
+
+check_tokens = {
+ 'section': (
+ '[section]',
+ [(0, 'section', None, None)]
+ ),
+ 'value': (
+ 'value = 1',
+ [(0, None, 'value', '1')]
+ ),
+ 'value in section': (
+ '[section]\nvalue=1',
+ [(0, 'section', None, None), (1, 'section', 'value', '1')]
+ ),
+ 'value with continuation': (
+ 'names =\n Alice\n Bob',
+ [(0, None, 'names', 'Alice\nBob')]
+ ),
+ 'value with aligned continuation': (
+ 'names = Alice\n'
+ ' Bob',
+ [(0, None, 'names', 'Alice\nBob')]
+ ),
+ 'blank line':(
+ '[section]\n\nvalue=1',
+ [(0, 'section', None, None), (2, 'section', 'value', '1')]
+ ),
+ 'comment': (
+ '# comment',
+ []
+ ),
+ 'comment on value': (
+ 'value = 1',
+ [(0, None, 'value', '1')]
+ ),
+
+ 'comment on section': (
+ '[section] #comment',
+ [(0, 'section', None, None)]
+ ),
+ 'comment2': (
+ '; comment',
+ []
+ ),
+
+ 'comment2 on section': (
+ '[section] ;comment',
+ [(0, 'section', None, None)]
+ ),
+ 'pseudo section syntax in value': (
+ 'name = value []',
+ [(0, None, 'name', 'value []')]
+ ),
+ 'assignment in value': (
+ 'value = x = 3',
+ [(0, None, 'value', 'x = 3')]
+ ),
+ 'use of colon for name-values': (
+ 'name: y',
+ [(0, None, 'name', 'y')]
+ ),
+ 'use of colon without space': (
+ 'value:y=5',
+ [(0, None, 'value', 'y=5')]
+ ),
+ 'equality gets precedence': (
+ 'value=xyz:5',
+ [(0, None, 'value', 'xyz:5')]
+ ),
+
+}
+
+def parse(input):
+ # only for testing purposes - _parse() does not use state except path
+ ini = object.__new__(IniConfig)
+ ini.path = "sample"
+ return ini._parse(input.splitlines(True))
+
+def parse_a_error(input):
+ return py.test.raises(ParseError, parse, input)
+
+def test_tokenize(input, expected):
+ parsed = parse(input)
+ assert parsed == expected
+
+def test_parse_empty():
+ parsed = parse("")
+ assert not parsed
+ ini = IniConfig("sample", "")
+ assert not ini.sections
+
+def test_ParseError():
+ e = ParseError("filename", 0, "hello")
+ assert str(e) == "filename:1: hello"
+
+def test_continuation_needs_preceding_token():
+ excinfo = parse_a_error(' Foo')
+ assert excinfo.value.lineno == 0
+
+def test_continuation_cant_be_after_section():
+ excinfo = parse_a_error('[section]\n Foo')
+ assert excinfo.value.lineno == 1
+
+def test_section_cant_be_empty():
+ excinfo = parse_a_error('[]')
+
+@py.test.mark.multi(line=[
+ '!!',
+ ])
+def test_error_on_weird_lines(line):
+ parse_a_error(line)
+
+def test_iniconfig_from_file(tmpdir):
+ path = tmpdir/'test.txt'
+ path.write('[metadata]\nname=1')
+
+ config = IniConfig(path=path)
+ assert list(config.sections) == ['metadata']
+ config = IniConfig(path, "[diff]")
+ assert list(config.sections) == ['diff']
+ py.test.raises(TypeError, "IniConfig(data=path.read())")
+
+def test_iniconfig_section_first(tmpdir):
+ excinfo = py.test.raises(ParseError, """
+ IniConfig("x", data='name=1')
+ """)
+ assert excinfo.value.msg == "no section header defined"
+
+def test_iniconfig_section_duplicate_fails():
+ excinfo = py.test.raises(ParseError, r"""
+ IniConfig("x", data='[section]\n[section]')
+ """)
+ assert 'duplicate section' in str(excinfo.value)
+
+def test_iniconfig_duplicate_key_fails():
+ excinfo = py.test.raises(ParseError, r"""
+ IniConfig("x", data='[section]\nname = Alice\nname = bob')
+ """)
+
+ assert 'duplicate name' in str(excinfo.value)
+
+def test_iniconfig_lineof():
+ config = IniConfig("x.ini", data=
+ '[section]\n'
+ 'value = 1\n'
+ '[section2]\n'
+ '# comment\n'
+ 'value =2'
+ )
+
+ assert config.lineof('missing') is None
+ assert config.lineof('section') == 1
+ assert config.lineof('section2') == 3
+ assert config.lineof('section', 'value') == 2
+ assert config.lineof('section2','value') == 5
+
+ assert config['section'].lineof('value') == 2
+ assert config['section2'].lineof('value') == 5
+
+def test_iniconfig_get_convert():
+ config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
+ assert config.get('section', 'int') == '1'
+ assert config.get('section', 'int', convert=int) == 1
+
+def test_iniconfig_get_missing():
+ config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
+ assert config.get('section', 'missing', default=1) == 1
+ assert config.get('section', 'missing') is None
+
+def test_section_get():
+ config = IniConfig("x", data='[section]\nvalue=1')
+ section = config['section']
+ assert section.get('value', convert=int) == 1
+ assert section.get('value', 1) == "1"
+ assert section.get('missing', 2) == 2
+
+def test_missing_section():
+ config = IniConfig("x", data='[section]\nvalue=1')
+ py.test.raises(KeyError,'config["other"]')
+
+def test_section_getitem():
+ config = IniConfig("x", data='[section]\nvalue=1')
+ assert config['section']['value'] == '1'
+ assert config['section']['value'] == '1'
+
+def test_section_iter():
+ config = IniConfig("x", data='[section]\nvalue=1')
+ names = list(config['section'])
+ assert names == ['value']
+ items = list(config['section'].items())
+ assert items==[('value', '1')]
+
+def test_config_iter():
+ config = IniConfig("x.ini", data=dedent('''
+ [section1]
+ value=1
+ [section2]
+ value=2
+ '''))
+ l = list(config)
+ assert len(l) == 2
+ assert l[0].name == 'section1'
+ assert l[0]['value'] == '1'
+ assert l[1].name == 'section2'
+ assert l[1]['value'] == '2'
+
+def test_config_contains():
+ config = IniConfig("x.ini", data=dedent('''
+ [section1]
+ value=1
+ [section2]
+ value=2
+ '''))
+ assert 'xyz' not in config
+ assert 'section1' in config
+ assert 'section2' in config
+
+def test_iter_file_order():
+ config = IniConfig("x.ini", data="""
+[section2] #cpython dict ordered before section
+value = 1
+value2 = 2 # dict ordered before value
+[section]
+a = 1
+b = 2
+""")
+ l = list(config)
+ secnames = [x.name for x in l]
+ assert secnames == ['section2', 'section']
+ assert list(config['section2']) == ['value', 'value2']
+ assert list(config['section']) == ['a', 'b']
+
+def test_example_pypirc():
+ config = IniConfig("pypirc", data=dedent('''
+ [distutils]
+ index-servers =
+ pypi
+ other
+
+ [pypi]
+ repository: <repository-url>
+ username: <username>
+ password: <password>
+
+ [other]
+ repository: http://example.com/pypi
+ username: <username>
+ password: <password>
+ '''))
+ distutils, pypi, other = list(config)
+ assert distutils["index-servers"] == "pypi\nother"
+ assert pypi['repository'] == '<repository-url>'
+ assert pypi['username'] == '<username>'
+ assert pypi['password'] == '<password>'
+ assert ['repository', 'username', 'password'] == list(other)
+
+
+def test_api_import():
+ assert ALL == ['IniConfig', 'ParseError']
+
+@pytest.mark.parametrize("line", [
+ "#qwe",
+ " #qwe",
+ ";qwe",
+ " ;qwe",
+])
+def test_iscommentline_true(line):
+ assert iscommentline(line)
+
+
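IniConfig, as exercised above, gives ordered, line-aware access to ini data; a small
usage sketch (the file name and contents are made up):

    from py._iniconfig import IniConfig

    ini = IniConfig("example.ini", data=(
        "[server]\n"
        "host = localhost\n"
        "port = 8080\n"))
    server = ini["server"]
    assert server.get("port", convert=int) == 8080       # typed access
    assert ini.lineof("server", "port") == 3              # 1-based source line
    assert [section.name for section in ini] == ["server"]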
diff --git a/testing/web-platform/tests/tools/py/tox.ini b/testing/web-platform/tests/tools/py/tox.ini
new file mode 100644
index 000000000..8c0c79d69
--- /dev/null
+++ b/testing/web-platform/tests/tools/py/tox.ini
@@ -0,0 +1,39 @@
+[tox]
+envlist=py26,py27,py33,py34,external
+# py27-xdist causes problems with svn, py25 requires virtualenv==1.9.1
+#indexserver=
+# default=http://pypi.testrun.org
+
+[testenv]
+changedir=testing
+commands=py.test --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}.xml []
+deps=pytest
+
+[testenv:py27-xdist]
+basepython=python2.7
+deps=
+ pytest
+ pytest-xdist
+commands=
+ py.test -n3 -rfsxX --confcutdir=.. --runslowtests \
+ --junitxml={envlogdir}/junit-{envname}.xml []
+
+[testenv:jython]
+changedir=testing
+commands=
+ {envpython} -m pytest --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}0.xml {posargs:io_ code}
+
+[testenv:py25]
+setenv = PIP_INSECURE=1
+
+[testenv:external]
+deps=
+ pytest
+ jinja2
+ decorator
+commands=
+ py.test --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}.xml {posargs:code}
+
+[pytest]
+rsyncdirs = conftest.py py doc testing
+addopts = -rxXf
diff --git a/testing/web-platform/tests/tools/pytest.ini b/testing/web-platform/tests/tools/pytest.ini
new file mode 100644
index 000000000..1fb328e1f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+norecursedirs = .* {arch} *.egg html5lib py pytest pywebsocket six webdriver wptserve
diff --git a/testing/web-platform/tests/tools/pytest/.gitattributes b/testing/web-platform/tests/tools/pytest/.gitattributes
new file mode 100644
index 000000000..242d3da0d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/.gitattributes
@@ -0,0 +1 @@
+CHANGELOG merge=union
diff --git a/testing/web-platform/tests/tools/pytest/.github/ISSUE_TEMPLATE.md b/testing/web-platform/tests/tools/pytest/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..bc62e8a3f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,8 @@
+Thanks for submitting an issue!
+
+Here's a quick checklist of what to include:
+
+- [ ] Include a detailed description of the bug or suggestion
+- [ ] `pip list` of the virtual environment you are using
+- [ ] py.test and operating system versions
+- [ ] Minimal example if possible
diff --git a/testing/web-platform/tests/tools/pytest/.github/PULL_REQUEST_TEMPLATE.md b/testing/web-platform/tests/tools/pytest/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..d09edce43
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,8 @@
+Thanks for submitting a PR, your contribution is really appreciated!
+
+Here's a quick checklist of things that should be present in PRs:
+
+- [ ] Target: for bug or doc fixes, target `master`; for new features, target `features`
+- [ ] Make sure to include one or more tests for your change
+- [ ] Add yourself to `AUTHORS`
+- [ ] Add a new entry to the `CHANGELOG` (choose any open position to avoid merge conflicts with other PRs)
diff --git a/testing/web-platform/tests/tools/pytest/.gitignore b/testing/web-platform/tests/tools/pytest/.gitignore
new file mode 100644
index 000000000..e4355b859
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/.gitignore
@@ -0,0 +1,34 @@
+# Automatically generated by `hgimportsvn`
+.svn
+.hgsvn
+
+# Ignore local virtualenvs
+lib/
+bin/
+include/
+.Python/
+
+# These lines are suggested according to the svn:ignore property
+# Feel free to enable them by uncommenting them
+*.pyc
+*.pyo
+*.swp
+*.class
+*.orig
+*~
+
+.eggs/
+
+doc/*/_build
+build/
+dist/
+*.egg-info
+issue/
+env/
+.env/
+3rdparty/
+.tox
+.cache
+.coverage
+.ropeproject
+.idea
diff --git a/testing/web-platform/tests/tools/pytest/.travis.yml b/testing/web-platform/tests/tools/pytest/.travis.yml
new file mode 100644
index 000000000..3a8f36e95
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/.travis.yml
@@ -0,0 +1,40 @@
+sudo: false
+language: python
+python:
+ - '3.5'
+# command to install dependencies
+install: "pip install -U tox"
+# command to run tests
+env:
+ matrix:
+ # coveralls is not listed in tox's envlist, but should run in travis
+ - TESTENV=coveralls
+ # note: please use "tox --listenvs" to populate the build matrix below
+ - TESTENV=linting
+ - TESTENV=py26
+ - TESTENV=py27
+ - TESTENV=py33
+ - TESTENV=py34
+ - TESTENV=py35
+ - TESTENV=pypy
+ - TESTENV=py27-pexpect
+ - TESTENV=py27-xdist
+ - TESTENV=py27-trial
+ - TESTENV=py35-pexpect
+ - TESTENV=py35-xdist
+ - TESTENV=py35-trial
+ - TESTENV=py27-nobyte
+ - TESTENV=doctesting
+ - TESTENV=py27-cxfreeze
+
+script: tox --recreate -e $TESTENV
+
+notifications:
+ irc:
+ channels:
+ - "chat.freenode.net#pytest"
+ on_success: change
+ on_failure: change
+ skip_join: true
+ email:
+ - pytest-commit@python.org
diff --git a/testing/web-platform/tests/tools/pytest/AUTHORS b/testing/web-platform/tests/tools/pytest/AUTHORS
new file mode 100644
index 000000000..dfc0a542e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/AUTHORS
@@ -0,0 +1,87 @@
+Holger Krekel, holger at merlinux eu
+merlinux GmbH, Germany, office at merlinux eu
+
+Contributors include::
+
+Abhijeet Kasurde
+Anatoly Bubenkoff
+Andreas Zeidler
+Andy Freeland
+Anthon van der Neut
+Armin Rigo
+Aron Curzon
+Aviv Palivoda
+Benjamin Peterson
+Bob Ippolito
+Brian Dorsey
+Brian Okken
+Brianna Laugher
+Bruno Oliveira
+Carl Friedrich Bolz
+Charles Cloud
+Chris Lamb
+Christian Theunert
+Christian Tismer
+Christopher Gilling
+Daniel Grana
+Daniel Hahler
+Daniel Nuri
+Dave Hunt
+David Mohr
+David Vierra
+Edison Gustavo Muenz
+Eduardo Schettino
+Endre Galaczi
+Elizaveta Shashkova
+Eric Hunsberger
+Eric Siegerman
+Erik M. Bray
+Florian Bruhin
+Floris Bruynooghe
+Gabriel Reis
+Georgy Dyuldin
+Graham Horler
+Grig Gheorghiu
+Guido Wesdorp
+Harald Armin Massa
+Ian Bicking
+Jaap Broekhuizen
+Jan Balster
+Janne Vanhala
+Jason R. Coombs
+Joshua Bronson
+Jurko Gospodnetić
+Katarzyna Jachim
+Kevin Cox
+Lee Kamentsky
+Lukas Bednar
+Maciek Fijalkowski
+Maho
+Marc Schlaich
+Mark Abramowitz
+Markus Unterwaditzer
+Martijn Faassen
+Matt Bachmann
+Michael Aquilina
+Michael Birtwell
+Michael Droettboom
+Nicolas Delaby
+Pieter Mulder
+Piotr Banaszkiewicz
+Punyashloka Biswal
+Ralf Schmitt
+Raphael Pierzina
+Ronny Pfannschmidt
+Ross Lawley
+Ryan Wooden
+Samuele Pedroni
+Tom Viner
+Trevor Bekolay
+Wouter van Ackooy
+David Díaz-Barquero
+Eric Hunsberger
+Simon Gomizelj
+Russel Winder
+Ben Webb
+Alexei Kozlenok
+Cal Leeming
diff --git a/testing/web-platform/tests/tools/pytest/CHANGELOG.rst b/testing/web-platform/tests/tools/pytest/CHANGELOG.rst
new file mode 100644
index 000000000..f18f646f0
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/CHANGELOG.rst
@@ -0,0 +1,2586 @@
+2.9.1
+=====
+
+**Bug Fixes**
+
+* Improve error message when a plugin fails to load.
+ Thanks `@nicoddemus`_ for the PR.
+
+* Fix (`#1178 <https://github.com/pytest-dev/pytest/issues/1178>`_):
+ ``pytest.fail`` with non-ascii characters raises an internal pytest error.
+ Thanks `@nicoddemus`_ for the PR.
+
+* Fix (`#469`_): junit parses report.nodeid incorrectly when parameter IDs
+ contain ``::``. Thanks `@tomviner`_ for the PR (`#1431`_).
+
+* Fix (`#578 <https://github.com/pytest-dev/pytest/issues/578>`_): SyntaxErrors
+ containing non-ascii lines at the point of failure generated an internal
+ py.test error.
+ Thanks `@asottile`_ for the report and `@nicoddemus`_ for the PR.
+
+* Fix (`#1437`_): When passing a bytestring regex pattern to parametrize,
+ attempt to decode it as utf-8, ignoring errors.
+
+* Fix (`#649`_): parametrized test nodes cannot be specified to run on the command line.
+
+
+.. _#1437: https://github.com/pytest-dev/pytest/issues/1437
+.. _#469: https://github.com/pytest-dev/pytest/issues/469
+.. _#1431: https://github.com/pytest-dev/pytest/pull/1431
+.. _#649: https://github.com/pytest-dev/pytest/issues/649
+
+.. _@asottile: https://github.com/asottile
+
+
+2.9.0
+=====
+
+**New Features**
+
+* New ``pytest.mark.skip`` mark, which unconditionally skips marked tests.
+ Thanks `@MichaelAquilina`_ for the complete PR (`#1040`_).
+
+* ``--doctest-glob`` may now be passed multiple times on the command line.
+ Thanks `@jab`_ and `@nicoddemus`_ for the PR.
+
+* New ``-rp`` and ``-rP`` reporting options give the summary and full output
+ of passing tests, respectively. Thanks to `@codewarrior0`_ for the PR.
+
+* ``pytest.mark.xfail`` now has a ``strict`` option, which makes ``XPASS``
+ tests fail the test suite (defaulting to ``False``). There's also an
+ ``xfail_strict`` ini option that can be used to configure it project-wide.
+ Thanks `@rabbbit`_ for the request and `@nicoddemus`_ for the PR (`#1355`_).
+
+* ``Parser.addini`` now supports options of type ``bool``.
+ Thanks `@nicoddemus`_ for the PR.
+
+* New ``ALLOW_BYTES`` doctest option. This strips ``b`` prefixes from byte strings
+ in doctest output (similar to ``ALLOW_UNICODE``).
+ Thanks `@jaraco`_ for the request and `@nicoddemus`_ for the PR (`#1287`_).
+
+* Give a hint on ``KeyboardInterrupt`` to use the ``--fulltrace`` option to show the errors.
+ Fixes `#1366`_.
+ Thanks to `@hpk42`_ for the report and `@RonnyPfannschmidt`_ for the PR.
+
+* Catch ``IndexError`` exceptions when getting exception source location.
+ Fixes a pytest internal error for dynamically generated code (fixtures and tests)
+ where source lines are fake by intention.
+
+**Changes**
+
+* **Important**: `py.code <http://pylib.readthedocs.org/en/latest/code.html>`_ has been
+ merged into the ``pytest`` repository as ``pytest._code``. This decision
+ was made because ``py.code`` had very few uses outside ``pytest`` and the
+ fact that it was in a different repository made it difficult to fix bugs on
+ its code in a timely manner. The team hopes with this to be able to better
+ refactor out and improve that code.
+ This change shouldn't affect users, but it is useful to make users aware of it
+ in case they encounter any strange behavior.
+
+ Keep in mind that the code for ``pytest._code`` is **private** and
+ **experimental**, so you definitely should not import it explicitly!
+
+ Please note that the original ``py.code`` is still available in
+ `pylib <http://pylib.readthedocs.org>`_.
+
+* ``pytest_enter_pdb`` now optionally receives the pytest config object.
+ Thanks `@nicoddemus`_ for the PR.
+
+* Removed code and documentation for Python 2.5 or lower versions,
+ including removal of the obsolete ``_pytest.assertion.oldinterpret`` module.
+ Thanks `@nicoddemus`_ for the PR (`#1226`_).
+
+* Comparisons now always show up in full when ``CI`` or ``BUILD_NUMBER`` is
+ found in the environment, even when ``-vv`` isn't used.
+ Thanks `@The-Compiler`_ for the PR.
+
+* ``--lf`` and ``--ff`` now support long names: ``--last-failed`` and
+ ``--failed-first`` respectively.
+ Thanks `@MichaelAquilina`_ for the PR.
+
+* Added expected exceptions to ``pytest.raises`` fail message.
+
+* Collection only displays progress ("collecting X items") when in a terminal.
+ This avoids cluttering the output when using ``--color=yes`` to obtain
+ colors in CI integration systems (`#1397`_).
+
+**Bug Fixes**
+
+* The ``-s`` and ``-c`` options should now work under ``xdist``;
+ ``Config.fromdictargs`` now represents its input much more faithfully.
+ Thanks to `@bukzor`_ for the complete PR (`#680`_).
+
+* Fix (`#1290`_): support Python 3.5's ``@`` operator in assertion rewriting.
+ Thanks `@Shinkenjoe`_ for report with test case and `@tomviner`_ for the PR.
+
+* Fix formatting utf-8 explanation messages (`#1379`_).
+ Thanks `@biern`_ for the PR.
+
+* Fix `traceback style docs`_ to describe all of the available options
+ (auto/long/short/line/native/no), with `auto` being the default since v2.6.
+ Thanks `@hackebrot`_ for the PR.
+
+* Fix (`#1422`_): junit record_xml_property doesn't allow multiple records
+ with same name.
+
+.. _`traceback style docs`: https://pytest.org/latest/usage.html#modifying-python-traceback-printing
+
+.. _#1422: https://github.com/pytest-dev/pytest/issues/1422
+.. _#1379: https://github.com/pytest-dev/pytest/issues/1379
+.. _#1366: https://github.com/pytest-dev/pytest/issues/1366
+.. _#1040: https://github.com/pytest-dev/pytest/pull/1040
+.. _#680: https://github.com/pytest-dev/pytest/issues/680
+.. _#1287: https://github.com/pytest-dev/pytest/pull/1287
+.. _#1226: https://github.com/pytest-dev/pytest/pull/1226
+.. _#1290: https://github.com/pytest-dev/pytest/pull/1290
+.. _#1355: https://github.com/pytest-dev/pytest/pull/1355
+.. _#1397: https://github.com/pytest-dev/pytest/issues/1397
+.. _@biern: https://github.com/biern
+.. _@MichaelAquilina: https://github.com/MichaelAquilina
+.. _@bukzor: https://github.com/bukzor
+.. _@hpk42: https://github.com/hpk42
+.. _@nicoddemus: https://github.com/nicoddemus
+.. _@jab: https://github.com/jab
+.. _@codewarrior0: https://github.com/codewarrior0
+.. _@jaraco: https://github.com/jaraco
+.. _@The-Compiler: https://github.com/The-Compiler
+.. _@Shinkenjoe: https://github.com/Shinkenjoe
+.. _@tomviner: https://github.com/tomviner
+.. _@RonnyPfannschmidt: https://github.com/RonnyPfannschmidt
+.. _@rabbbit: https://github.com/rabbbit
+.. _@hackebrot: https://github.com/hackebrot
+
+2.8.7
+=====
+
+- fix #1338: use predictable object resolution for monkeypatch
+
+2.8.6
+=====
+
+- fix #1259: allow for double nodeids in junitxml;
+ this was a regression that broke plugin combinations
+ like pytest-pep8 + pytest-flakes
+
+- Workaround for exception that occurs in pyreadline when using
+ ``--pdb`` with standard I/O capture enabled.
+ Thanks Erik M. Bray for the PR.
+
+- fix #900: Better error message in case the target of a ``monkeypatch`` call
+ raises an ``ImportError``.
+
+- fix #1292: monkeypatch calls (setattr, setenv, etc.) are now O(1).
+ Thanks David R. MacIver for the report and Bruno Oliveira for the PR.
+
+- fix #1223: captured stdout and stderr are now properly displayed before
+ entering pdb when ``--pdb`` is used instead of being thrown away.
+ Thanks Cal Leeming for the PR.
+
+- fix #1305: pytest warnings emitted during ``pytest_terminal_summary`` are now
+ properly displayed.
+ Thanks Ionel Maries Cristian for the report and Bruno Oliveira for the PR.
+
+- fix #628: fixed internal UnicodeDecodeError when doctests contain unicode.
+ Thanks Jason R. Coombs for the report and Bruno Oliveira for the PR.
+
+- fix #1334: Add captured stdout to jUnit XML report on setup error.
+ Thanks Georgy Dyuldin for the PR.
+
+
+2.8.5
+=====
+
+- fix #1243: fixed issue where class attributes injected during collection could break pytest.
+ PR by Alexei Kozlenok, thanks Ronny Pfannschmidt and Bruno Oliveira for the review and help.
+
+- fix #1074: precompute junitxml chunks instead of storing the whole tree in objects
+ Thanks Bruno Oliveira for the report and Ronny Pfannschmidt for the PR
+
+- fix #1238: fix ``pytest.deprecated_call()`` receiving multiple arguments
+ (Regression introduced in 2.8.4). Thanks Alex Gaynor for the report and
+ Bruno Oliveira for the PR.
+
+
+2.8.4
+=====
+
+- fix #1190: ``deprecated_call()`` now works when the deprecated
+ function has been already called by another test in the same
+ module. Thanks Mikhail Chernykh for the report and Bruno Oliveira for the
+ PR.
+
+- fix #1198: ``--pastebin`` option now works on Python 3. Thanks
+ Mehdy Khoshnoody for the PR.
+
+- fix #1219: ``--pastebin`` now works correctly when captured output contains
+ non-ascii characters. Thanks Bruno Oliveira for the PR.
+
+- fix #1204: another error when collecting with a nasty __getattr__().
+ Thanks Florian Bruhin for the PR.
+
+- fix the summary printed when no tests were run.
+ Thanks Florian Bruhin for the PR.
+
+- fix #1185: ensure MANIFEST.in exactly matches what should go into an sdist
+
+- a number of documentation modernizations wrt good practices.
+ Thanks Bruno Oliveira for the PR.
+
+2.8.3
+=====
+
+- fix #1169: add __name__ attribute to testcases in TestCaseFunction to
+ support the @unittest.skip decorator on functions and methods.
+ Thanks Lee Kamentsky for the PR.
+
+- fix #1035: collecting tests if test module level obj has __getattr__().
+ Thanks Suor for the report and Bruno Oliveira / Tom Viner for the PR.
+
+- fix #331: don't collect tests if their failure cannot be reported correctly
+ e.g. they are a callable instance of a class.
+
+- fix #1133: fixed internal error when filtering tracebacks where one entry
+ belongs to a file which is no longer available.
+ Thanks Bruno Oliveira for the PR.
+
+- enhancement made to highlight in red the name of the failing tests so
+ they stand out in the output.
+ Thanks Gabriel Reis for the PR.
+
+- add more talks to the documentation
+- extend documentation on the --ignore cli option
+- use pytest-runner for setuptools integration
+- minor fixes for interaction with OS X El Capitan
+ system integrity protection (thanks Florian)
+
+
+2.8.2
+=====
+
+- fix #1085: proper handling of encoding errors when passing encoded byte
+ strings to pytest.parametrize in Python 2.
+ Thanks Themanwithoutaplan for the report and Bruno Oliveira for the PR.
+
+- fix #1087: handling SystemError when passing empty byte strings to
+ pytest.parametrize in Python 3.
+ Thanks Paul Kehrer for the report and Bruno Oliveira for the PR.
+
+- fix #995: fixed internal error when filtering tracebacks where one entry
+ was generated by an exec() statement.
+ Thanks Daniel Hahler, Ashley C Straw, Philippe Gauthier and Pavel Savchenko
+ for contributing and Bruno Oliveira for the PR.
+
+- fix #1100 and #1057: errors when using autouse fixtures and doctest modules.
+ Thanks Sergey B Kirpichev and Vital Kudzelka for contributing and Bruno
+ Oliveira for the PR.
+
+2.8.1
+=====
+
+- fix #1034: Add missing nodeid on pytest_logwarning call in
+ addhook. Thanks Simon Gomizelj for the PR.
+
+- 'deprecated_call' is now only satisfied with a DeprecationWarning or
+ PendingDeprecationWarning. Before 2.8.0, it accepted any warning, and 2.8.0
+ made it accept only DeprecationWarning (but not PendingDeprecationWarning).
+ Thanks Alex Gaynor for the issue and Eric Hunsberger for the PR.
+
+- fix issue #1073: avoid calling __getattr__ on potential plugin objects.
+ This fixes an incompatibility with pytest-django. Thanks Andreas Pelme,
+ Bruno Oliveira and Ronny Pfannschmidt for contributing and Holger Krekel
+ for the fix.
+
+- Fix issue #704: handle versionconflict during plugin loading more
+ gracefully. Thanks Bruno Oliveira for the PR.
+
+- Fix issue #1064: ""--junitxml" regression when used with the
+ "pytest-xdist" plugin, with test reports being assigned to the wrong tests.
+ Thanks Daniel Grunwald for the report and Bruno Oliveira for the PR.
+
+- (experimental) adopt more SEMVER-style versioning and change the meaning of
+ the master branch in the git repo: the "master" branch now keeps the bugfixes and
+ changes aimed at micro releases. The "features" branch will only be released
+ with minor or major pytest releases.
+
+- Fix issue #766 by removing documentation references to distutils.
+ Thanks Russel Winder.
+
+- Fix issue #1030: now byte-strings are escaped to produce item node ids
+ to make them always serializable.
+ Thanks Andy Freeland for the report and Bruno Oliveira for the PR.
+
+- Python 2: if unicode parametrized values are convertible to ascii, their
+ ascii representation is used for the node id.
+
+- Fix issue #411: Add __eq__ method to assertion comparison example.
+ Thanks Ben Webb.
+- Fix issue #653: deprecated_call can be used as context manager.
+
+- fix issue 877: properly handle assertion explanations with non-ascii repr
+ Thanks Mathieu Agopian for the report and Ronny Pfannschmidt for the PR.
+
+- fix issue 1029: transform errors when writing cache values into pytest-warnings
+
+2.8.0
+=====
+
+- new ``--lf`` and ``-ff`` options to run only the last failing tests or
+ "failing tests first" from the last run. This functionality is provided
+ through porting the formerly external pytest-cache plugin into pytest core.
+ BACKWARD INCOMPAT: if you used pytest-cache's functionality to persist
+ data between test runs be aware that we don't serialize sets anymore.
+ Thanks Ronny Pfannschmidt for most of the merging work.
+
+- "-r" option now accepts "a" to include all possible reports, similar
+ to passing "fEsxXw" explicitly (isse960).
+ Thanks Abhijeet Kasurde for the PR.
+
+- avoid python3.5 deprecation warnings by introducing version
+ specific inspection helpers, thanks Michael Droettboom.
+
+- fix issue562: @nose.tools.istest now fully respected.
+
+- fix issue934: when string comparison fails and a diff is too large to display
+ without passing -vv, still show a few lines of the diff.
+ Thanks Florian Bruhin for the report and Bruno Oliveira for the PR.
+
+- fix issue736: Fix a bug where fixture params would be discarded when combined
+ with parametrization markers.
+ Thanks to Markus Unterwaditzer for the PR.
+
+- fix issue710: introduce ALLOW_UNICODE doctest option: when enabled, the
+ ``u`` prefix is stripped from unicode strings in expected doctest output. This
+ allows doctests which use unicode to run in Python 2 and 3 unchanged.
+ Thanks Jason R. Coombs for the report and Bruno Oliveira for the PR.
+
+- parametrize now also generates meaningful test IDs for enum, regex and class
+ objects (as opposed to class instances).
+ Thanks to Florian Bruhin for the PR.
+
+- Add 'warns' to assert that warnings are thrown (like 'raises').
+ Thanks to Eric Hunsberger for the PR.
+
+- Fix issue683: Do not apply an already applied mark. Thanks ojake for the PR.
+
+- Deal with capturing failures better so fewer exceptions get lost to
+ /dev/null. Thanks David Szotten for the PR.
+
+- fix issue730: deprecate and warn about the --genscript option.
+ Thanks Ronny Pfannschmidt for the report and Christian Pommranz for the PR.
+
+- fix issue751: multiple parametrize with ids bug if it parametrizes class with
+ two or more test methods. Thanks Sergey Chipiga for reporting and Jan
+ Bednarik for PR.
+
+- fix issue82: avoid loading conftest files from setup.cfg/pytest.ini/tox.ini
+ files and upwards by default (--confcutdir can still be set to override this).
+ Thanks Bruno Oliveira for the PR.
+
+- fix issue768: docstrings found in python modules were not setting up session
+ fixtures. Thanks Jason R. Coombs for reporting and Bruno Oliveira for the PR.
+
+- added ``tmpdir_factory``, a session-scoped fixture that can be used to create
+ directories under the base temporary directory. Previously this object was
+ installed as a ``_tmpdirhandler`` attribute of the ``config`` object, but now it
+ is part of the official API and using ``config._tmpdirhandler`` is
+ deprecated.
+ Thanks Bruno Oliveira for the PR.
+
+- fix issue808: pytest's internal assertion rewrite hook now implements the
+ optional PEP302 get_data API so tests can access data files next to them.
+ Thanks xmo-odoo for request and example and Bruno Oliveira for
+ the PR.
+
+- rootdir and inifile are now displayed during usage errors to help
+ users diagnose problems such as unexpected ini files whose unknown
+ options are being picked up by pytest. Thanks to Pavel Savchenko for
+ bringing the problem to attention in #821 and Bruno Oliveira for the PR.
+
+- fix issue812: pytest now exits with status code 5 in situations where no
+  tests were run at all, such as when the directory given on the command line
+  does not contain any tests or when a command line option (-k for example)
+  filters out all tests.
+ Thanks Eric Siegerman (issue812) and Bruno Oliveira for the PR.
+
+- Summary bar now is colored yellow for warning
+ situations such as: all tests either were skipped or xpass/xfailed,
+ or no tests were run at all (related to issue500).
+ Thanks Eric Siegerman.
+
+- New ``testpaths`` ini option: list of directories to search for tests
+ when executing pytest from the root directory. This can be used
+ to speed up test collection when a project has well specified directories
+ for tests, being usually more practical than configuring norecursedirs for
+ all directories that do not contain tests.
+ Thanks to Adrian for idea (#694) and Bruno Oliveira for the PR.
+
+- fix issue713: JUnit XML reports for doctest failures.
+ Thanks Punyashloka Biswal.
+
+- fix issue970: internal pytest warnings now appear as "pytest-warnings" in
+ the terminal instead of "warnings", so it is clear for users that those
+ warnings are from pytest and not from the builtin "warnings" module.
+ Thanks Bruno Oliveira.
+
+- Include setup and teardown in junitxml test durations.
+ Thanks Janne Vanhala.
+
+- fix issue735: assertion failures on debug versions of Python 3.4+
+
+- new option ``--import-mode`` to allow changing the test module import
+  behaviour to append to sys.path instead of prepending. This makes it easier
+  to run test modules against installed versions of a package even if the
+ package under test has the same import root. In this example::
+
+ testing/__init__.py
+ testing/test_pkg_under_test.py
+ pkg_under_test/
+
+ the tests will run against the installed version
+ of pkg_under_test when ``--import-mode=append`` is used whereas
+ by default they would always pick up the local version. Thanks Holger Krekel.
+
+- pytester: add method ``TmpTestdir.delete_loaded_modules()``, and call it
+ from ``inline_run()`` to allow temporary modules to be reloaded.
+ Thanks Eduardo Schettino.
+
+- internally refactor pluginmanager API and code so that there
+  is a clear distinction between a pytest-agnostic, rather simple
+  pluginmanager and the PytestPluginManager which adds a lot of
+  behaviour, among it the handling of the local conftest files.
+  In terms of documented methods this is a backward compatible
+  change but it might still break 3rd party plugins which relied on
+  internal details, especially the pluginmanager.add_shutdown() API.
+ Thanks Holger Krekel.
+
+- pluginmanagement: introduce ``pytest.hookimpl`` and
+ ``pytest.hookspec`` decorators for setting impl/spec
+ specific parameters. This substitutes the previous
+ now deprecated use of ``pytest.mark`` which is meant to
+ contain markers for test functions only.
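+
+  For example, a hook wrapper in a conftest.py or plugin might look like this
+  (a sketch, not a prescribed implementation)::
+
+      import pytest
+
+      @pytest.hookimpl(hookwrapper=True)
+      def pytest_runtest_call(item):
+          # code here runs before the remaining pytest_runtest_call impls
+          outcome = yield
+          # code here runs afterwards and can inspect "outcome"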
+
+- write/refine docs for "writing plugins" which now have their
+  own page and are separate from the "using/installing plugins" page.
+
+- fix issue732: properly unregister plugins from any hook calling
+ sites allowing to have temporary plugins during test execution.
+
+- deprecate and warn about ``__multicall__`` argument in hook
+ implementations. Use the ``hookwrapper`` mechanism instead already
+ introduced with pytest-2.7.
+
+- speed up pytest's own test suite considerably by using inprocess
+  tests by default (testrun can be modified with --runpytest=subprocess
+  to create subprocesses in many places instead). The main
+  API to run pytest in a test is "runpytest()"; use "runpytest_subprocess"
+  or "runpytest_inprocess" if you need a particular way of running
+  the test. In all cases you get back a RunResult but the inprocess
+  one will also have a "reprec" attribute with the recorded events/reports.
+
+- fix monkeypatch.setattr("x.y", raising=False) to actually not raise
+ if "y" is not a pre-existing attribute. Thanks Florian Bruhin.
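+
+  A minimal sketch (the patched attribute name is made up and does not
+  pre-exist)::
+
+      def test_set_missing_attribute(monkeypatch):
+          # would raise AttributeError without raising=False
+          monkeypatch.setattr("os.path.fake_helper", lambda p: p, raising=False)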
+
+- fix issue741: make running output from testdir.run copy/pasteable
+ Thanks Bruno Oliveira.
+
+- add a new ``--noconftest`` argument which ignores all ``conftest.py`` files.
+
+- add ``file`` and ``line`` attributes to JUnit-XML output.
+
+- fix issue890: changed extension of all documentation files from ``txt`` to
+ ``rst``. Thanks to Abhijeet for the PR.
+
+- fix issue714: add ability to apply indirect=True parameter on particular argnames.
+ Thanks Elizaveta239.
+
+- fix issue957: "# doctest: SKIP" option will now register doctests as SKIPPED
+ rather than PASSED.
+ Thanks Thomas Grainger for the report and Bruno Oliveira for the PR.
+
+- issue951: add new record_xml_property fixture, that supports logging
+ additional information on xml output. Thanks David Diaz for the PR.
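+
+  A minimal sketch (the property name and value are made up)::
+
+      def test_record_example(record_xml_property):
+          # adds an extra <property> entry to the junitxml output for this test
+          record_xml_property("example_key", 1)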
+
+- issue949: paths after normal options (for example ``-s``, ``-v``, etc) are now
+ properly used to discover ``rootdir`` and ``ini`` files.
+ Thanks Peter Lauri for the report and Bruno Oliveira for the PR.
+
+2.7.3 (compared to 2.7.2)
+=============================
+
+- Allow 'dev', 'rc', or other non-integer version strings in ``importorskip``.
+ Thanks to Eric Hunsberger for the PR.
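+
+  A minimal sketch (the module name and version string are made up)::
+
+      import pytest
+
+      # skips the importing test module unless at least this pre-release is installed
+      somelib = pytest.importorskip("somelib", minversion="1.0.dev1")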
+
+- fix issue856: consider --color parameter in all outputs (for example
+ --fixtures). Thanks Barney Gale for the report and Bruno Oliveira for the PR.
+
+- fix issue855: passing str objects as ``plugins`` argument to pytest.main
+ is now interpreted as a module name to be imported and registered as a
+ plugin, instead of silently having no effect.
+ Thanks xmo-odoo for the report and Bruno Oliveira for the PR.
+
+- fix issue744: fix for ast.Call changes in Python 3.5+. Thanks
+ Guido van Rossum, Matthias Bussonnier, Stefan Zimmermann and
+ Thomas Kluyver.
+
+- fix issue842: applying markers in classes no longer propagates these markers
+  to superclasses which also have markers.
+ Thanks xmo-odoo for the report and Bruno Oliveira for the PR.
+
+- preserve warning functions after call to pytest.deprecated_call. Thanks
+ Pieter Mulder for PR.
+
+- fix issue854: autouse yield_fixtures defined as class members of
+  unittest.TestCase subclasses now work as expected.
+  Thanks xmo-odoo for the report and Bruno Oliveira for the PR.
+
+- fix issue833: --fixtures now shows all fixtures of collected test files, instead of just the
+ fixtures declared on the first one.
+ Thanks Florian Bruhin for reporting and Bruno Oliveira for the PR.
+
+- fix issue863: skipped tests now report the correct reason when a skip/xfail
+ condition is met when using multiple markers.
+ Thanks Raphael Pierzina for reporting and Bruno Oliveira for the PR.
+
+- optimized tmpdir fixture initialization, which should make test sessions
+  faster (especially when using pytest-xdist). The only visible effect
+ is that now pytest uses a subdirectory in the $TEMP directory for all
+ directories created by this fixture (defaults to $TEMP/pytest-$USER).
+ Thanks Bruno Oliveira for the PR.
+
+2.7.2 (compared to 2.7.1)
+=============================
+
+- fix issue767: pytest.raises value attribute does not contain the exception
+ instance on Python 2.6. Thanks Eric Siegerman for providing the test
+ case and Bruno Oliveira for PR.
+
+- Automatically create directory for junitxml and results log.
+ Thanks Aron Curzon.
+
+- fix issue713: JUnit XML reports for doctest failures.
+ Thanks Punyashloka Biswal.
+
+- fix issue735: assertion failures on debug versions of Python 3.4+
+ Thanks Benjamin Peterson.
+
+- fix issue114: skipif marker reports to internal skipping plugin;
+ Thanks Floris Bruynooghe for reporting and Bruno Oliveira for the PR.
+
+- fix issue748: unittest.SkipTest reports to internal pytest unittest plugin.
+ Thanks Thomas De Schampheleire for reporting and Bruno Oliveira for the PR.
+
+- fix issue718: failed to create representation of sets containing unsortable
+ elements in python 2. Thanks Edison Gustavo Muenz.
+
+- fix issue756, fix issue752 (and similar issues): depend on py-1.4.29
+ which has a refined algorithm for traceback generation.
+
+
+2.7.1 (compared to 2.7.0)
+=============================
+
+- fix issue731: do not get confused by the braces which may be present
+ and unbalanced in an object's repr while collapsing False
+ explanations. Thanks Carl Meyer for the report and test case.
+
+- fix issue553: properly handling inspect.getsourcelines failures in
+  FixtureLookupError which would lead to an internal error,
+  obfuscating the original problem. Thanks talljosh for initial
+  diagnose/patch and Bruno Oliveira for final patch.
+
+- fix issue660: properly report scope-mismatch-access errors
+ independently from ordering of fixture arguments. Also
+ avoid the pytest internal traceback which does not provide
+ information to the user. Thanks Holger Krekel.
+
+- streamlined and documented release process. Also all versions
+ (in setup.py and documentation generation) are now read
+ from _pytest/__init__.py. Thanks Holger Krekel.
+
+- fixed docs to remove the notion that yield-fixtures are experimental.
+ They are here to stay :) Thanks Bruno Oliveira.
+
+- Support building wheels by using environment markers for the
+ requirements. Thanks Ionel Maries Cristian.
+
+- fixed regression to 2.6.4 which surfaced e.g. in lost stdout capture printing
+ when tests raised SystemExit. Thanks Holger Krekel.
+
+- reintroduced _pytest fixture of the pytester plugin which is used
+ at least by pytest-xdist.
+
+2.7.0 (compared to 2.6.4)
+=============================
+
+- fix issue435: make reload() work when assert rewriting is active.
+ Thanks Daniel Hahler.
+
+- fix issue616: conftest.py files and their contained fixtures are now
+  properly considered for visibility, independently from the exact
+ current working directory and test arguments that are used.
+ Many thanks to Eric Siegerman and his PR235 which contains
+ systematic tests for conftest visibility and now passes.
+ This change also introduces the concept of a ``rootdir`` which
+ is printed as a new pytest header and documented in the pytest
+ customize web page.
+
+- change reporting of "diverted" tests, i.e. tests that are collected
+ in one file but actually come from another (e.g. when tests in a test class
+ come from a base class in a different file). We now show the nodeid
+ and indicate via a postfix the other file.
+
+- add ability to set command line options by environment variable PYTEST_ADDOPTS.
+
+- added documentation on the new pytest-dev teams on bitbucket and
+ github. See https://pytest.org/latest/contributing.html .
+ Thanks to Anatoly for pushing and initial work on this.
+
+- fix issue650: new option ``--doctest-ignore-import-errors`` which
+ will turn import errors in doctests into skips. Thanks Charles Cloud
+ for the complete PR.
+
+- fix issue655: work around different ways that cause python2/3
+ to leak sys.exc_info into fixtures/tests causing failures in 3rd party code
+
+- fix issue615: assertion re-writing did not correctly escape % signs
+ when formatting boolean operations, which tripped over mixing
+ booleans with modulo operators. Thanks to Tom Viner for the report,
+ triaging and fix.
+
+- implement issue351: add ability to specify parametrize ids as a callable
+ to generate custom test ids. Thanks Brianna Laugher for the idea and
+ implementation.
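+
+  A minimal sketch of a callable ``ids`` argument (all names are made up)::
+
+      import pytest
+
+      def idfn(val):
+          # turn every parameter value into a readable id part
+          return "value-{0}".format(val)
+
+      @pytest.mark.parametrize("value", [1, 2, 3], ids=idfn)
+      def test_positive(value):
+          assert value > 0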
+
+- introduce and document new hookwrapper mechanism useful for plugins
+ which want to wrap the execution of certain hooks for their purposes.
+ This supersedes the undocumented ``__multicall__`` protocol which
+ pytest itself and some external plugins use. Note that pytest-2.8
+ is scheduled to drop supporting the old ``__multicall__``
+ and only support the hookwrapper protocol.
+
+- majorly speed up invocation of plugin hooks
+
+- use hookwrapper mechanism in builtin pytest plugins.
+
+- add a doctest ini option for doctest flags, thanks Holger Peters.
+
+- add note to docs that if you want to mark a parameter and the
+ parameter is a callable, you also need to pass in a reason to disambiguate
+ it from the "decorator" case. Thanks Tom Viner.
+
+- "python_classes" and "python_functions" options now support glob-patterns
+ for test discovery, as discussed in issue600. Thanks Ldiary Translations.
+
+- allow to override parametrized fixtures with non-parametrized ones and vice versa (bubenkoff).
+
+- fix issue463: raise specific error for 'parameterize' misspelling (pfctdayelise).
+
+- On failure, the ``sys.last_value``, ``sys.last_type`` and
+ ``sys.last_traceback`` are set, so that a user can inspect the error
+ via postmortem debugging (almarklein).
+
+2.6.4
+=====
+
+- Improve assertion failure reporting on iterables, by using ndiff and
+ pprint.
+
+- removed outdated japanese docs from source tree.
+
+- docs for "pytest_addhooks" hook. Thanks Bruno Oliveira.
+
+- updated plugin index docs. Thanks Bruno Oliveira.
+
+- fix issue557: with "-k" we only allow the old style "-" for negation
+ at the beginning of strings and even that is deprecated. Use "not" instead.
+ This should allow to pick parametrized tests where "-" appeared in the parameter.
+
+- fix issue604: Escape % character in the assertion message.
+
+- fix issue620: add explanation in the --genscript target about what
+ the binary blob means. Thanks Dinu Gherman.
+
+- fix issue614: fixed pastebin support.
+
+
+2.6.3
+=====
+
+- fix issue575: xunit-xml was reporting collection errors as failures
+ instead of errors, thanks Oleg Sinyavskiy.
+
+- fix issue582: fix setuptools example, thanks Laszlo Papp and Ronny
+ Pfannschmidt.
+
+- Fix infinite recursion bug when pickling capture.EncodedFile, thanks
+ Uwe Schmitt.
+
+- fix issue589: fix bad interaction with numpy and others when showing
+  exceptions. Check for the precise "maximum recursion depth exceeded" exception
+  instead of presuming any RuntimeError is that one (implemented in py
+ dep). Thanks Charles Cloud for analysing the issue.
+
+- fix conftest related fixture visibility issue: when running with a
+  CWD outside a test package pytest would get fixture discovery wrong.
+  Thanks to Wolfgang Schnerring for figuring out a reproducible example.
+
+- Introduce pytest_enter_pdb hook (needed e.g. by pytest_timeout to cancel the
+ timeout when interactively entering pdb). Thanks Wolfgang Schnerring.
+
+- check xfail/skip also with non-python function test items. Thanks
+ Floris Bruynooghe.
+
+2.6.2
+=====
+
+- Added function pytest.freeze_includes(), which makes it easy to embed
+ pytest into executables using tools like cx_freeze.
+ See docs for examples and rationale. Thanks Bruno Oliveira.
+
+- Improve assertion rewriting cache invalidation precision.
+
+- fixed issue561: adapt autouse fixture example for python3.
+
+- fixed issue453: assertion rewriting issue with __repr__ containing
+ "\n{", "\n}" and "\n~".
+
+- fix issue560: correctly display code if an "else:" or "finally:" is
+ followed by statements on the same line.
+
+- Fix example in monkeypatch documentation, thanks t-8ch.
+
+- fix issue572: correct tmpdir doc example for python3.
+
+- Do not mark as universal wheel because Python 2.6 is different from
+ other builds due to the extra argparse dependency. Fixes issue566.
+ Thanks sontek.
+
+- Implement issue549: user-provided assertion messages now no longer
+  replace the py.test introspection message but are shown in addition
+  to it.
+
+2.6.1
+=====
+
+- No longer show line numbers in the --verbose output, the output is now
+ purely the nodeid. The line number is still shown in failure reports.
+ Thanks Floris Bruynooghe.
+
+- fix issue437 where assertion rewriting could cause pytest-xdist slaves
+ to collect different tests. Thanks Bruno Oliveira.
+
+- fix issue555: add "errors" attribute to capture-streams to satisfy
+ some distutils and possibly other code accessing sys.stdout.errors.
+
+- fix issue547 capsys/capfd also work when output capturing ("-s") is disabled.
+
+- address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via
+ an optional "raises=EXC" argument where EXC can be a single exception
+ or a tuple of exception classes. Thanks David Mohr for the complete
+ PR.
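+
+  A minimal sketch of the new argument (the test name is made up)::
+
+      import pytest
+
+      @pytest.mark.xfail(raises=ZeroDivisionError)
+      def test_known_division_bug():
+          1 / 0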
+
+- fix integration of pytest with unittest.mock.patch decorator when
+ it uses the "new" argument. Thanks Nicolas Delaby for test and PR.
+
+- fix issue with detecting conftest files if the arguments contain
+ "::" node id specifications (copy pasted from "-v" output)
+
+- fix issue544 by only removing "@NUM" at the end of "::" separated parts
+  and only if the part has a ".py" extension
+
+- don't use py.std import helper, rather import things directly.
+ Thanks Bruno Oliveira.
+
+2.6
+===
+
+- Cache exceptions from fixtures according to their scope (issue 467).
+
+- fix issue537: Avoid importing old assertion reinterpretation code by default.
+
+- fix issue364: shorten and enhance tracebacks representation by default.
+ The new "--tb=auto" option (default) will only display long tracebacks
+ for the first and last entry. You can get the old behaviour of printing
+ all entries as long entries with "--tb=long". Also short entries by
+ default are now printed very similarly to "--tb=native" ones.
+
+- fix issue514: teach assertion reinterpretation about private class attributes
+
+- change -v output to include full node IDs of tests. Users can copy
+ a node ID from a test run, including line number, and use it as a
+ positional argument in order to run only a single test.
+
+- fix issue 475: fail early and comprehensibly when calling
+  pytest.raises with a wrong exception type.
+
+- fix issue516: tell in getting-started about current dependencies.
+
+- cleanup setup.py a bit and specify supported versions. Thanks Jurko
+ Gospodnetic for the PR.
+
+- change XPASS colour to yellow rather than red when tests are run
+  with -v.
+
+- fix issue473: work around mock putting an unbound method into a class
+ dict when double-patching.
+
+- fix issue498: if a fixture finalizer fails, make sure that
+ the fixture is still invalidated.
+
+- fix issue453: the result of the pytest_assertrepr_compare hook now gets
+  its newlines escaped so that format_exception does not blow up.
+
+- internal new warning system: pytest will now produce warnings when
+ it detects oddities in your test collection or execution.
+ Warnings are ultimately sent to a new pytest_logwarning hook which is
+ currently only implemented by the terminal plugin which displays
+ warnings in the summary line and shows more details when -rw (report on
+ warnings) is specified.
+
+- change skips into warnings for test classes with an __init__ and
+ callables in test modules which look like a test but are not functions.
+
+- fix issue436: improved finding of initial conftest files from command
+ line arguments by using the result of parse_known_args rather than
+ the previous flaky heuristics. Thanks Marc Abramowitz for tests
+ and initial fixing approaches in this area.
+
+- fix issue #479: properly handle nose/unittest(2) SkipTest exceptions
+ during collection/loading of test modules. Thanks to Marc Schlaich
+ for the complete PR.
+
+- fix issue490: include pytest_load_initial_conftests in documentation
+ and improve docstring.
+
+- fix issue472: clarify that ``pytest.config.getvalue()`` cannot work
+ if it's triggered ahead of command line parsing.
+
+- merge PR123: improved integration with mock.patch decorator on tests.
+
+- fix issue412: messing with stdout/stderr FD-level streams is now
+ captured without crashes.
+
+- fix issue483: trial/py33 works now properly. Thanks Daniel Grana for PR.
+
+- improve example for pytest integration with "python setup.py test"
+ which now has a generic "-a" or "--pytest-args" option where you
+ can pass additional options as a quoted string. Thanks Trevor Bekolay.
+
+- simplified internal capturing mechanism and made it more robust
+ against tests or setups changing FD1/FD2, also better integrated
+ now with pytest.pdb() in single tests.
+
+- improvements to pytest's own test-suite leakage detection, courtesy of PRs
+ from Marc Abramowitz
+
+- fix issue492: avoid leak in test_writeorg. Thanks Marc Abramowitz.
+
+- fix issue493: don't run tests in doc directory with ``python setup.py test``
+ (use tox -e doctesting for that)
+
+- fix issue486: better reporting and handling of early conftest loading failures
+
+- some cleanup and simplification of internal conftest handling.
+
+- work a bit harder to break reference cycles when catching exceptions.
+ Thanks Jurko Gospodnetic.
+
+- fix issue443: fix skip examples to use proper comparison. Thanks Alex
+ Groenholm.
+
+- support nose-style ``__test__`` attribute on modules, classes and
+ functions, including unittest-style Classes. If set to False, the
+ test will not be collected.
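+
+  A minimal sketch (the class name is made up)::
+
+      class TestHelperMixin:
+          # nose-style flag: pytest will not collect this class
+          __test__ = False
+
+          def test_never_collected(self):
+              assert False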
+
+- fix issue512: show "<notset>" for arguments which might not be set
+ in monkeypatch plugin. Improves output in documentation.
+
+
+2.5.2
+=====
+
+- fix issue409 -- better interoperate with cx_freeze by not
+ trying to import from collections.abc which causes problems
+ for py27/cx_freeze. Thanks Wolfgang L. for reporting and tracking it down.
+
+- fixed docs and code to use "pytest" instead of "py.test" almost everywhere.
+ Thanks Jurko Gospodnetic for the complete PR.
+
+- fix issue425: mention at end of "py.test -h" that --markers
+ and --fixtures work according to specified test path (or current dir)
+
+- fix issue413: exceptions with unicode attributes are now printed
+ correctly also on python2 and with pytest-xdist runs. (the fix
+ requires py-1.4.20)
+
+- copy, cleanup and integrate py.io capture
+ from pylib 1.4.20.dev2 (rev 13d9af95547e)
+
+- address issue416: clarify docs as to conftest.py loading semantics
+
+- fix issue429: comparing byte strings with non-ascii chars in assert
+  expressions now works better. Thanks Floris Bruynooghe.
+
+- make capfd/capsys.capture private, it's unused and shouldn't be exposed
+
+
+2.5.1
+=====
+
+- merge new documentation styling PR from Tobias Bieniek.
+
+- fix issue403: allow parametrize of multiple same-name functions within
+ a collection node. Thanks Andreas Kloeckner and Alex Gaynor for reporting
+ and analysis.
+
+- Allow parameterized fixtures to specify the ID of the parameters by
+ adding an ids argument to pytest.fixture() and pytest.yield_fixture().
+ Thanks Floris Bruynooghe.
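+
+  A minimal sketch (the fixture name and ids are made up)::
+
+      import pytest
+
+      @pytest.fixture(params=[1, 10], ids=["small", "large"])
+      def amount(request):
+          return request.param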
+
+- fix issue404 by always using the binary xml escape in the junitxml
+ plugin. Thanks Ronny Pfannschmidt.
+
+- fix issue407: fix addoption docstring to point to argparse instead of
+ optparse. Thanks Daniel D. Wright.
+
+
+
+2.5.0
+=====
+
+- dropped python2.5 from automated release testing of pytest itself
+ which means it's probably going to break soon (but still works
+ with this release we believe).
+
+- simplified and fixed implementation for calling finalizers when
+  parametrized fixtures or function arguments are involved. finalization
+  is now performed lazily at setup time instead of in the "teardown phase".
+  While this might sound odd at first, it helps to ensure that we are
+  correctly handling setup/teardown even in complex code. User-level code
+  should not be affected unless it's implementing the pytest_runtest_teardown
+  hook and expecting that certain fixture instances are torn down within it
+  (very unlikely and would have been unreliable anyway).
+
+- PR90: add --color=yes|no|auto option to force terminal coloring
+ mode ("auto" is default). Thanks Marc Abramowitz.
+
+- fix issue319 - correctly show unicode in assertion errors. Many
+ thanks to Floris Bruynooghe for the complete PR. Also means
+ we depend on py>=1.4.19 now.
+
+- fix issue396 - correctly sort and finalize class-scoped parametrized
+ tests independently from number of methods on the class.
+
+- refix issue323 in a better way -- parametrization should now never
+ cause Runtime Recursion errors because the underlying algorithm
+ for re-ordering tests per-scope/per-fixture is not recursive
+ anymore (it was tail-call recursive before which could lead
+ to problems for more than >966 non-function scoped parameters).
+
+- fix issue290 - there is preliminary support now for parametrizing
+  with repeated same values (sometimes useful to test if calling
+  a second time works the same as the first time).
+
+- close issue240 - document precisely how pytest module importing
+ works, discuss the two common test directory layouts, and how it
+ interacts with PEP420-namespace packages.
+
+- fix issue246 fix finalizer order to be LIFO on independent fixtures
+ depending on a parametrized higher-than-function scoped fixture.
+ (was quite some effort so please bear with the complexity of this sentence :)
+ Thanks Ralph Schmitt for the precise failure example.
+
+- fix issue244 by implementing a special index for parameters to only use
+  indices for parametrized test ids
+
+- fix issue287 by running all finalizers but saving the exception
+ from the first failing finalizer and re-raising it so teardown will
+ still have failed. We reraise the first failing exception because
+ it might be the cause for other finalizers to fail.
+
+- fix ordering when mock.patch or other standard decorator-wrappings
+  are used with test methods. This fixes issue346 and should
+ help with random "xdist" collection failures. Thanks to
+ Ronny Pfannschmidt and Donald Stufft for helping to isolate it.
+
+- fix issue357 - special case "-k" expressions to allow for
+ filtering with simple strings that are not valid python expressions.
+ Examples: "-k 1.3" matches all tests parametrized with 1.3.
+ "-k None" filters all tests that have "None" in their name
+ and conversely "-k 'not None'".
+ Previously these examples would raise syntax errors.
+
+- fix issue384 by removing the trial support code
+ since the unittest compat enhancements allow
+ trial to handle it on its own
+
+- don't hide an ImportError when importing a plugin produces one.
+ fixes issue375.
+
+- fix issue275 - allow usefixtures and autouse fixtures
+ for running doctest text files.
+
+- fix issue380 by making --resultlog only rely on longrepr instead
+ of the "reprcrash" attribute which only exists sometimes.
+
+- address issue122: allow @pytest.fixture(params=iterator) by exploding
+ into a list early on.
+
+- fix pexpect-3.0 compatibility for pytest's own tests.
+ (fixes issue386)
+
+- allow nested parametrize-value markers, thanks James Lan for the PR.
+
+- fix unicode handling with new monkeypatch.setattr(import_path, value)
+ API. Thanks Rob Dennis. Fixes issue371.
+
+- fix unicode handling with junitxml, fixes issue368.
+
+- In assertion rewriting mode on Python 2, fix the detection of coding
+ cookies. See issue #330.
+
+- make "--runxfail" turn imperative pytest.xfail calls into no ops
+ (it already did neutralize pytest.mark.xfail markers)
+
+- refine pytest / pkg_resources interactions: The AssertionRewritingHook
+ PEP302 compliant loader now registers itself with setuptools/pkg_resources
+ properly so that the pkg_resources.resource_stream method works properly.
+ Fixes issue366. Thanks for the investigations and full PR to Jason R. Coombs.
+
+- pytestconfig fixture is now session-scoped as it is the same object during the
+ whole test run. Fixes issue370.
+
+- avoid one surprising case of marker malfunction/confusion::
+
+ @pytest.mark.some(lambda arg: ...)
+ def test_function():
+
+  would not work correctly because pytest assumes @pytest.mark.some
+  gets a function to be decorated already. We now at least detect if this
+  arg is a lambda and thus the example will work. Thanks Alex Gaynor
+ for bringing it up.
+
+- xfail a test on pypy that checks wrong encoding/ascii (pypy does
+ not error out). fixes issue385.
+
+- internally make varnames() deal with a class's __init__,
+  although it's not needed by pytest itself atm. Also
+ fix caching. Fixes issue376.
+
+- fix issue221 - handle importing of namespace-package with no
+ __init__.py properly.
+
+- refactor internal FixtureRequest handling to avoid monkeypatching.
+ One of the positive user-facing effects is that the "request" object
+ can now be used in closures.
+
+- fixed version comparison in pytest.importorskip(modname, minverstring)
+
+- fix issue377 by clarifying in the nose-compat docs that pytest
+ does not duplicate the unittest-API into the "plain" namespace.
+
+- fix verbose reporting for @mock'd test functions
+
+2.4.2
+=====
+
+- on Windows require colorama and a newer py lib so that py.io.TerminalWriter()
+ now uses colorama instead of its own ctypes hacks. (fixes issue365)
+ thanks Paul Moore for bringing it up.
+
+- fix "-k" matching of tests where "repr" and "attr" and other names would
+ cause wrong matches because of an internal implementation quirk
+ (don't ask) which is now properly implemented. fixes issue345.
+
+- avoid the tmpdir fixture creating too-long filenames especially
+  when parametrization is used (issue354)
+
+- fix pytest-pep8 and pytest-flakes / pytest interactions
+ (collection names in mark plugin was assuming an item always
+ has a function which is not true for those plugins etc.)
+ Thanks Andi Zeidler.
+
+- introduce node.get_marker/node.add_marker API for plugins
+ like pytest-pep8 and pytest-flakes to avoid the messy
+ details of the node.keywords pseudo-dicts. Adapted
+ docs.
+
+- remove attempt to "dup" stdout at startup as it's icky.
+ the normal capturing should catch enough possibilities
+ of tests messing up standard FDs.
+
+- add pluginmanager.do_configure(config) as a link to
+ config.do_configure() for plugin-compatibility
+
+2.4.1
+=====
+
+- When using parser.addoption() unicode arguments to the
+ "type" keyword should also be converted to the respective types.
+ thanks Floris Bruynooghe, @dnozay. (fixes issue360 and issue362)
+
+- fix dotted filename completion when using argcomplete
+ thanks Anthon van der Neuth. (fixes issue361)
+
+- fix regression when a 1-tuple ("arg",) is used for specifying
+ parametrization (the values of the parametrization were passed
+ nested in a tuple). Thanks Donald Stufft.
+
+- merge doc typo fixes, thanks Andy Dirnberger
+
+2.4
+===
+
+known incompatibilities:
+
+- if calling --genscript from python2.7 or above, you only get a
+ standalone script which works on python2.7 or above. Use Python2.6
+ to also get a python2.5 compatible version.
+
+- all xunit-style teardown methods (nose-style, pytest-style,
+ unittest-style) will not be called if the corresponding setup method failed,
+ see issue322 below.
+
+- the pytest_plugin_unregister hook wasn't ever properly called
+ and there is no known implementation of the hook - so it got removed.
+
+- pytest.fixture-decorated functions cannot be generators (i.e. use
+ yield) anymore. This change might be reversed in 2.4.1 if it causes
+ unforeseen real-life issues. However, you can always write and return
+ an inner function/generator and change the fixture consumer to iterate
+ over the returned generator. This change was done in lieu of the new
+ ``pytest.yield_fixture`` decorator, see below.
+
+new features:
+
+- experimentally introduce a new ``pytest.yield_fixture`` decorator
+  which accepts exactly the same parameters as pytest.fixture but
+  mandates a ``yield`` statement instead of a ``return`` statement from
+  fixture functions. This allows direct integration with "with-style"
+ context managers in fixture functions and generally avoids registering
+ of finalization callbacks in favour of treating the "after-yield" as
+ teardown code. Thanks Andreas Pelme, Vladimir Keleshev, Floris
+ Bruynooghe, Ronny Pfannschmidt and many others for discussions.
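+
+  A minimal sketch of the new decorator (the fixture name is made up)::
+
+      import pytest
+
+      @pytest.yield_fixture
+      def opened_file(tmpdir):
+          f = tmpdir.join("data.txt").open("w")
+          yield f          # the value provided to tests using the fixture
+          f.close()        # everything after the yield runs as teardown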
+
+- allow boolean expression directly with skipif/xfail
+ if a "reason" is also specified. Rework skipping documentation
+ to recommend "condition as booleans" because it prevents surprises
+ when importing markers between modules. Specifying conditions
+ as strings will remain fully supported.
+
+- reporting: color the last line red or green depending on whether
+  failures/errors occurred or everything passed. thanks Christian
+ Theunert.
+
+- make "import pdb ; pdb.set_trace()" work natively wrt capturing (no
+ "-s" needed anymore), making ``pytest.set_trace()`` a mere shortcut.
+
+- fix issue181: --pdb now also works on collect errors (and
+  on internal errors). This was implemented by a slight internal
+  refactoring and the introduction of the new
+  ``pytest_exception_interact`` hook (see next item).
+
+- fix issue341: introduce new experimental hook for IDEs/terminals to
+ intercept debugging: ``pytest_exception_interact(node, call, report)``.
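+
+  A sketch of what an implementation in a conftest.py could look like (the
+  print is purely illustrative)::
+
+      def pytest_exception_interact(node, call, report):
+          # called when an exception was raised and can be interacted with
+          print("exception raised in %s" % (node.nodeid,))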
+
+- new monkeypatch.setattr() variant to provide a shorter
+ invocation for patching out classes/functions from modules:
+
+ monkeypatch.setattr("requests.get", myfunc)
+
+ will replace the "get" function of the "requests" module with ``myfunc``.
+
+- fix issue322: tearDownClass is not run if setUpClass failed. Thanks
+  Mathieu Agopian for the initial fix. Also make all pytest/nose
+  finalizers mimic the same generic behaviour: if a setupX exists and
+  fails, don't run teardownX. This internally introduces a new
+  "node.addfinalizer()" helper which can only be called during the setup
+  phase of a node.
+
+- simplify pytest.mark.parametrize() signature: allow to pass a
+  comma-separated string to specify argnames. For example:
+ ``pytest.mark.parametrize("input,expected", [(1,2), (2,3)])``
+ works as well as the previous:
+ ``pytest.mark.parametrize(("input", "expected"), ...)``.
+
+- add support for setUpModule/tearDownModule detection, thanks Brian Okken.
+
+- integrate tab-completion on options through use of "argcomplete".
+ Thanks Anthon van der Neut for the PR.
+
+- change option names to be hyphen-separated long options but keep the
+ old spelling backward compatible. py.test -h will only show the
+ hyphenated version, for example "--collect-only" but "--collectonly"
+ will remain valid as well (for backward-compat reasons). Many thanks to
+ Anthon van der Neut for the implementation and to Hynek Schlawack for
+ pushing us.
+
+- fix issue 308 - allow to mark/xfail/skip individual parameter sets
+ when parametrizing. Thanks Brianna Laugher.
+
+- call new experimental pytest_load_initial_conftests hook to allow
+ 3rd party plugins to do something before a conftest is loaded.
+
+Bug fixes:
+
+- fix issue358 - capturing options are now parsed more properly
+ by using a new parser.parse_known_args method.
+
+- pytest now uses argparse instead of optparse (thanks Anthon) which
+ means that "argparse" is added as a dependency if installing into python2.6
+ environments or below.
+
+- fix issue333: fix a case of bad unittest/pytest hook interaction.
+
+- PR27: correctly handle nose.SkipTest during collection. Thanks
+ Antonio Cuni, Ronny Pfannschmidt.
+
+- fix issue355: junitxml puts name="pytest" attribute to testsuite tag.
+
+- fix issue336: autouse fixture in plugins should work again.
+
+- fix issue279: improve object comparisons on assertion failure
+ for standard datatypes and recognise collections.abc. Thanks to
+ Brianna Laugher and Mathieu Agopian.
+
+- fix issue317: assertion rewriter support for the is_package method
+
+- fix issue335: document py.code.ExceptionInfo() object returned
+ from pytest.raises(), thanks Mathieu Agopian.
+
+- remove implicit distribute_setup support from setup.py.
+
+- fix issue305: ignore any problems when writing pyc files.
+
+- SO-17664702: call fixture finalizers even if the fixture function
+ partially failed (finalizers would not always be called before)
+
+- fix issue320 - fix class scope for fixtures when mixed with
+ module-level functions. Thanks Anatloy Bubenkoff.
+
+- you can specify "-q" or "-qq" to get different levels of "quieter"
+ reporting (thanks Katarzyna Jachim)
+
+- fix issue300 - Fix order of conftest loading when starting py.test
+ in a subdirectory.
+
+- fix issue323 - sorting of many module-scoped arg parametrizations
+
+- make sessionfinish hooks execute with the same cwd-context as at
+  session start (helps fix behaviour of plugins which write output files
+  with relative paths, such as pytest-cov)
+
+- fix issue316 - properly reference collection hooks in docs
+
+- fix issue 306 - cleanup of -k/-m options to only match markers/test
+ names/keywords respectively. Thanks Wouter van Ackooy.
+
+- improved doctest counting for doctests in python modules --
+ files without any doctest items will not show up anymore
+ and doctest examples are counted as separate test items.
+ thanks Danilo Bellini.
+
+- fix issue245 by depending on the released py-1.4.14
+ which fixes py.io.dupfile to work with files with no
+ mode. Thanks Jason R. Coombs.
+
+- fix junitxml generation when test output contains control characters,
+ addressing issue267, thanks Jaap Broekhuizen
+
+- fix issue338: honor --tb style for setup/teardown errors as well. Thanks Maho.
+
+- fix issue307 - use yaml.safe_load in example, thanks Mark Eichin.
+
+- better parametrize error messages, thanks Brianna Laugher
+
+- pytest_terminal_summary(terminalreporter) hooks can now use
+ ".section(title)" and ".line(msg)" methods to print extra
+ information at the end of a test run.
+
+2.3.5
+=====
+
+- fix issue169: respect --tb=style with setup/teardown errors as well.
+
+- never consider a fixture function for test function collection
+
+- allow re-running of test items / helps to fix pytest-reruntests plugin
+ and also help to keep less fixture/resource references alive
+
+- put captured stdout/stderr into junitxml output even for passing tests
+ (thanks Adam Goucher)
+
+- Issue 265 - integrate nose setup/teardown with setupstate
+  so it doesn't try to tear down if it did not set up
+
+- issue 271 - don't write junitxml on slave nodes
+
+- Issue 274 - don't try to show full doctest example
+  when doctest does not know the example location
+
+- issue 280 - disable assertion rewriting on buggy CPython 2.6.0
+
+- inject "getfixture()" helper to retrieve fixtures from doctests,
+ thanks Andreas Zeidler
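+
+  A minimal sketch of a text-file doctest using the injected helper::
+
+      >>> tmp = getfixture('tmpdir')
+      >>> tmp.check(dir=1)
+      True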
+
+- issue 259 - when assertion rewriting, be consistent with the default
+ source encoding of ASCII on Python 2
+
+- issue 251 - report a skip instead of ignoring classes with init
+
+- issue250 unicode/str mixes in parametrization names and values now works
+
+- issue257, assertion-triggered compilation of source ending in a
+ comment line doesn't blow up in python2.5 (fixed through py>=1.4.13.dev6)
+
+- fix --genscript option to generate standalone scripts that also
+ work with python3.3 (importer ordering)
+
+- issue171 - in assertion rewriting, show the repr of some
+ global variables
+
+- fix option help for "-k"
+
+- move long description of distribution into README.rst
+
+- improve docstring for metafunc.parametrize()
+
+- fix bug where using capsys with pytest.set_trace() in a test
+ function would break when looking at capsys.readouterr()
+
+- allow to specify prefixes starting with "_" when
+ customizing python_functions test discovery. (thanks Graham Horler)
+
+- improve PYTEST_DEBUG tracing output by putting
+  extra data on new lines with additional indent
+
+- ensure OutcomeExceptions like skip/fail have initialized exception attributes
+
+- issue 260 - don't use nose special setup on plain unittest cases
+
+- fix issue134 - print the collect errors that prevent running specified test items
+
+- fix issue266 - accept unicode in MarkEvaluator expressions
+
+2.3.4
+=====
+
+- yielded test functions will now have autouse-fixtures active but
+  cannot accept fixtures as funcargs - it's anyway recommended to
+  use the post-2.0 parametrize features instead of yield, see:
+  http://pytest.org/latest/example/parametrize.html
+- fix autouse-issue where autouse-fixtures would not be discovered
+  if defined in an a/conftest.py file and tests in a/tests/test_some.py
+- fix issue226 - LIFO ordering for fixture teardowns
+- fix issue224 - invocations with >256 char arguments now work
+- fix issue91 - add/discuss package/directory level setups in example
+- allow to dynamically define markers via
+ item.keywords[...]=assignment integrating with "-m" option
+- make "-k" accept an expressions the same as with "-m" so that one
+ can write: -k "name1 or name2" etc. This is a slight incompatibility
+ if you used special syntax like "TestClass.test_method" which you now
+ need to write as -k "TestClass and test_method" to match a certain
+ method in a certain test class.
+
+2.3.3
+=====
+
+- fix issue214 - parse modules that contain special objects like e.g.
+  flask's request object which blows up on getattr access if no request
+ is active. thanks Thomas Waldmann.
+
+- fix issue213 - allow to parametrize with values like numpy arrays that
+ do not support an __eq__ operator
+
+- fix issue215 - split test_python.org into multiple files
+
+- fix issue148 - @unittest.skip on classes is now recognized and avoids
+ calling setUpClass/tearDownClass, thanks Pavel Repin
+
+- fix issue209 - reintroduce python2.4 support by depending on newer
+ pylib which re-introduced statement-finding for pre-AST interpreters
+
+- nose support: only call setup if it's a callable, thanks Andrew
+  Taumoefolau
+
+- fix issue219 - add py2.4-3.3 classifiers to TROVE list
+
+- in tracebacks *,** arg values are now shown next to normal arguments
+ (thanks Manuel Jacob)
+
+- fix issue217 - support mock.patch with pytest's fixtures - note that
+ you need either mock-1.0.1 or the python3.3 builtin unittest.mock.
+
+- fix issue127 - improve documentation for pytest_addoption() and
+ add a ``config.getoption(name)`` helper function for consistency.
+
+2.3.2
+=====
+
+- fix issue208 and issue29: use new py version to avoid long pauses
+ when printing tracebacks in long modules
+
+- fix issue205 - conftests in subdirs customizing
+ pytest_pycollect_makemodule and pytest_pycollect_makeitem
+ now work properly
+
+- fix teardown-ordering for parametrized setups
+
+- fix issue127 - better documentation for pytest_addoption
+ and related objects.
+
+- fix unittest behaviour: TestCase.runtest only called if there are
+ test methods defined
+
+- improve trial support: don't collect its empty
+ unittest.TestCase.runTest() method
+
+- "python setup.py test" now works with pytest itself
+
+- fix/improve internal/packaging related bits:
+
+ - exception message check of test_nose.py now passes on python33 as well
+
+ - issue206 - fix test_assertrewrite.py to work when a global
+ PYTHONDONTWRITEBYTECODE=1 is present
+
+ - add tox.ini to pytest distribution so that ignore-dirs and others config
+ bits are properly distributed for maintainers who run pytest-own tests
+
+2.3.1
+=====
+
+- fix issue202 - fix regression: using "self" from fixture functions now
+ works as expected (it's the same "self" instance that a test method
+ which uses the fixture sees)
+
+- skip pexpect using tests (test_pdb.py mostly) on freebsd* systems
+ due to pexpect not supporting it properly (hanging)
+
+- link to web pages from --markers output which provides help for
+ pytest.mark.* usage.
+
+2.3.0
+=====
+
+- fix issue202 - better automatic names for parametrized test functions
+- fix issue139 - introduce @pytest.fixture which allows direct scoping
+ and parametrization of funcarg factories.
+- fix issue198 - conftest fixtures were not found on windows32 in some
+ circumstances with nested directory structures due to path manipulation issues
+- fix issue193: skip test functions which were parametrized with empty
+  parameter sets
+- fix python3.3 compat, mostly reporting bits that previously depended
+ on dict ordering
+- introduce re-ordering of tests by resource and parametrization setup
+ which takes precedence to the usual file-ordering
+- fix issue185 monkeypatching time.time does not cause pytest to fail
+- fix issue172 duplicate call of pytest.fixture decorated setup_module
+  functions
+- fix junitxml=path construction so that if tests change the
+ current working directory and the path is a relative path
+ it is constructed correctly from the original current working dir.
+- fix "python setup.py test" example to cause a proper "errno" return
+- fix issue165 - fix broken doc links and mention stackoverflow for FAQ
+- catch unicode-issues when writing failure representations
+ to terminal to prevent the whole session from crashing
+- fix xfail/skip confusion: a skip-mark or an imperative pytest.skip
+ will now take precedence before xfail-markers because we
+ can't determine xfail/xpass status in case of a skip. see also:
+ http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get
+
+- always report installed 3rd party plugins in the header of a test run
+
+- fix issue160: a failing setup of an xfail-marked test should
+  be reported as xfail (not xpass)
+
+- fix issue128: show captured output when capsys/capfd are used
+
+- fix issue179: properly show the dependency chain of factories
+
+- pluginmanager.register(...) now raises ValueError if the
+  plugin has already been registered or the name is taken
+
+- fix issue159: improve http://pytest.org/latest/faq.html
+ especially with respect to the "magic" history, also mention
+ pytest-django, trial and unittest integration.
+
+- make request.keywords and node.keywords writable. All descendant
+ collection nodes will see keyword values. Keywords are dictionaries
+ containing markers and other info.
+
+- fix issue 178: xml binary escapes are now wrapped in py.xml.raw
+
+- fix issue 176: correctly catch the builtin AssertionError
+ even when we replaced AssertionError with a subclass on the
+ python level
+
+- factory discovery no longer fails with magic global callables
+ that provide no sane __code__ object (mock.call for example)
+
+- fix issue 182: testdir.inprocess_run now considers passed plugins
+
+- fix issue 188: ensure sys.exc_info is clear on python2
+ before calling into a test
+
+- fix issue 191: add unittest TestCase runTest method support
+- fix issue 156: monkeypatch correctly handles class level descriptors
+
+- reporting refinements:
+
+ - pytest_report_header now receives a "startdir" so that
+ you can use startdir.bestrelpath(yourpath) to show
+ nice relative path
+
+ - allow plugins to implement both pytest_report_header and
+ pytest_sessionstart (sessionstart is invoked first).
+
+ - don't show deselected reason line if there is none
+
+  - py.test -vv will show all assert comparisons instead of truncating
+
+2.2.4
+=====
+
+- fix error message for rewritten assertions involving the % operator
+- fix issue 126: correctly match all invalid xml characters for junitxml
+ binary escape
+- fix issue with unittest: now @unittest.expectedFailure markers should
+ be processed correctly (you can also use @pytest.mark markers)
+- document integration with the extended distribute/setuptools test commands
+- fix issue 140: properly get the real functions
+  of bound classmethods for setup/teardown_class
+- fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net
+- fix issue #143: call unconfigure/sessionfinish always when
+  configure/sessionstart were called
+- fix issue #144: better mangle test ids to junitxml classnames
+- upgrade distribute_setup.py to 0.6.27
+
+2.2.3
+=====
+
+- fix uploaded package to only include necessary files
+
+2.2.2
+=====
+
+- fix issue101: wrong args to unittest.TestCase test function now
+ produce better output
+- fix issue102: report more useful errors and hints for when a
+ test directory was renamed and some pyc/__pycache__ remain
+- fix issue106: allow parametrize to be applied multiple times
+ e.g. from module, class and at function level.
+- fix issue107: actually perform session scope finalization
+- don't check in parametrize if indirect parameters are funcarg names
+- add chdir method to monkeypatch funcarg
+- fix crash resulting from calling monkeypatch undo a second time
+- fix issue115: make --collectonly robust against early failure
+ (missing files/directories)
+- "-qq --collectonly" now shows only files and the number of tests in them
+- "-q --collectonly" now shows test ids
+- allow adding of attributes to test reports such that it also works
+ with distributed testing (no upgrade of pytest-xdist needed)
+
+2.2.1
+=====
+
+- fix issue99 (in pytest and py): internal errors with resultlog now
+  produce better output - fixed by normalizing pytest_internalerror
+  input arguments.
+- fix issue97 / traceback issues (in pytest and py) improve traceback output
+ in conjunction with jinja2 and cython which hack tracebacks
+- fix issue93 (in pytest and pytest-xdist) avoid "delayed teardowns":
+ the final test in a test node will now run its teardown directly
+ instead of waiting for the end of the session. Thanks Dave Hunt for
+ the good reporting and feedback. The pytest_runtest_protocol as well
+ as the pytest_runtest_teardown hooks now have "nextitem" available
+ which will be None indicating the end of the test run.
+- fix collection crash due to unknown-source collected items, thanks
+ to Ralf Schmitt (fixed by depending on a more recent pylib)
+
+2.2.0
+=====
+
+- fix issue90: introduce eager tearing down of test items so that
+  teardown functions are called earlier.
+- add an all-powerful metafunc.parametrize function which allows to
+  parametrize test function arguments in multiple steps and therefore
+  from independent plugins and places (see the sketch after this list).
+- add a @pytest.mark.parametrize helper which allows to easily
+ call a test function with different argument values
+- Add examples to the "parametrize" example page, including a quick port
+ of Test scenarios and the new parametrize function and decorator.
+- introduce registration for "pytest.mark.*" helpers via ini-files
+  or through plugin hooks. Also introduce a "--strict" option which
+  will treat unregistered markers as errors,
+  allowing to avoid typos and maintain a well described set of markers
+  for your test suite. See examples at http://pytest.org/latest/mark.html
+ and its links.
+- issue50: introduce "-m marker" option to select tests based on markers
+  (this is a stricter and more predictable version of '-k' in that "-m"
+  only matches complete markers and has more obvious rules for and/or
+  semantics).
+- new feature to help optimizing the speed of your tests:
+ --durations=N option for displaying N slowest test calls
+ and setup/teardown methods.
+- fix issue87: --pastebin now works with python3
+- fix issue89: --pdb with unexpected exceptions in doctest work more sensibly
+- fix and cleanup pytest's own test suite to not leak FDs
+- fix issue83: link to generated funcarg list
+- fix issue74: pyarg module names are now checked against imp.find_module false positives
+- fix compatibility with twisted/trial-11.1.0 use cases
+- simplify Node.listchain
+- simplify junitxml output code by relying on py.xml
+- add support for skip properties on unittest classes and functions
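+
+As a sketch of the metafunc.parametrize API mentioned above (the argument and
+parameter values are made up), a conftest.py could contain::
+
+    def pytest_generate_tests(metafunc):
+        # parametrize every test that declares a "backend" argument
+        if "backend" in metafunc.funcargnames:
+            metafunc.parametrize("backend", ["sqlite", "postgres"])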
+
+2.1.3
+=====
+
+- fix issue79: assertion rewriting failed on some comparisons in boolops
+- correctly handle zero length arguments (a la pytest '')
+- fix issue67 / junitxml now contains correct test durations, thanks ronny
+- fix issue75 / skipping test failure on jython
+- fix issue77 / Allow assertrepr_compare hook to apply to a subset of tests
+
+2.1.2
+=====
+
+- fix assertion rewriting on files with windows newlines on some Python versions
+- refine test discovery by package/module name (--pyargs), thanks Florian Mayer
+- fix issue69 / assertion rewriting fixed on some boolean operations
+- fix issue68 / packages now work with assertion rewriting
+- fix issue66: use different assertion rewriting caches when the -O option is passed
+- don't try assertion rewriting on Jython, use reinterp
+
+2.1.1
+=====
+
+- fix issue64 / pytest.set_trace now works within pytest_generate_tests hooks
+- fix issue60 / fix error conditions involving the creation of __pycache__
+- fix issue63 / assertion rewriting on inserts involving strings containing '%'
+- fix assertion rewriting on calls with a ** arg
+- don't cache rewritten modules if bytecode generation is disabled
+- fix assertion rewriting in read-only directories
+- fix issue59: provide system-out/err tags for junitxml output
+- fix issue61: assertion rewriting on boolean operations with 3 or more operands
+- you can now build a man page with "cd doc ; make man"
+
+2.1.0
+=====
+
+- fix issue53 call nosestyle setup functions with correct ordering
+- fix issue58 and issue59: new assertion code fixes
+- merge Benjamin's assertionrewrite branch: now assertions
+ for test modules on python 2.6 and above are done by rewriting
+ the AST and saving the pyc file before the test module is imported.
+ see doc/assert.txt for more info.
+- fix issue43: improve doctests with better traceback reporting on
+ unexpected exceptions
+- fix issue47: timing output in junitxml for test cases is now correct
+- fix issue48: typo in MarkInfo repr leading to exception
+- fix issue49: avoid confusing error when initialization partially fails
+- fix issue44: env/username expansion for junitxml file path
+- show releaselevel information in test runs for pypy
+- reworked doc pages for better navigation and PDF generation
+- report KeyboardInterrupt even if interrupted during session startup
+- fix issue 35 - provide PDF doc version and download link from index page
+
+2.0.3
+=====
+
+- fix issue38: nicer tracebacks on calls to hooks, particularly early
+ configure/sessionstart ones
+
+- fix missing skip reason/meta information in junitxml files, reported
+ via http://lists.idyll.org/pipermail/testing-in-python/2011-March/003928.html
+
+- fix issue34: avoid collection failure with "test" prefixed classes
+ deriving from object.
+
+- don't require zlib (and other libs) for genscript plugin without
+ --genscript actually being used.
+
+- speed up skips (by not doing a full traceback representation
+  internally)
+
+- fix issue37: avoid invalid characters in junitxml's output
+
+2.0.2
+=====
+
+- tackle issue32 - speed up test runs of very quick test functions
+ by reducing the relative overhead
+
+- fix issue30 - extended xfail/skipif handling and improved reporting.
+ If you have a syntax error in your skip/xfail
+ expressions you now get nice error reports.
+
+ Also you can now access module globals from xfail/skipif
+ expressions so that this for example works now::
+
+    import pytest
+    import mymodule
+    @pytest.mark.skipif("mymodule.__version__[0] != '1'")
+    def test_function():
+        pass
+
+ This will not run the test function if the module's version string
+ does not start with a "1". Note that specifying a string instead
+  of a boolean expression allows py.test to report meaningful information
+ when summarizing a test run as to what conditions lead to skipping
+ (or xfail-ing) tests.
+
+- fix issue28 - setup_method and pytest_generate_tests work together
+ The setup_method fixture method now gets called also for
+ test function invocations generated from the pytest_generate_tests
+ hook.
+
+- fix issue27 - collectonly and keyword-selection (-k) now work together
+ Also, if you do "py.test --collectonly -q" you now get a flat list
+ of test ids that you can use to paste to the py.test commandline
+ in order to execute a particular test.
+
+- fix issue25 avoid reported problems with --pdb and python3.2/encodings output
+
+- fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP
+ Starting with Python3.2 os.symlink may be supported. By requiring
+ a newer py lib version the py.path.local() implementation acknowledges
+ this.
+
+- fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular
+  thanks to Laura Creighton who also reviewed parts of the documentation.
+
+- fix slightly wrong output of verbose progress reporting for classes
+  (thanks Amaury)
+
+- more precise (avoiding of) deprecation warnings for node.Class|Function accesses
+
+- avoid std unittest assertion helper code in tracebacks (thanks Ronny)
+
+2.0.1
+=====
+
+- refine and unify initial capturing so that it works nicely
+ even if the logging module is used on an early-loaded conftest.py
+ file or plugin.
+- allow to omit "()" in test ids to allow for uniform test ids
+ as produced by Alfredo's nice pytest.vim plugin.
+- fix issue12 - show plugin versions with "--version" and
+ "--traceconfig" and also document how to add extra information
+ to reporting test header
+- fix issue17 (import-* reporting issue on python3) by
+ requiring py>1.4.0 (1.4.1 is going to include it)
+- fix issue10 (numpy arrays truth checking) by refining
+ assertion interpretation in py lib
+- fix issue15: make nose compatibility tests compatible
+ with python3 (now that nose-1.0 supports python3)
+- remove somewhat surprising "same-conftest" detection because
+  it ignored conftest.py files when they appeared in several subdirs.
+- improve assertions ("not in"), thanks Floris Bruynooghe
+- improve behaviour/warnings when running on top of "python -OO"
+ (assertions and docstrings are turned off, leading to potential
+ false positives)
+- introduce a pytest_cmdline_processargs(args) hook
+  to allow dynamic computation of command line arguments.
+  This fixes a regression because py.test prior to 2.0
+  allowed setting command line options from conftest.py
+  files, which pytest-2.0 so far only allowed from ini-files.
+  See the sketch at the end of this section.
+- fix issue7: assert failures in doctest modules.
+  unexpected failures in doctests will now generally
+  show nicer, i.e. within the doctest failing context.
+- fix issue9: setup/teardown functions for an xfail-marked
+  test will report as xfail if they fail but report as normally
+  passing (not xpassing) if they succeed. This is only true
+  for "direct" setup/teardown invocations because teardown_class/
+  teardown_module cannot closely relate to a single test.
+- fix issue14: no logging errors at process exit
+- refinements to "collecting" output on non-ttys
+- refine internal plugin registration and --traceconfig output
+- introduce a mechanism to prevent/unregister plugins from the
+ command line, see http://pytest.org/plugins.html#cmdunregister
+- activate resultlog plugin by default
+- fix regression wrt yielded tests which due to the
+  collection-before-running semantics were not
+  set up as with pytest 1.3.4. Note, however, that
+  the recommended and much cleaner way to do test
+  parametrization remains the "pytest_generate_tests"
+  mechanism, see the docs.
+
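+A minimal conftest.py sketch for the pytest_cmdline_processargs hook
+mentioned above (illustrative only; it assumes the hook receives the
+mutable list of command line arguments)::
+
+    # conftest.py
+    def pytest_cmdline_processargs(args):
+        # always run verbosely unless already requested
+        if "-v" not in args:
+            args.insert(0, "-v")
+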
+2.0.0
+=====
+
+- pytest-2.0 is now its own package and depends on pylib-2.0
+- new ability: python -m pytest / python -m pytest.main
+- new python invocation: pytest.main(args, plugins) to load
+ some custom plugins early.
+- try harder to run unittest test suites in a more compatible manner
+ by deferring setup/teardown semantics to the unittest package.
+ also work harder to run twisted/trial and Django tests which
+ should now basically work by default.
+- introduce a new way to set config options via ini-style files,
+  by default setup.cfg and tox.ini files are searched. The old
+  ways (certain environment variables, dynamic conftest.py reading)
+  are removed.
+- add a new "-q" option which decreases verbosity and prints a more
+ nose/unittest-style "dot" output.
+- fix issue135 - marks now work with unittest test cases as well
+- fix issue126 - introduce py.test.set_trace() to trace execution via
+  PDB during the running of tests even if capturing is ongoing
+  (see the example at the end of this section).
+- fix issue123 - new "python -m py.test" invocation for py.test
+ (requires Python 2.5 or above)
+- fix issue124 - make reporting more resilient against tests opening
+ files on filedescriptor 1 (stdout).
+- fix issue109 - sibling conftest.py files will not be loaded.
+ (and Directory collectors cannot be customized anymore from a Directory's
+ conftest.py - this needs to happen at least one level up).
+- introduce (customizable) assertion failure representations and enhance
+ output on assertion failures for comparisons and other cases (Floris Bruynooghe)
+- nose-plugin: pass through type-signature failures in setup/teardown
+ functions instead of not calling them (Ed Singleton)
+- remove py.test.collect.Directory (follows from a major refactoring
+ and simplification of the collection process)
+- majorly reduce py.test core code, shift function/python testing to own plugin
+- fix issue88 (finding custom test nodes from command line arg)
+- refine 'tmpdir' creation, will now create basenames better associated
+ with test names (thanks Ronny)
+- "xpass" (unexpected pass) tests don't cause exitcode!=0
+- fix issue131 / issue60 - importing doctests in __init__ files used as namespace packages
+- fix issue93 stdout/stderr is captured while importing conftest.py
+- fix bug: unittest collected functions now also can have "pytestmark"
+ applied at class/module level
+- add ability to use "class" level for cached_setup helper
+- fix strangeness: mark.* objects are now immutable, create new instances
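+
+A small illustrative example of the py.test.set_trace() helper mentioned
+above (``compute_something`` is a made-up function under test)::
+
+    import py
+
+    def test_interactive_debugging():
+        result = compute_something()   # hypothetical function under test
+        py.test.set_trace()            # drops into PDB even while capturing is ongoing
+        assert result is not None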
+
+1.3.4
+=====
+
+- fix issue111: improve install documentation for windows
+- fix issue119: fix custom collectability of __init__.py as a module
+- fix issue116: --doctestmodules work with __init__.py files as well
+- fix issue115: unify internal exception passthrough/catching/GeneratorExit
+- fix issue118: new --tb=native for presenting cpython-standard exceptions
+
+1.3.3
+=====
+
+- fix issue113: assertion representation problem with triple-quoted strings
+ (and possibly other cases)
+- make conftest loading detect that a conftest file with the same
+  content was already loaded, avoiding surprises in nested directory structures
+  which can be produced e.g. by Hudson. It probably removes the need to use
+  --confcutdir in most cases.
+- fix terminal coloring for win32
+ (thanks Michael Foord for reporting)
+- fix weirdness: make terminal width detection work on stdout instead of stdin
+ (thanks Armin Ronacher for reporting)
+- remove trailing whitespace in all py/text distribution files
+
+1.3.2
+=====
+
+**New features**
+
+- fix issue103: introduce py.test.raises as context manager, examples::
+
+ with py.test.raises(ZeroDivisionError):
+ x = 0
+ 1 / x
+
+ with py.test.raises(RuntimeError) as excinfo:
+ call_something()
+
+ # you may do extra checks on excinfo.value|type|traceback here
+
+ (thanks Ronny Pfannschmidt)
+
+- Funcarg factories can now dynamically apply a marker to a
+ test invocation. This is for example useful if a factory
+ provides parameters to a test which are expected-to-fail::
+
+ def pytest_funcarg__arg(request):
+ request.applymarker(py.test.mark.xfail(reason="flaky config"))
+ ...
+
+ def test_function(arg):
+ ...
+
+- improved error reporting on collection and import errors. This makes
+ use of a more general mechanism, namely that for custom test item/collect
+ nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can
+ override it to return a string error representation of your choice
+ which is going to be reported as a (red) string.
+
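+  A minimal sketch of such an override (the node class and the failure
+  message are made up)::
+
+      import py
+
+      class MyItem(py.test.collect.Item):
+          def runtest(self):
+              run_my_check(self.name)   # hypothetical check
+
+          def repr_failure(self, excinfo):
+              # the returned string is reported as the (red) failure text
+              return "my check failed: %s" % (excinfo.value,)
+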
+- introduce '--junitprefix=STR' option to prepend a prefix
+ to all reports in the junitxml file.
+
+**Bug fixes**
+
+- make tests and the ``pytest_recwarn`` plugin in particular fully compatible
+  with Python2.7 (if you use the ``recwarn`` funcarg, warnings will be enabled so that
+  you can properly check for their existence in a cross-python manner).
+- refine --pdb: ignore xfailed tests, unify its TB-reporting and
+ don't display failures again at the end.
+- fix assertion interpretation with the ** operator (thanks Benjamin Peterson)
+- fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson)
+- fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous)
+- fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny)
+- fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson)
+- fix py.code.compile(source) to generate unique filenames
+- fix assertion re-interp problems on PyPy, by deferring code
+ compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot)
+- fix py.path.local.pyimport() to work with directories
+- streamline py.path.local.mkdtemp implementation and usage
+- don't print empty lines when showing junitxml-filename
+- add optional boolean ignore_errors parameter to py.path.local.remove
+- fix terminal writing on win32/python2.4
+- py.process.cmdexec() now tries harder to return properly encoded unicode objects
+ on all python versions
+- install plain py.test/py.which scripts also for Jython; this helps to
+ get canonical script paths in virtualenv situations
+- make path.bestrelpath(path) return ".", note that when calling
+ X.bestrelpath the assumption is that X is a directory.
+- make initial conftest discovery ignore "--" prefixed arguments
+- fix resultlog plugin when used in a multicpu/multihost xdist situation
+ (thanks Jakub Gustak)
+- perform distributed testing related reporting in the xdist-plugin
+ rather than having dist-related code in the generic py.test
+ distribution
+- fix homedir detection on Windows
+- ship distribute_setup.py version 0.6.13
+
+1.3.1
+=====
+
+**New features**
+
+- issue91: introduce new py.test.xfail(reason) helper
+ to imperatively mark a test as expected to fail. Can
+ be used from within setup and test functions. This is
+ useful especially for parametrized tests when certain
+ configurations are expected-to-fail. In this case the
+ declarative approach with the @py.test.mark.xfail cannot
+ be used as it would mark all configurations as xfail.
+
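+  A small illustrative sketch (``someconfig`` and ``run_feature`` are
+  made up)::
+
+      import py
+
+      def test_feature(someconfig):
+          if someconfig.backend == "legacy":
+              py.test.xfail("legacy backend is known to fail here")
+          assert run_feature(someconfig)
+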
+- issue102: introduce new --maxfail=NUM option to stop
+ test runs after NUM failures. This is a generalization
+ of the '-x' or '--exitfirst' option which is now equivalent
+ to '--maxfail=1'. Both '-x' and '--maxfail' will
+ now also print a line near the end indicating the Interruption.
+
+- issue89: allow py.test.mark decorators to be used on classes
+  (class decorators were introduced with python2.6) and
+  also allow multiple markers to be applied at class/module level
+  by specifying a list.
+
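+  For example (illustrative only)::
+
+      import py
+
+      @py.test.mark.webtest
+      class TestSomething:
+          def test_send(self):
+              pass
+
+      # or apply several markers at module level:
+      pytestmark = [py.test.mark.webtest, py.test.mark.slowtest]
+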
+- improve and refine letter reporting in the progress bar:
+ . pass
+ f failed test
+ s skipped tests (reminder: use for dependency/platform mismatch only)
+ x xfailed test (test that was expected to fail)
+ X xpassed test (test that was expected to fail but passed)
+
+ You can use any combination of 'fsxX' with the '-r' extended
+ reporting option. The xfail/xpass results will show up as
+ skipped tests in the junitxml output - which also fixes
+ issue99.
+
+- make py.test.cmdline.main() return the exitstatus instead of raising
+  SystemExit and also allow it to be called multiple times. This of
+  course requires that your application and tests are properly torn
+  down and don't have global state.
+
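+  For example (illustrative only)::
+
+      import py
+
+      exitstatus = py.test.cmdline.main(["-q", "tests"])
+      assert exitstatus == 0
+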
+**Bug Fixes**
+
+- improved traceback presentation:
+  - improved and unified reporting for "--tb=short" option
+  - Errors during test module imports are much shorter (using --tb=short style)
+  - raises shows shorter more relevant tracebacks
+  - --fulltrace now more systematically makes traces longer / inhibits cutting
+
+- improve support for raises and other dynamically compiled code by
+  manipulating python's linecache.cache instead of the previous
+  rather hacky way of creating custom code objects. This makes
+  it work seamlessly on Jython and PyPy where it previously didn't.
+
+- fix issue96: make capturing more resilient against Control-C
+ interruptions (involved somewhat substantial refactoring
+ to the underlying capturing functionality to avoid race
+ conditions).
+
+- fix chaining of conditional skipif/xfail decorators - using multiple
+  @py.test.mark.skipif(condition) decorators now works as expected,
+  including specific reporting of which of the conditions led to skipping.
+
+- fix issue95: late-import zlib so that it's not required
+ for general py.test startup.
+
+- fix issue94: make reporting more robust against bogus source code
+ (and internally be more careful when presenting unexpected byte sequences)
+
+
+1.3.0
+=====
+
+- deprecate --report option in favour of a new shorter and easier to
+ remember -r option: it takes a string argument consisting of any
+ combination of 'xfsX' characters. They relate to the single chars
+ you see during the dotted progress printing and will print an extra line
+ per test at the end of the test run. This extra line indicates the exact
+ position or test ID that you directly paste to the py.test cmdline in order
+ to re-run a particular test.
+
+- allow external plugins to register new hooks via the new
+ pytest_addhooks(pluginmanager) hook. The new release of
+ the pytest-xdist plugin for distributed and looponfailing
+ testing requires this feature.
+
+- add a new pytest_ignore_collect(path, config) hook to allow projects and
+  plugins to define exclusion behaviour for their directory structure -
+  for example you may define this method in a conftest.py::
+
+ def pytest_ignore_collect(path):
+ return path.check(link=1)
+
+ to prevent even a collection try of any tests in symlinked dirs.
+
+- new pytest_pycollect_makemodule(path, parent) hook for
+ allowing customization of the Module collection object for a
+ matching test module.
+
+- extend and refine xfail mechanism:
+  ``@py.test.mark.xfail(run=False)`` does not run the decorated test
+  ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
+  specifying ``--runxfail`` on the command line virtually ignores xfail markers
+
+- expose (previously internal) commonly useful methods:
+  py.io.get_terminal_width() -> return terminal width
+  py.io.ansi_print(...) -> print colored/bold text on linux/win32
+  py.io.saferepr(obj) -> return limited representation string
+
+- expose test outcome related exceptions as py.test.skip.Exception,
+ py.test.raises.Exception etc., useful mostly for plugins
+ doing special outcome interpretation/tweaking
+
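+  For example, a plugin might intercept the skip outcome like this
+  (illustrative only)::
+
+    import py
+
+    try:
+        py.test.skip("not supported in this environment")
+    except py.test.skip.Exception:
+        pass  # a plugin could record or tweak the outcome here
+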
+- (issue85) fix junitxml plugin to handle tests with non-ascii output
+
+- fix/refine python3 compatibility (thanks Benjamin Peterson)
+
+- fixes for making the jython/win32 combination work; note, however, that
+  jython2.5.1/win32 does not provide a command line launcher, see
+  http://bugs.jython.org/issue1491 . See the pylib install documentation
+  for how to work around this.
+
+- fixes for handling of unicode exception values and unprintable objects
+
+- (issue87) fix unboundlocal error in assertionold code
+
+- (issue86) improve documentation for looponfailing
+
+- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
+
+- ship distribute_setup.py version 0.6.10
+
+- added links to the new capturelog and coverage plugins
+
+
+1.2.0
+=====
+
+- refined usage and options for "py.cleanup"::
+
+ py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
+ py.cleanup -e .swp -e .cache # also remove files with these extensions
+ py.cleanup -s # remove "build" and "dist" directory next to setup.py files
+ py.cleanup -d # also remove empty directories
+ py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
+ py.cleanup -n # dry run, only show what would be removed
+
+- add a new option "py.test --funcargs" which shows available funcargs
+ and their help strings (docstrings on their respective factory function)
+ for a given test path
+
+- display a short and concise traceback if a funcarg lookup fails
+
+- early-load "conftest.py" files in non-dot first-level sub directories.
+ allows to conveniently keep and access test-related options in a ``test``
+ subdir and still add command line options.
+
+- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
+
+- fix issue78: always call python-level teardown functions even if the
+  corresponding setup failed. This includes refinements for calling setup_module/class functions
+  which will now only be called once instead of the previous behaviour where they'd be called
+  multiple times if they raised an exception (including a Skipped exception). Any exception
+  will be recorded and associated with all tests in the corresponding module/class scope.
+
+- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
+
+- fix pdb debugging to be in the correct frame on raises-related errors
+
+- update apipkg.py to fix an issue where recursive imports might
+ unnecessarily break importing
+
+- fix plugin links
+
+1.1.1
+=====
+
+- moved dist/looponfailing from py.test core into a new
+ separately released pytest-xdist plugin.
+
+- new junitxml plugin: --junitxml=path will generate a junit style xml file
+ which is processable e.g. by the Hudson CI system.
+
+- new option: --genscript=path will generate a standalone py.test script
+ which will not need any libraries installed. thanks to Ralf Schmitt.
+
+- new option: --ignore will prevent the specified path from being collected.
+ Can be specified multiple times.
+
+- new option: --confcutdir=dir will make py.test only consider conftest
+ files that are relative to the specified dir.
+
+- new funcarg: "pytestconfig" is the pytest config object for access
+ to command line args and can now be easily used in a test.
+
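+  For example (illustrative only)::
+
+      def test_verbosity_option(pytestconfig):
+          # parsed command line options are available via .option
+          assert pytestconfig.option.verbose >= 0
+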
+- install ``py.test`` and ``py.which`` with a ``-$VERSION`` suffix to
+ disambiguate between Python3, python2.X, Jython and PyPy installed versions.
+
+- new "pytestconfig" funcarg allows access to test config object
+
+- new "pytest_report_header" hook can return additional lines
+ to be displayed at the header of a test run.
+
+- (experimental) allow "py.test path::name1::name2::..." for pointing
+ to a test within a test collection directly. This might eventually
+ evolve as a full substitute to "-k" specifications.
+
+- streamlined plugin loading: order is now as documented in
+  customize.html: setuptools, ENV, commandline, conftest.
+  Also, setuptools entry point names are turned into canonical names ("pytest_*")
+
+- automatically skip tests that need 'capfd' but have no os.dup
+
+- allow pytest_generate_tests to be defined in classes as well
+
+- deprecate usage of 'disabled' attribute in favour of pytestmark
+- deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+- collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+- change: the first pytest_collect_directory hook to return something
+  will now prevent further hooks from being called.
+
+- change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+- change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+- change: remove internal py._* helper vars, only keep py._pydir
+
+- robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+- make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+- simplify internal plugin manager machinery
+- simplify internal collection tree by introducing a RootCollector node
+
+- fix assert reinterpretation that sees a call containing "keyword=..."
+
+- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+- fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+- skip some install-tests if no execnet is available
+
+- fix docs, fix internal bin/ script generation
+
+
+1.1.0
+=====
+
+- introduce automatic plugin registration via 'pytest11'
+  entry points using setuptools' pkg_resources.iter_entry_points
+
+- fix py.test dist-testing to work with execnet >= 1.0.0b4
+
+- re-introduce py.test.cmdline.main() for better backward compatibility
+
+- svn paths: fix a bug with path.check(versioned=True) for svn paths,
+ allow '%' in svn paths, make svnwc.update() default to interactive mode
+ like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
+
+- refine distributed tarball to contain tests and no pyc files
+
+- try harder to have deprecation warnings for py.compat.* accesses
+ report a correct location
+
+1.0.3
+=====
+
+* adjust and improve docs
+
+* remove py.rest tool and internal namespace - it was
+  never really advertised and can still be used with
+  the old release if needed. If there is interest
+  it could be revived into its own tool.
+
+* fix issue48 and issue59: raise an Error if the module
+ from an imported test file does not seem to come from
+ the filepath - avoids "same-name" confusion that has
+ been reported repeatedly
+
+* merged Ronny's nose-compatibility hacks: now
+ nose-style setup_module() and setup() functions are
+ supported
+
+* introduce generalized py.test.mark function marking
+
+* reshuffle / refine command line grouping
+
+* deprecate parser.addgroup in favour of getgroup which creates an option group
+
+* add --report command line option that allows controlling the showing of skipped/xfailed sections
+
+* generalized skipping: a new way to mark python functions with skipif or xfail
+  at function, class and module level based on platform or sys-module attributes.
+
+* extend py.test.mark decorator to allow for positional args
+
+* introduce and test "py.cleanup -d" to remove empty directories
+
+* fix issue #59 - robustify unittest test collection
+
+* make bpython/help interaction work by adding an __all__ attribute
+ to ApiModule, cleanup initpkg
+
+* use MIT license for pylib, add some contributors
+
+* remove py.execnet code and substitute all usages with 'execnet' proper
+
+* fix issue50 - cached_setup now caches according to expectations
+  for test functions with multiple arguments.
+
+* merge Jarko's fixes, issue #45 and #46
+
+* add the ability to specify a path for py.lookup to search in
+
+* fix a funcarg cached_setup bug probably only occurring
+ in distributed testing and "module" scope with teardown.
+
+* many fixes and changes for making the code base python3 compatible,
+ many thanks to Benjamin Peterson for helping with this.
+
+* consolidate builtins implementation to be compatible with >=2.3,
+ add helpers to ease keeping 2 and 3k compatible code
+
+* deprecate py.compat.doctest|subprocess|textwrap|optparse
+
+* deprecate py.magic.autopath, remove py/magic directory
+
+* move pytest assertion handling to py/code and a pytest_assertion
+ plugin, add "--no-assert" option, deprecate py.magic namespaces
+ in favour of (less) py.code ones.
+
+* consolidate and cleanup py/code classes and files
+
+* cleanup py/misc, move tests to bin-for-dist
+
+* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
+
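+  For example (illustrative only)::
+
+      class Config:
+          debug = True
+
+      def test_delete_helpers(monkeypatch):
+          monkeypatch.setenv("MYVAR", "42")
+          monkeypatch.delenv("MYVAR")           # undone automatically after the test
+          monkeypatch.delattr(Config, "debug")  # the attribute is restored afterwards
+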
+* consolidate py.log implementation, remove old approach.
+
+* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
+ text/unicode and byte-streams (uses underlying standard lib io.*
+ if available)
+
+* make py.unittest_convert helper script available which converts "unittest.py"
+ style files into the simpler assert/direct-test-classes py.test/nosetests
+ style. The script was written by Laura Creighton.
+
+* simplified internal localpath implementation
+
+1.0.2
+=====
+
+* fixing packaging issues, triggered by Fedora/Red Hat packaging;
+  also added doc, examples and contrib dirs to the tarball.
+
+* added a documentation link to the new django plugin.
+
+1.0.1
+=====
+
+* added a 'pytest_nose' plugin which handles nose.SkipTest,
+ nose-style function/method/generator setup/teardown and
+ tries to report functions correctly.
+
+* capturing of unicode writes or encoded strings to sys.stdout/err
+  works better; also, terminal writing was adapted and somewhat
+  unified between Windows and Linux.
+
+* improved documentation layout and content a lot
+
+* added a "--help-config" option to show conftest.py / ENV-var names for
+ all longopt cmdline options, and some special conftest.py variables.
+ renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
+
+* fix issue #27: better reporting on non-collectable items given on commandline
+ (e.g. pyc files)
+
+* fix issue #33: added --version flag (thanks Benjamin Peterson)
+
+* fix issue #32: adding support for "incomplete" paths to wcpath.status()
+
+* "Test" prefixed classes are *not* collected by default anymore if they
+ have an __init__ method
+
+* monkeypatch setenv() now accepts a "prepend" parameter
+
+* improved reporting of collection error tracebacks
+
+* simplified multicall mechanism and plugin architecture,
+ renamed some internal methods and argnames
+
+1.0.0
+=====
+
+* more terse reporting: try to show filesystem paths relative to the current dir
+* improve xfail output a bit
+
+1.0.0b9
+=======
+
+* cleanly handle and report final teardown of test setup
+
+* fix svn-1.6 compat issue with py.path.svnwc().versioned()
+ (thanks Wouter Vanden Hove)
+
+* setup/teardown or collection problems now show as ERRORs
+  or with big "E"'s in the progress lines. They are reported
+  and counted separately.
+
+* dist-testing: properly handle test items that get locally
+ collected but cannot be collected on the remote side - often
+ due to platform/dependency reasons
+
+* simplified py.test.mark API - see keyword plugin documentation
+
+* integrate better with logging: capturing now by default captures
+ test functions and their immediate setup/teardown in a single stream
+
+* capsys and capfd funcargs now have a readouterr() and a close() method
+  (py.io.StdCapture/FD objects are used underneath; they grew a
+  readouterr() method as well to return snapshots of captured out/err)
+
+* make assert-reinterpretation work better with comparisons not
+  returning bools (reported with numpy, thanks Maciej Fijalkowski)
+
+* reworked per-test output capturing into the pytest_iocapture.py plugin
+ and thus removed capturing code from config object
+
+* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
+
+
+1.0.0b8
+=======
+
+* pytest_unittest-plugin is now enabled by default
+
+* introduced pytest_keyboardinterrupt hook and
+  refined the pytest_sessionfinish hook, added tests.
+
+* workaround a buggy logging module interaction ("closing already closed
+ files"). Thanks to Sridhar Ratnakumar for triggering.
+
+* if plugins use "py.test.importorskip" for importing
+  a dependency, only a warning will be issued instead
+  of exiting the testing process.
+
+* many improvements to docs:
+  - refined funcargs doc, using the term "factory" instead of "provider"
+ - added a new talk/tutorial doc page
+ - better download page
+ - better plugin docstrings
+ - added new plugins page and automatic doc generation script
+
+* fixed teardown problem related to partially failing funcarg setups
+ (thanks MrTopf for reporting), "pytest_runtest_teardown" is now
+ always invoked even if the "pytest_runtest_setup" failed.
+
+* tweaked doctest output for docstrings in py modules,
+ thanks Radomir.
+
+1.0.0b7
+=======
+
+* renamed py.test.xfail back to py.test.mark.xfail to avoid
+ two ways to decorate for xfail
+
+* re-added py.test.mark decorator for setting keywords on functions
+ (it was actually documented so removing it was not nice)
+
+* remove scope-argument from request.addfinalizer() because
+ request.cached_setup has the scope arg. TOOWTDI.
+
+* perform setup finalization before reporting failures
+
+* apply modified patches from Andreas Kloeckner to allow
+ test functions to have no func_code (#22) and to make
+ "-k" and function keywords work (#20)
+
+* apply patch from Daniel Peolzleithner (issue #23)
+
+* resolve issue #18, multiprocessing.Manager() and
+ redirection clash
+
+* make __name__ == "__channelexec__" for remote_exec code
+
+1.0.0b3
+=======
+
+* plugin classes are removed: one now defines
+ hooks directly in conftest.py or global pytest_*.py
+ files.
+
+* added new pytest_namespace(config) hook that allows
+  injecting helpers directly into the py.test.* namespace.
+
+* documented and refined many hooks
+
+* added new style of generative tests via
+ pytest_generate_tests hook that integrates
+ well with function arguments.
+
+
+1.0.0b1
+=======
+
+* introduced new "funcarg" setup method,
+ see doc/test/funcarg.txt
+
+* introduced plugin architecture and many
+ new py.test plugins, see
+ doc/test/plugins.txt
+
+* teardown_method is now guaranteed to get
+ called after a test method has run.
+
+* new method: py.test.importorskip(mod,minversion)
+ will either import or call py.test.skip()
+
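+  For example (illustrative only)::
+
+      import py
+
+      docutils = py.test.importorskip("docutils", minversion="0.4")
+      # the rest of the module is skipped if docutils is missing or too old
+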
+* completely revised internal py.test architecture
+
+* new py.process.ForkedFunc object which allows forking the execution
+  of a function to a subprocess and getting a result back.
+
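+  A minimal illustrative use (assuming the py lib's waitfinish()/retval
+  API; requires os.fork, so not available on Windows)::
+
+      import py
+
+      def compute():
+          return 42
+
+      result = py.process.ForkedFunc(compute).waitfinish()
+      assert result.retval == 42
+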
+XXX lots of things missing here XXX
+
+0.9.2
+=====
+
+* refined installation and metadata, created new setup.py,
+ now based on setuptools/ez_setup (thanks to Ralf Schmitt
+ for his support).
+
+* improved the way of making py.* scripts available in
+  Windows environments; they are now added to the
+  Scripts directory as ".cmd" files.
+
+* py.path.svnwc.status() now is more complete and
+ uses xml output from the 'svn' command if available
+ (Guido Wesdorp)
+
+* fix for py.path.svn* to work with svn 1.5
+ (Chris Lamb)
+
+* fix path.relto(otherpath) method on windows to
+ use normcase for checking if a path is relative.
+
+* py.test's traceback is better parseable from editors
+ (follows the filenames:LINENO: MSG convention)
+ (thanks to Osmo Salomaa)
+
+* fix to javascript-generation, "py.test --runbrowser"
+ should work more reliably now
+
+* removed previously accidentally added
+ py.test.broken and py.test.notimplemented helpers.
+
+* there now is a py.__version__ attribute
+
+0.9.1
+=====
+
+This is a fairly complete list of changes in v0.9.1, which can
+serve as a reference for developers.
+
+* allowing + signs in py.path.svn urls [39106]
+* fixed support for Failed exceptions without excinfo in py.test [39340]
+* added support for killing processes for Windows (as well as platforms that
+ support os.kill) in py.misc.killproc [39655]
+* added setup/teardown for generative tests to py.test [40702]
+* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
+* fixed problem with calling .remove() on wcpaths of non-versioned files in
+ py.path [44248]
+* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
+* fail to run greenlet tests when pypy is available, but without stackless
+ [45294]
+* small fixes in rsession tests [45295]
+* fixed issue with 2.5 type representations in py.test [45483, 45484]
+* internal reporting issues are now displayed atomically in py.test
+  [45518]
+* non-existing files are now ignored by the py.lookup script [45519]
+* improved exception name creation in py.test [45535]
+* fewer threads are now used in execnet [merge in 45539]
+* removed lock required for atomic reporting-issue display in py.test
+  [45545]
+* removed globals from execnet [45541, 45547]
+* refactored cleanup mechanics; setDaemon is now set to 1 so that atexit
+  gets called in 2.5 (py.execnet) [45548]
+* fixed bug in joining threads in py.execnet's servemain [45549]
+* refactored py.test.rsession tests to not rely on exact output format anymore
+ [45646]
+* using repr() on test outcome [45647]
+* added 'Reason' classes for py.test.skip() [45648, 45649]
+* killed some unnecessary sanity check in py.test.collect [45655]
+* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
+ usable by Administrators [45901]
+* added support for locking and non-recursive commits to py.path.svnwc [45994]
+* locking files in py.execnet to prevent CPython from segfaulting [46010]
+* added export() method to py.path.svnurl
+* fixed -d -x in py.test [47277]
+* fixed argument concatenation problem in py.path.svnwc [49423]
+* restore py.test behaviour that it exits with code 1 when there are failures
+ [49974]
+* don't fail on html files that don't have an accompanying .txt file [50606]
+* fixed 'utestconvert.py < input' [50645]
+* small fix for code indentation in py.code.source [50755]
+* fix _docgen.py documentation building [51285]
+* improved checks for source representation of code blocks in py.test [51292]
+* added support for passing authentication to py.path.svn* objects [52000,
+ 52001]
+* removed sorted() call for py.apigen tests in favour of [].sort() to support
+ Python 2.3 [52481]
diff --git a/testing/web-platform/tests/tools/pytest/CONTRIBUTING.rst b/testing/web-platform/tests/tools/pytest/CONTRIBUTING.rst
new file mode 100644
index 000000000..75ee3ec32
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/CONTRIBUTING.rst
@@ -0,0 +1,253 @@
+============================
+Contribution getting started
+============================
+
+Contributions are highly welcomed and appreciated. Every little help counts,
+so do not hesitate!
+
+.. contents:: Contribution links
+ :depth: 2
+
+
+.. _submitfeedback:
+
+Feature requests and feedback
+-----------------------------
+
+Do you like pytest? Share some love on Twitter or in your blog posts!
+
+We'd also like to hear about your propositions and suggestions. Feel free to
+`submit them as issues <https://github.com/pytest-dev/pytest/issues>`_ and:
+
+* Explain in detail how they should work.
+* Keep the scope as narrow as possible. This will make it easier to implement.
+
+
+.. _reportbugs:
+
+Report bugs
+-----------
+
+Report bugs for pytest in the `issue tracker <https://github.com/pytest-dev/pytest/issues>`_.
+
+If you are reporting a bug, please include:
+
+* Your operating system name and version.
+* Any details about your local setup that might be helpful in troubleshooting,
+ specifically Python interpreter version,
+ installed libraries and pytest version.
+* Detailed steps to reproduce the bug.
+
+If you can write a demonstration test that currently fails but should pass (xfail),
+that is a very useful commit to make as well, even if you can't find how
+to fix the bug yet.
+
+
+.. _fixbugs:
+
+Fix bugs
+--------
+
+Look through the GitHub issues for bugs. Here is a sample filter you can use:
+https://github.com/pytest-dev/pytest/labels/bug
+
+:ref:`Talk <contact>` to developers to find out how you can fix specific bugs.
+
+Don't forget to check the issue trackers of your favourite plugins, too!
+
+.. _writeplugins:
+
+Implement features
+------------------
+
+Look through the GitHub issues for enhancements. Here is a sample filter you
+can use:
+https://github.com/pytest-dev/pytest/labels/enhancement
+
+:ref:`Talk <contact>` to developers to find out how you can implement specific
+features.
+
+Write documentation
+-------------------
+
+pytest could always use more documentation. What exactly is needed?
+
+* More complementary documentation. Have you perhaps found something unclear?
+* Documentation translations. We currently have only English.
+* Docstrings. There can never be too many of them.
+* Blog posts, articles and such -- they're all very appreciated.
+
+You can also edit documentation files directly in the GitHub web interface
+without needing to make a fork and local copy. This can be convenient for
+small fixes.
+
+
+.. _submitplugin:
+
+Submitting Plugins to pytest-dev
+--------------------------------
+
+Pytest development of the core, some plugins and support code happens
+in repositories living under the ``pytest-dev`` organisations:
+
+- `pytest-dev on GitHub <https://github.com/pytest-dev>`_
+
+- `pytest-dev on Bitbucket <https://bitbucket.org/pytest-dev>`_
+
+All pytest-dev Contributors team members have write access to all contained
+repositories. pytest core and plugins are generally developed
+using `pull requests`_ to respective repositories.
+
+The objectives of the ``pytest-dev`` organisation are:
+
+* Having a central location for popular pytest plugins
+* Sharing some of the maintenance responsibility (in case a maintainer no longer wishes to maintain a plugin)
+
+You can submit your plugin by subscribing to the `pytest-dev mail list
+<https://mail.python.org/mailman/listinfo/pytest-dev>`_ and writing a
+mail pointing to your existing pytest plugin repository which must have
+the following:
+
+- PyPI presence with a ``setup.py`` that contains a license, ``pytest-``
+ prefixed name, version number, authors, short and long description.
+
+- a ``tox.ini`` for running tests using `tox <http://tox.testrun.org>`_.
+
+- a ``README.txt`` describing how to use the plugin and on which
+ platforms it runs.
+
+- a ``LICENSE.txt`` file or equivalent containing the licensing
+ information, with matching info in ``setup.py``.
+
+- an issue tracker for bug reports and enhancement requests.
+
+If no contributor strongly objects and two agree, the repository can then be
+transferred to the ``pytest-dev`` organisation.
+
+Here's a rundown of how a repository transfer usually proceeds
+(using a repository named ``joedoe/pytest-xyz`` as example):
+
+* One of the ``pytest-dev`` administrators creates:
+
+ - ``pytest-xyz-admin`` team, with full administration rights to
+ ``pytest-dev/pytest-xyz``.
+ - ``pytest-xyz-developers`` team, with write access to
+ ``pytest-dev/pytest-xyz``.
+
+* ``joedoe`` is invited to the ``pytest-xyz-admin`` team;
+
+* After accepting the invitation, ``joedoe`` transfers the repository from its
+ original location to ``pytest-dev/pytest-xyz`` (A nice feature is that GitHub handles URL redirection from
+ the old to the new location automatically).
+
+* ``joedoe`` is free to add any other collaborators to the
+ ``pytest-xyz-admin`` or ``pytest-xyz-developers`` team as desired.
+
+The ``pytest-dev/Contributors`` team has write access to all projects, and
+every project administrator is in it. We recommend that each plugin has at least three
+people who have the right to release to PyPI.
+
+Repository owners can be assured that no ``pytest-dev`` administrator will ever make
+releases of your repository or take ownership in any way, except in rare cases
+where someone becomes unresponsive after months of contact attempts.
+As stated, the objective is to share maintenance and avoid "plugin-abandon".
+
+
+.. _`pull requests`:
+.. _pull-requests:
+
+Preparing Pull Requests on GitHub
+---------------------------------
+
+.. note::
+  What is a "pull request"? It informs the project's core developers about the
+  changes you want to review and merge. Pull requests are stored on
+  `GitHub servers <https://github.com/pytest-dev/pytest/pulls>`_.
+  Once you send a pull request, we can discuss its potential modifications and
+  even add more commits to it later on.
+
+There's an excellent tutorial on how Pull Requests work in the
+`GitHub Help Center <https://help.github.com/articles/using-pull-requests/>`_,
+but here is a simple overview:
+
+#. Fork the
+ `pytest GitHub repository <https://github.com/pytest-dev/pytest>`__. It's
+ fine to use ``pytest`` as your fork repository name because it will live
+ under your user.
+
+#. Clone your fork locally using `git <https://git-scm.com/>`_ and create a branch::
+
+ $ git clone git@github.com:YOUR_GITHUB_USERNAME/pytest.git
+ $ cd pytest
+ # now, to fix a bug create your own branch off "master":
+
+ $ git checkout -b your-bugfix-branch-name master
+
+ # or to instead add a feature create your own branch off "features":
+
+ $ git checkout -b your-feature-branch-name features
+
+ Given we have "major.minor.micro" version numbers, bugfixes will usually
+ be released in micro releases whereas features will be released in
+ minor releases and incompatible changes in major releases.
+
+ If you need some help with Git, follow this quick start
+ guide: https://git.wiki.kernel.org/index.php/QuickStart
+
+#. Install tox
+
+   Tox is used to run all the tests and will automatically set up virtualenvs
+   to run the tests in.
+   (will implicitly use http://www.virtualenv.org/en/latest/)::
+
+ $ pip install tox
+
+#. Run all the tests
+
+ You need to have Python 2.7 and 3.5 available in your system. Now
+ running tests is as simple as issuing this command::
+
+ $ python runtox.py -e linting,py27,py35
+
+ This command will run tests via the "tox" tool against Python 2.7 and 3.5
+ and also perform "lint" coding-style checks. ``runtox.py`` is
+ a thin wrapper around ``tox`` which installs from a development package
+ index where newer (not yet released to pypi) versions of dependencies
+ (especially ``py``) might be present.
+
+#. You can now edit your local working copy.
+
+   You can now make the changes you want and run the tests again as necessary.
+
+   To run tests on py27 and pass options (e.g. enter pdb on failure)
+   to pytest you can do::
+
+ $ python runtox.py -e py27 -- --pdb
+
+ or to only run tests in a particular test module on py35::
+
+ $ python runtox.py -e py35 -- testing/test_config.py
+
+#. Commit and push once your tests pass and you are happy with your change(s)::
+
+ $ git commit -a -m "<commit message>"
+ $ git push -u
+
+ Make sure you add a CHANGELOG message, and add yourself to AUTHORS. If you
+ are unsure about either of these steps, submit your pull request and we'll
+ help you fix it up.
+
+#. Finally, submit a pull request through the GitHub website using this data::
+
+    head-fork: YOUR_GITHUB_USERNAME/pytest
+    compare: your-branch-name
+
+    base-fork: pytest-dev/pytest
+    base: master    # if it's a bugfix
+    base: features  # if it's a feature
+
+
diff --git a/testing/web-platform/tests/tools/pytest/HOWTORELEASE.rst b/testing/web-platform/tests/tools/pytest/HOWTORELEASE.rst
new file mode 100644
index 000000000..3ebfa28b1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/HOWTORELEASE.rst
@@ -0,0 +1,92 @@
+How to release pytest
+--------------------------------------------
+
+Note: this assumes you have already registered on pypi.
+
+0. create the branch release-VERSION
+ use features as base for minor/major releases
+ and master as base for bugfix releases
+
+1. Bump version numbers in _pytest/__init__.py (setup.py reads it)
+
+2. Check and finalize CHANGELOG
+
+3. Write doc/en/announce/release-VERSION.txt and include
+ it in doc/en/announce/index.txt::
+
+ git log 2.8.2..HEAD --format='%aN' | sort -u # lists the names of authors involved
+
+4. Use devpi for uploading a release tarball to a staging area::
+
+ devpi use https://devpi.net/USER/dev
+ devpi upload --formats sdist,bdist_wheel
+
+5. Run from multiple machines::
+
+ devpi use https://devpi.net/USER/dev
+ devpi test pytest==VERSION
+
+6. Check that tests pass for relevant combinations with::
+
+ devpi list pytest
+
+ or look at failures with "devpi list -f pytest".
+
+7. Regenerate the docs examples using tox, and check for regressions::
+
+ tox -e regen
+ git diff
+
+
+8. Build the docs; you need a virtualenv with py and sphinx
+   installed::
+
+ cd doc/en
+ make html
+
+ Commit any changes before tagging the release.
+
+9. Tag the release::
+
+ git tag VERSION
+ git push
+
+10. Upload the docs using doc/en/Makefile::
+
+ cd doc/en
+ make install # or "installall" if you have LaTeX installed for PDF
+
+ This requires ssh-login permission on pytest.org because it uses
+ rsync.
+ Note that the ``install`` target of ``doc/en/Makefile`` defines where the
+ rsync goes to, typically to the "latest" section of pytest.org.
+
+ If you are making a minor release (e.g. 5.4), you also need to manually
+ create a symlink for "latest"::
+
+ ssh pytest-dev@pytest.org
+ ln -s 5.4 latest
+
+ Browse to pytest.org to verify.
+
+11. Publish to pypi::
+
+ devpi push pytest-VERSION pypi:NAME
+
+ where NAME is the name of pypi.python.org as configured in your ``~/.pypirc``
+ file `for devpi <http://doc.devpi.net/latest/quickstart-releaseprocess.html?highlight=pypirc#devpi-push-releasing-to-an-external-index>`_.
+
+
+12. Send release announcement to mailing lists:
+
+ - pytest-dev
+ - testing-in-python
+ - python-announce-list@python.org
+
+
+13. **after the release** Bump the version number in ``_pytest/__init__.py``,
+ to the next Minor release version (i.e. if you released ``pytest-2.8.0``,
+ set it to ``pytest-2.9.0.dev1``).
+
+14. merge the actual release into the master branch and do a pull request against it
+15. merge from master to features
diff --git a/testing/web-platform/tests/tools/pytest/ISSUES.txt b/testing/web-platform/tests/tools/pytest/ISSUES.txt
new file mode 100644
index 000000000..081d727e8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/ISSUES.txt
@@ -0,0 +1,365 @@
+
+
+recorder = monkeypatch.function(".......")
+-------------------------------------------------------------
+tags: nice feature
+
+Like monkeypatch.replace but sets a mock-like call recorder:
+
+ recorder = monkeypatch.function("os.path.abspath")
+ recorder.set_return("/hello")
+ os.path.abspath("hello")
+ call, = recorder.calls
+ assert call.args.path == "hello"
+ assert call.returned == "/hello"
+ ...
+
+Unlike mock, "args.path" acts on the parsed auto-spec'ed ``os.path.abspath``
+so it's independent of whether the client side called "os.path.abspath(path=...)"
+or "os.path.abspath('positional')".
+
+
+refine parametrize API
+-------------------------------------------------------------
+tags: critical feature
+
+extend metafunc.parametrize to directly support indirection, example:
+
+ def setupdb(request, config):
+ # setup "resource" based on test request and the values passed
+ # in to parametrize. setupfunc is called for each such value.
+ # you may use request.addfinalizer() or request.cached_setup ...
+ return dynamic_setup_database(val)
+
+ @pytest.mark.parametrize("db", ["pg", "mysql"], setupfunc=setupdb)
+ def test_heavy_functional_test(db):
+ ...
+
+There would be no need to write or explain funcarg factories and
+their special __ syntax.
+
+The examples and improvements should also show how to put the parametrize
+decorator to a class, to a module or even to a directory. For the directory
+part a conftest.py content like this::
+
+ pytestmark = [
+ @pytest.mark.parametrize_setup("db", ...),
+ ]
+
+probably makes sense in order to keep the declarative nature. This mirrors
+the marker-mechanism with respect to a test module but puts it to a directory
+scale.
+
+When doing larger scoped parametrization it probably becomes necessary
+to allow parametrization to be ignored if the corresponding parameter is not
+used (currently any parametrized argument that is not present in a function will cause a ValueError). Example:
+
+ @pytest.mark.parametrize("db", ..., mustmatch=False)
+
+means to not raise an error but simply ignore the parametrization
+if the signature of a decorated function does not match. XXX is it
+not sufficient to always allow non-matches?
+
+
+allow parametrized attributes on classes
+--------------------------------------------------
+
+tags: wish 2.4
+
+example:
+
+ @pytest.mark.parametrize_attr("db", setupfunc, [1,2,3], scope="class")
+ @pytest.mark.parametrize_attr("tmp", setupfunc, scope="...")
+ class TestMe:
+ def test_hello(self):
+ access self.db ...
+
+this would run the test_hello() function three times with three
+different values for self.db. This could also work with unittest/nose
+style tests, i.e. it leverages existing test suites without needing
+to rewrite them. Together with the previously mentioned setup_test()
+maybe the setupfunc could be omitted?
+
+optimizations
+---------------------------------------------------------------
+tags: 2.4 core
+
+- look at ihook optimization such that all lookups for
+ hooks relating to the same fspath are cached.
+
+fix start/finish partial finalization problem
+---------------------------------------------------------------
+tags: bug core
+
+if a configure/runtest_setup/sessionstart/... hook invocation partially
+fails, the sessionfinish hooks are not called. Each hook implementation
+should rather be responsible for registering a cleanup/finalizer
+appropriately to avoid this issue. Moreover/Alternatively, we could
+record which implementations of a hook succeeded and only call their
+teardown.
+
+
+relax requirement to have tests/testing contain an __init__
+----------------------------------------------------------------
+tags: feature
+bb: http://bitbucket.org/hpk42/py-trunk/issue/64
+
+A local test run of a "tests" directory may work
+while a remote one fails because the tests directory
+does not contain an "__init__.py". Either give
+an error or make it work without the __init__.py,
+i.e. port the nose-logic of unloading a test module.
+
+customize test function collection
+-------------------------------------------------------
+tags: feature
+
+- introduce pytest.mark.nocollect for not considering a function for
+  test collection at all. Maybe also introduce a pytest.mark.test to
+  explicitly mark a function to become a tested one. Look up JUnit ways
+  of tagging tests.
+
+introduce pytest.mark.importorskip
+-------------------------------------------------------
+tags: feature
+
+in addition to the imperative pytest.importorskip also introduce
+a pytest.mark.importorskip so that the test count is more correct.
+
+
+introduce pytest.mark.platform
+-------------------------------------------------------
+tags: feature
+
+Introduce nice-to-spell platform-skipping, examples:
+
+ @pytest.mark.platform("python3")
+ @pytest.mark.platform("not python3")
+ @pytest.mark.platform("win32 and not python3")
+ @pytest.mark.platform("darwin")
+ @pytest.mark.platform("not (jython and win32)")
+ @pytest.mark.platform("not (jython and win32)", xfail=True)
+
+etc. Idea is to allow Python expressions which can operate
+on common spellings for operating systems and python
+interpreter versions.
+
+pytest.mark.xfail signature change
+-------------------------------------------------------
+tags: feature
+
+change to pytest.mark.xfail(reason, (optional)condition)
+to better implement the word meaning. It also signals
+better that we always have some kind of an implementation
+reason that can be formulated.
+Compatibility? How to introduce a new name/keep compat?
+
+allow to non-intrusively apply skips/xfail/marks
+---------------------------------------------------
+tags: feature
+
+use case: mark a module or directory structures
+to be skipped on certain platforms (i.e. no import
+attempt will be made).
+
+consider introducing a hook/mechanism that allows to apply marks
+from conftests or plugins. (See extended parametrization)
+
+
+explicit referencing of conftest.py files
+-----------------------------------------
+tags: feature
+
+allow to name conftest.py files (in sub directories) that should
+be imported early, so as to include command line options.
+
+improve central pytest ini file
+-------------------------------
+tags: feature
+
+introduce more declarative configuration options:
+- (to-be-collected test directories)
+- required plugins
+- test func/class/file matching patterns
+- skip/xfail (non-intrusive)
+- pytest.ini and tox.ini and setup.cfg configuration in the same file
+
+new documentation
+----------------------------------
+tags: feature
+
+- logo pytest
+- examples for unittest or functional testing
+- resource management for functional testing
+- patterns: page object
+
+have imported module mismatch honour relative paths
+--------------------------------------------------------
+tags: bug
+
+With 1.1.1 pytest fails at least on Windows if an import
+is relative and compared against an absolute conftest.py
+path. Normalize.
+
+consider globals: pytest.ensuretemp and config
+--------------------------------------------------------------
+tags: experimental-wish
+
+consider deprecating pytest.ensuretemp and pytest.config
+to further reduce pytest globality. Also consider
+having pytest.config and ensuretemp coming from
+a plugin rather than being there from the start.
+
+
+consider pytest_addsyspath hook
+-----------------------------------------
+tags: wish
+
+pytest could call a new pytest_addsyspath() in order to systematically
+allow manipulation of sys.path and to inhibit it via --no-addsyspath
+in order to more easily run against installed packages.
+
+Alternatively it could also be done via the config object
+and pytest_configure.
+
+
+
+deprecate global pytest.config usage
+----------------------------------------------------------------
+tags: feature
+
+pytest.ensuretemp and pytest.config are probably the last
+objects containing global state. Often using them is not
+necessary. This is about trying to get rid of them, i.e.
+deprecating them and checking with PyPy's usages as well
+as others.
+
+remove deprecated bits in collect.py
+-------------------------------------------------------------------
+tags: feature
+
+In an effort to further simplify code, review and remove deprecated bits
+in collect.py. Probably good:
+- inline consider_file/dir methods, no need to have them
+ subclass-overridable because of hooks
+
+implement fslayout decorator
+---------------------------------
+tags: feature
+
+Improve the way how tests can work with pre-made examples,
+keeping the layout close to the test function:
+
+@pytest.mark.fslayout("""
+ conftest.py:
+ # empty
+ tests/
+ test_%(NAME)s: # becomes test_run1.py
+ def test_function(self):
+ pass
+""")
+def test_run(pytester, fslayout):
+ p = fslayout.findone("test_*.py")
+ result = pytester.runpytest(p)
+ assert result.ret == 0
+ assert result.passed == 1
+
+Another idea is to allow defining a full scenario including the run
+in one content string::
+
+ runscenario("""
+ test_{TESTNAME}.py:
+ import pytest
+ @pytest.mark.xfail
+ def test_that_fails():
+ assert 0
+
+ @pytest.mark.skipif("True")
+ def test_hello():
+ pass
+
+ conftest.py:
+ import pytest
+ def pytest_runsetup_setup(item):
+ pytest.skip("abc")
+
+ runpytest -rsxX
+ *SKIP*{TESTNAME}*
+ *1 skipped*
+ """)
+
+This could be run with at least three different ways of invoking pytest:
+through the shell, through "python -m pytest" and inlined. As inlined
+would be the fastest it could be run first (or "--fast" mode).
+
+
+Create isolate plugin
+---------------------
+tags: feature
+
+The idea is that you can e.g. import modules in a test and afterwards
+sys.modules, sys.meta_path etc would be reverted. It can go further
+than just importing however, e.g. current working directory, file
+descriptors, ...
+
+This would probably be done by marking::
+
+ @pytest.mark.isolate(importing=True, cwd=True, fds=False)
+ def test_foo():
+ ...
+
+With the possibility of doing this globally in an ini-file.
+
+
+fnmatch for test names
+----------------------
+tags: feature-wish
+
+various testsuites use suffixes instead of prefixes for test classes;
+it also lends itself to bdd style test names::
+
+ class UserBehaviour:
+ def anonymous_should_not_have_inbox(user):
+ ...
+ def registred_should_have_inbox(user):
+ ..
+
+using the following in pytest.ini::
+
+ [pytest]
+ python_classes = Test *Behaviour *Test
+ python_functions = test *_should_*
+
+
+mechanism for running named parts of tests with different reporting behaviour
+------------------------------------------------------------------------------
+tags: feature-wish-incomplete
+
+a few use-cases come to mind:
+
+* fail assertions and record that without stopping a complete test
+
+  * this is particularly helpful if a small bit of a test is known to fail/xfail::
+
+      def test_fun():
+          with pytest.section('fdcheck', marks=pytest.mark.xfail_if(...)):
+              breaks_on_windows()
+
+* divide functional/acceptance tests into sections
+* provide a different mechanism for generators, maybe something like::
+
+      def pytest_runtest_call(item):
+          if not generator:
+              ...
+          prepare_check = GeneratorCheckprepare()
+
+          gen = item.obj(**fixtures)
+          for check in gen:
+              id, call = prepare_check(check)
+              # bubble should only prevent exception propagation after a failure
+              # the whole test should still fail
+              # there might be need for a lower level api and taking custom markers into account
+              with pytest.section(id, bubble=False):
+                  call()
+
+
diff --git a/testing/web-platform/tests/tools/pytest/LICENSE b/testing/web-platform/tests/tools/pytest/LICENSE
new file mode 100644
index 000000000..9e27bd784
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2004-2016 Holger Krekel and others
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/testing/web-platform/tests/tools/pytest/MANIFEST.in b/testing/web-platform/tests/tools/pytest/MANIFEST.in
new file mode 100644
index 000000000..266a9184d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/MANIFEST.in
@@ -0,0 +1,34 @@
+include CHANGELOG.rst
+include LICENSE
+include AUTHORS
+
+include README.rst
+include CONTRIBUTING.rst
+
+include tox.ini
+include setup.py
+
+include .coveragerc
+
+include plugin-test.sh
+include requirements-docs.txt
+include runtox.py
+
+recursive-include bench *.py
+recursive-include extra *.py
+
+graft testing
+graft doc
+
+exclude _pytest/impl
+
+graft _pytest/vendored_packages
+
+recursive-exclude * *.pyc *.pyo
+
+exclude appveyor/install.ps1
+exclude appveyor.yml
+exclude appveyor
+
+exclude ISSUES.txt
+exclude HOWTORELEASE.rst
diff --git a/testing/web-platform/tests/tools/pytest/README.rst b/testing/web-platform/tests/tools/pytest/README.rst
new file mode 100644
index 000000000..68fc92211
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/README.rst
@@ -0,0 +1,102 @@
+.. image:: http://pytest.org/latest/_static/pytest1.png
+ :target: http://pytest.org
+ :align: center
+ :alt: pytest
+
+------
+
+.. image:: https://img.shields.io/pypi/v/pytest.svg
+ :target: https://pypi.python.org/pypi/pytest
+.. image:: https://img.shields.io/pypi/pyversions/pytest.svg
+ :target: https://pypi.python.org/pypi/pytest
+.. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg
+ :target: https://coveralls.io/r/pytest-dev/pytest
+.. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master
+ :target: https://travis-ci.org/pytest-dev/pytest
+.. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true
+ :target: https://ci.appveyor.com/project/pytestbot/pytest
+
+The ``pytest`` framework makes it easy to write small tests, yet
+scales to support complex functional testing for applications and libraries.
+
+An example of a simple test:
+
+.. code-block:: python
+
+ # content of test_sample.py
+ def func(x):
+ return x + 1
+
+ def test_answer():
+ assert func(3) == 5
+
+
+To execute it::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.3, pytest-2.8.5, py-1.4.31, pluggy-0.3.1
+ collected 1 items
+
+ test_sample.py F
+
+ ======= FAILURES ========
+ _______ test_answer ________
+
+ def test_answer():
+ > assert func(3) == 5
+ E assert 4 == 5
+ E + where 4 = func(3)
+
+ test_sample.py:5: AssertionError
+ ======= 1 failed in 0.12 seconds ========
+
+Due to ``py.test``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://pytest.org/latest/getting-started.html#our-first-test-run>`_ for more examples.
+
+
+Features
+--------
+
+- Detailed info on failing `assert statements <http://pytest.org/latest/assert.html>`_ (no need to remember ``self.assert*`` names);
+
+- `Auto-discovery
+ <http://pytest.org/latest/goodpractices.html#python-test-discovery>`_
+ of test modules and functions;
+
+- `Modular fixtures <http://pytest.org/latest/fixture.html>`_ for
+ managing small or parametrized long-lived test resources;
+
+- Can run `unittest <http://pytest.org/latest/unittest.html>`_ (or trial),
+ `nose <http://pytest.org/latest/nose.html>`_ test suites out of the box;
+
+- Python2.6+, Python3.2+, PyPy-2.3, Jython-2.5 (untested);
+
+- Rich plugin architecture, with over 150 `external plugins <http://pytest.org/latest/plugins.html#installing-external-plugins-searching>`_ and a thriving community;
+
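+For example, the modular fixtures mentioned above let a test simply name the
+resource it needs; a minimal sketch using the built-in ``tmpdir`` fixture:
+
+.. code-block:: python
+
+    # content of test_tmpdir.py
+    def test_write(tmpdir):
+        p = tmpdir.join("hello.txt")
+        p.write("content")
+        assert p.read() == "content"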
+
+Documentation
+-------------
+
+For full documentation, including installation, tutorials and PDF documents, please see http://pytest.org.
+
+
+Bugs/Requests
+-------------
+
+Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.
+
+
+Changelog
+---------
+
+Consult the `Changelog <http://pytest.org/latest/changelog.html>`_ page for fixes and enhancements of each version.
+
+
+License
+-------
+
+Copyright Holger Krekel and others, 2004-2016.
+
+Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
+
+.. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/__init__.py b/testing/web-platform/tests/tools/pytest/_pytest/__init__.py
new file mode 100644
index 000000000..723fb9e85
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/__init__.py
@@ -0,0 +1,2 @@
+#
+__version__ = '2.9.1'
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/_argcomplete.py b/testing/web-platform/tests/tools/pytest/_pytest/_argcomplete.py
new file mode 100644
index 000000000..955855a96
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/_argcomplete.py
@@ -0,0 +1,101 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code.
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn<TAB>"
+instead of the default "dirname ":
+
+ optparser.add_argument(Config._file_or_dir, nargs='*'
+ ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh )
+uses a python program to examine the startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+ # PYTHON_ARGCOMPLETE_OK
+so that the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked with PYTHON_ARGCOMPLETE_OK.
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+ # PYTHON_ARGCOMPLETE_OK
+ near the top of the main python entry point
+- include in the file calling parse_args():
+ from _argcomplete import try_argcomplete, filescompleter
+ , call try_argcomplete just before parse_args(), and optionally add
+ filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+ completers):
+ export _ARC_DEBUG=1
+- run:
+ python-argcomplete-check-easy-install-script $(which appname)
+ echo $?
+ will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find errors early on using:
+ _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+ which should throw a KeyError: 'COMPLINE' (which is properly set by the
+ global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+ 'Fast file completer class'
+ def __init__(self, directories=True):
+ self.directories = directories
+
+ def __call__(self, prefix, **kwargs):
+ """only called on non option completions"""
+ if os.path.sep in prefix[1:]: #
+ prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+ else:
+ prefix_dir = 0
+ completion = []
+ globbed = []
+ if '*' not in prefix and '?' not in prefix:
+ if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash
+ globbed.extend(glob(prefix + '.*'))
+ prefix += '*'
+ globbed.extend(glob(prefix))
+ for x in sorted(globbed):
+ if os.path.isdir(x):
+ x += '/'
+ # append stripping the prefix (like bash, not like compgen)
+ completion.append(x[prefix_dir:])
+ return completion
+
+if os.environ.get('_ARGCOMPLETE'):
+ try:
+ import argcomplete.completers
+ except ImportError:
+ sys.exit(-1)
+ filescompleter = FastFilesCompleter()
+
+ def try_argcomplete(parser):
+ argcomplete.autocomplete(parser)
+else:
+ def try_argcomplete(parser): pass
+ filescompleter = None
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/_code/__init__.py b/testing/web-platform/tests/tools/pytest/_pytest/_code/__init__.py
new file mode 100644
index 000000000..c046b9716
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/_code/__init__.py
@@ -0,0 +1,12 @@
+""" python inspection/code generation API """
+from .code import Code # noqa
+from .code import ExceptionInfo # noqa
+from .code import Frame # noqa
+from .code import Traceback # noqa
+from .code import getrawcode # noqa
+from .code import patch_builtins # noqa
+from .code import unpatch_builtins # noqa
+from .source import Source # noqa
+from .source import compile_ as compile # noqa
+from .source import getfslineno # noqa
+
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/_code/_py2traceback.py b/testing/web-platform/tests/tools/pytest/_pytest/_code/_py2traceback.py
new file mode 100644
index 000000000..a830d9899
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/_code/_py2traceback.py
@@ -0,0 +1,81 @@
+# copied from python-2.7.3's traceback.py
+# CHANGES:
+# - some_str is replaced, trying to create unicode strings
+#
+import types
+
+def format_exception_only(etype, value):
+ """Format the exception part of a traceback.
+
+ The arguments are the exception type and value such as given by
+ sys.last_type and sys.last_value. The return value is a list of
+ strings, each ending in a newline.
+
+ Normally, the list contains a single string; however, for
+ SyntaxError exceptions, it contains several lines that (when
+ printed) display detailed information about where the syntax
+ error occurred.
+
+ The message indicating which exception occurred is always the last
+ string in the list.
+
+ """
+
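+    # Example (sketch): format_exception_only(ValueError, ValueError("boom"))
+    # returns ['ValueError: boom\n']; SyntaxError values additionally get
+    # "File"/caret location lines.
+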
+ # An instance should not have a meaningful value parameter, but
+ # sometimes does, particularly for string exceptions, such as
+ # >>> raise string1, string2 # deprecated
+ #
+    # Clear these out first because issubclass(string1, SyntaxError)
+ # would throw another exception and mask the original problem.
+ if (isinstance(etype, BaseException) or
+ isinstance(etype, types.InstanceType) or
+ etype is None or type(etype) is str):
+ return [_format_final_exc_line(etype, value)]
+
+ stype = etype.__name__
+
+ if not issubclass(etype, SyntaxError):
+ return [_format_final_exc_line(stype, value)]
+
+ # It was a syntax error; show exactly where the problem was found.
+ lines = []
+ try:
+ msg, (filename, lineno, offset, badline) = value.args
+ except Exception:
+ pass
+ else:
+ filename = filename or "<string>"
+ lines.append(' File "%s", line %d\n' % (filename, lineno))
+ if badline is not None:
+ if isinstance(badline, bytes): # python 2 only
+ badline = badline.decode('utf-8', 'replace')
+ lines.append(u' %s\n' % badline.strip())
+ if offset is not None:
+ caretspace = badline.rstrip('\n')[:offset].lstrip()
+                # non-space whitespace (like tabs) must be kept for alignment
+ caretspace = ((c.isspace() and c or ' ') for c in caretspace)
+ # only three spaces to account for offset1 == pos 0
+ lines.append(' %s^\n' % ''.join(caretspace))
+ value = msg
+
+ lines.append(_format_final_exc_line(stype, value))
+ return lines
+
+def _format_final_exc_line(etype, value):
+ """Return a list of a single line -- normal case for format_exception_only"""
+ valuestr = _some_str(value)
+ if value is None or not valuestr:
+ line = "%s\n" % etype
+ else:
+ line = "%s: %s\n" % (etype, valuestr)
+ return line
+
+def _some_str(value):
+ try:
+ return unicode(value)
+ except Exception:
+ try:
+ return str(value)
+ except Exception:
+ pass
+ return '<unprintable %s object>' % type(value).__name__
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/_code/code.py b/testing/web-platform/tests/tools/pytest/_pytest/_code/code.py
new file mode 100644
index 000000000..bc68aac55
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/_code/code.py
@@ -0,0 +1,795 @@
+import sys
+from inspect import CO_VARARGS, CO_VARKEYWORDS
+
+import py
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+if sys.version_info[0] >= 3:
+ from traceback import format_exception_only
+else:
+ from ._py2traceback import format_exception_only
+
+class Code(object):
+ """ wrapper around Python code objects """
+ def __init__(self, rawcode):
+ if not hasattr(rawcode, "co_filename"):
+ rawcode = getrawcode(rawcode)
+ try:
+ self.filename = rawcode.co_filename
+ self.firstlineno = rawcode.co_firstlineno - 1
+ self.name = rawcode.co_name
+ except AttributeError:
+ raise TypeError("not a code object: %r" %(rawcode,))
+ self.raw = rawcode
+
+ def __eq__(self, other):
+ return self.raw == other.raw
+
+ def __ne__(self, other):
+ return not self == other
+
+ @property
+ def path(self):
+ """ return a path object pointing to source code (note that it
+ might not point to an actually existing file). """
+ p = py.path.local(self.raw.co_filename)
+ # maybe don't try this checking
+ if not p.check():
+ # XXX maybe try harder like the weird logic
+ # in the standard lib [linecache.updatecache] does?
+ p = self.raw.co_filename
+ return p
+
+ @property
+ def fullsource(self):
+ """ return a _pytest._code.Source object for the full source file of the code
+ """
+ from _pytest._code import source
+ full, _ = source.findsource(self.raw)
+ return full
+
+ def source(self):
+ """ return a _pytest._code.Source object for the code object's source only
+ """
+ # return source only for that part of code
+ import _pytest._code
+ return _pytest._code.Source(self.raw)
+
+ def getargs(self, var=False):
+ """ return a tuple with the argument names for the code object
+
+ if 'var' is set True also return the names of the variable and
+ keyword arguments when present
+ """
+        # handy shortcut for getting args
+ raw = self.raw
+ argcount = raw.co_argcount
+ if var:
+ argcount += raw.co_flags & CO_VARARGS
+ argcount += raw.co_flags & CO_VARKEYWORDS
+ return raw.co_varnames[:argcount]
+
+class Frame(object):
+ """Wrapper around a Python frame holding f_locals and f_globals
+ in which expressions can be evaluated."""
+
+ def __init__(self, frame):
+ self.lineno = frame.f_lineno - 1
+ self.f_globals = frame.f_globals
+ self.f_locals = frame.f_locals
+ self.raw = frame
+ self.code = Code(frame.f_code)
+
+ @property
+ def statement(self):
+ """ statement this frame is at """
+ import _pytest._code
+ if self.code.fullsource is None:
+ return _pytest._code.Source("")
+ return self.code.fullsource.getstatement(self.lineno)
+
+ def eval(self, code, **vars):
+ """ evaluate 'code' in the frame
+
+ 'vars' are optional additional local variables
+
+ returns the result of the evaluation
+ """
+ f_locals = self.f_locals.copy()
+ f_locals.update(vars)
+ return eval(code, self.f_globals, f_locals)
+
+ def exec_(self, code, **vars):
+ """ exec 'code' in the frame
+
+        'vars' are optional additional local variables
+ """
+ f_locals = self.f_locals.copy()
+ f_locals.update(vars)
+ py.builtin.exec_(code, self.f_globals, f_locals )
+
+ def repr(self, object):
+ """ return a 'safe' (non-recursive, one-line) string repr for 'object'
+ """
+ return py.io.saferepr(object)
+
+ def is_true(self, object):
+ return object
+
+ def getargs(self, var=False):
+ """ return a list of tuples (name, value) for all arguments
+
+ if 'var' is set True also include the variable and keyword
+ arguments when present
+ """
+ retval = []
+ for arg in self.code.getargs(var):
+ try:
+ retval.append((arg, self.f_locals[arg]))
+ except KeyError:
+ pass # this can occur when using Psyco
+ return retval
+
+class TracebackEntry(object):
+ """ a single entry in a traceback """
+
+ _repr_style = None
+ exprinfo = None
+
+ def __init__(self, rawentry):
+ self._rawentry = rawentry
+ self.lineno = rawentry.tb_lineno - 1
+
+ def set_repr_style(self, mode):
+ assert mode in ("short", "long")
+ self._repr_style = mode
+
+ @property
+ def frame(self):
+ import _pytest._code
+ return _pytest._code.Frame(self._rawentry.tb_frame)
+
+ @property
+ def relline(self):
+ return self.lineno - self.frame.code.firstlineno
+
+ def __repr__(self):
+ return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
+
+ @property
+ def statement(self):
+ """ _pytest._code.Source object for the current statement """
+ source = self.frame.code.fullsource
+ return source.getstatement(self.lineno)
+
+ @property
+ def path(self):
+ """ path to the source code """
+ return self.frame.code.path
+
+ def getlocals(self):
+ return self.frame.f_locals
+    locals = property(getlocals, None, None, "locals of underlying frame")
+
+ def reinterpret(self):
+ """Reinterpret the failing statement and returns a detailed information
+ about what operations are performed."""
+ from _pytest.assertion.reinterpret import reinterpret
+ if self.exprinfo is None:
+ source = py.builtin._totext(self.statement).strip()
+ x = reinterpret(source, self.frame, should_fail=True)
+ if not py.builtin._istext(x):
+ raise TypeError("interpret returned non-string %r" % (x,))
+ self.exprinfo = x
+ return self.exprinfo
+
+ def getfirstlinesource(self):
+ # on Jython this firstlineno can be -1 apparently
+ return max(self.frame.code.firstlineno, 0)
+
+ def getsource(self, astcache=None):
+ """ return failing source code. """
+ # we use the passed in astcache to not reparse asttrees
+ # within exception info printing
+ from _pytest._code.source import getstatementrange_ast
+ source = self.frame.code.fullsource
+ if source is None:
+ return None
+ key = astnode = None
+ if astcache is not None:
+ key = self.frame.code.path
+ if key is not None:
+ astnode = astcache.get(key, None)
+ start = self.getfirstlinesource()
+ try:
+ astnode, _, end = getstatementrange_ast(self.lineno, source,
+ astnode=astnode)
+ except SyntaxError:
+ end = self.lineno + 1
+ else:
+ if key is not None:
+ astcache[key] = astnode
+ return source[start:end]
+
+ source = property(getsource)
+
+ def ishidden(self):
+ """ return True if the current frame has a var __tracebackhide__
+ resolving to True
+
+ mostly for internal use
+ """
+ try:
+ return self.frame.f_locals['__tracebackhide__']
+ except KeyError:
+ try:
+ return self.frame.f_globals['__tracebackhide__']
+ except KeyError:
+ return False
+
+ def __str__(self):
+ try:
+ fn = str(self.path)
+ except py.error.Error:
+ fn = '???'
+ name = self.frame.code.name
+ try:
+ line = str(self.statement).lstrip()
+ except KeyboardInterrupt:
+ raise
+ except:
+ line = "???"
+ return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
+
+ def name(self):
+ return self.frame.code.raw.co_name
+    name = property(name, None, None, "co_name of underlying code")
+
+class Traceback(list):
+ """ Traceback objects encapsulate and offer higher level
+ access to Traceback entries.
+ """
+ Entry = TracebackEntry
+ def __init__(self, tb):
+ """ initialize from given python traceback object. """
+ if hasattr(tb, 'tb_next'):
+ def f(cur):
+ while cur is not None:
+ yield self.Entry(cur)
+ cur = cur.tb_next
+ list.__init__(self, f(tb))
+ else:
+ list.__init__(self, tb)
+
+ def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
+ """ return a Traceback instance wrapping part of this Traceback
+
+        by providing any combination of path, lineno and firstlineno, the
+ first frame to start the to-be-returned traceback is determined
+
+ this allows cutting the first part of a Traceback instance e.g.
+ for formatting reasons (removing some uninteresting bits that deal
+ with handling of the exception/traceback)
+ """
+ for x in self:
+ code = x.frame.code
+ codepath = code.path
+ if ((path is None or codepath == path) and
+ (excludepath is None or not hasattr(codepath, 'relto') or
+ not codepath.relto(excludepath)) and
+ (lineno is None or x.lineno == lineno) and
+ (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
+ return Traceback(x._rawentry)
+ return self
+
+ def __getitem__(self, key):
+ val = super(Traceback, self).__getitem__(key)
+ if isinstance(key, type(slice(0))):
+ val = self.__class__(val)
+ return val
+
+ def filter(self, fn=lambda x: not x.ishidden()):
+ """ return a Traceback instance with certain items removed
+
+        fn is a function that gets a single argument, a TracebackEntry
+ instance, and should return True when the item should be added
+ to the Traceback, False when not
+
+        by default this removes all the TracebackEntry items which are hidden
+ (see ishidden() above)
+ """
+ return Traceback(filter(fn, self))
+
+ def getcrashentry(self):
+ """ return last non-hidden traceback entry that lead
+ to the exception of a traceback.
+ """
+ for i in range(-1, -len(self)-1, -1):
+ entry = self[i]
+ if not entry.ishidden():
+ return entry
+ return self[-1]
+
+ def recursionindex(self):
+ """ return the index of the frame/TracebackItem where recursion
+ originates if appropriate, None if no recursion occurred
+ """
+ cache = {}
+ for i, entry in enumerate(self):
+ # id for the code.raw is needed to work around
+ # the strange metaprogramming in the decorator lib from pypi
+ # which generates code objects that have hash/value equality
+ #XXX needs a test
+ key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
+ #print "checking for recursion at", key
+ l = cache.setdefault(key, [])
+ if l:
+ f = entry.frame
+ loc = f.f_locals
+ for otherloc in l:
+ if f.is_true(f.eval(co_equal,
+ __recursioncache_locals_1=loc,
+ __recursioncache_locals_2=otherloc)):
+ return i
+ l.append(entry.frame.f_locals)
+ return None
+
+co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
+ '?', 'eval')
+
+class ExceptionInfo(object):
+ """ wraps sys.exc_info() objects and offers
+ help for navigating the traceback.
+ """
+ _striptext = ''
+ def __init__(self, tup=None, exprinfo=None):
+ import _pytest._code
+ if tup is None:
+ tup = sys.exc_info()
+ if exprinfo is None and isinstance(tup[1], AssertionError):
+ exprinfo = getattr(tup[1], 'msg', None)
+ if exprinfo is None:
+ exprinfo = str(tup[1])
+ if exprinfo and exprinfo.startswith('assert '):
+ self._striptext = 'AssertionError: '
+ self._excinfo = tup
+ #: the exception class
+ self.type = tup[0]
+ #: the exception instance
+ self.value = tup[1]
+ #: the exception raw traceback
+ self.tb = tup[2]
+ #: the exception type name
+ self.typename = self.type.__name__
+ #: the exception traceback (_pytest._code.Traceback instance)
+ self.traceback = _pytest._code.Traceback(self.tb)
+
+ def __repr__(self):
+ return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
+
+ def exconly(self, tryshort=False):
+ """ return the exception as a string
+
+ when 'tryshort' resolves to True, and the exception is a
+ _pytest._code._AssertionError, only the actual exception part of
+ the exception representation is returned (so 'AssertionError: ' is
+ removed from the beginning)
+ """
+ lines = format_exception_only(self.type, self.value)
+ text = ''.join(lines)
+ text = text.rstrip()
+ if tryshort:
+ if text.startswith(self._striptext):
+ text = text[len(self._striptext):]
+ return text
+
+ def errisinstance(self, exc):
+ """ return True if the exception is an instance of exc """
+ return isinstance(self.value, exc)
+
+ def _getreprcrash(self):
+ exconly = self.exconly(tryshort=True)
+ entry = self.traceback.getcrashentry()
+ path, lineno = entry.frame.code.raw.co_filename, entry.lineno
+ return ReprFileLocation(path, lineno+1, exconly)
+
+ def getrepr(self, showlocals=False, style="long",
+ abspath=False, tbfilter=True, funcargs=False):
+ """ return str()able representation of this exception info.
+ showlocals: show locals per traceback entry
+ style: long|short|no|native traceback style
+ tbfilter: hide entries (where __tracebackhide__ is true)
+
+        in case of style==native, tbfilter and showlocals are ignored.
+ """
+ if style == 'native':
+ return ReprExceptionInfo(ReprTracebackNative(
+ py.std.traceback.format_exception(
+ self.type,
+ self.value,
+ self.traceback[0]._rawentry,
+ )), self._getreprcrash())
+
+ fmt = FormattedExcinfo(showlocals=showlocals, style=style,
+ abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+ return fmt.repr_excinfo(self)
+
+ def __str__(self):
+ entry = self.traceback[-1]
+ loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+ return str(loc)
+
+ def __unicode__(self):
+ entry = self.traceback[-1]
+ loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+ return unicode(loc)
+
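+# Construction sketch (hypothetical snippet; ExceptionInfo() with no arguments
+# wraps the exception currently being handled, via sys.exc_info()):
+#
+#   try:
+#       1 / 0
+#   except ZeroDivisionError:
+#       excinfo = ExceptionInfo()
+#       excinfo.typename                 # 'ZeroDivisionError'
+#       excinfo.getrepr(style="short")   # ReprExceptionInfo, str()able
+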
+
+class FormattedExcinfo(object):
+ """ presenting information about failing Functions and Generators. """
+ # for traceback entries
+ flow_marker = ">"
+ fail_marker = "E"
+
+ def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
+ self.showlocals = showlocals
+ self.style = style
+ self.tbfilter = tbfilter
+ self.funcargs = funcargs
+ self.abspath = abspath
+ self.astcache = {}
+
+ def _getindent(self, source):
+ # figure out indent for given source
+ try:
+ s = str(source.getstatement(len(source)-1))
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ s = str(source[-1])
+ except KeyboardInterrupt:
+ raise
+ except:
+ return 0
+ return 4 + (len(s) - len(s.lstrip()))
+
+ def _getentrysource(self, entry):
+ source = entry.getsource(self.astcache)
+ if source is not None:
+ source = source.deindent()
+ return source
+
+ def _saferepr(self, obj):
+ return py.io.saferepr(obj)
+
+ def repr_args(self, entry):
+ if self.funcargs:
+ args = []
+ for argname, argvalue in entry.frame.getargs(var=True):
+ args.append((argname, self._saferepr(argvalue)))
+ return ReprFuncArgs(args)
+
+ def get_source(self, source, line_index=-1, excinfo=None, short=False):
+ """ return formatted and marked up source lines. """
+ import _pytest._code
+ lines = []
+ if source is None or line_index >= len(source.lines):
+ source = _pytest._code.Source("???")
+ line_index = 0
+ if line_index < 0:
+ line_index += len(source)
+ space_prefix = " "
+ if short:
+ lines.append(space_prefix + source.lines[line_index].strip())
+ else:
+ for line in source.lines[:line_index]:
+ lines.append(space_prefix + line)
+ lines.append(self.flow_marker + " " + source.lines[line_index])
+ for line in source.lines[line_index+1:]:
+ lines.append(space_prefix + line)
+ if excinfo is not None:
+ indent = 4 if short else self._getindent(source)
+ lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+ return lines
+
+ def get_exconly(self, excinfo, indent=4, markall=False):
+ lines = []
+ indent = " " * indent
+ # get the real exception information out
+ exlines = excinfo.exconly(tryshort=True).split('\n')
+ failindent = self.fail_marker + indent[1:]
+ for line in exlines:
+ lines.append(failindent + line)
+ if not markall:
+ failindent = indent
+ return lines
+
+ def repr_locals(self, locals):
+ if self.showlocals:
+ lines = []
+ keys = [loc for loc in locals if loc[0] != "@"]
+ keys.sort()
+ for name in keys:
+ value = locals[name]
+ if name == '__builtins__':
+ lines.append("__builtins__ = <builtins>")
+ else:
+ # This formatting could all be handled by the
+ # _repr() function, which is only reprlib.Repr in
+ # disguise, so is very configurable.
+ str_repr = self._saferepr(value)
+ #if len(str_repr) < 70 or not isinstance(value,
+ # (list, tuple, dict)):
+ lines.append("%-10s = %s" %(name, str_repr))
+ #else:
+ # self._line("%-10s =\\" % (name,))
+ # # XXX
+ # py.std.pprint.pprint(value, stream=self.excinfowriter)
+ return ReprLocals(lines)
+
+ def repr_traceback_entry(self, entry, excinfo=None):
+ import _pytest._code
+ source = self._getentrysource(entry)
+ if source is None:
+ source = _pytest._code.Source("???")
+ line_index = 0
+ else:
+ # entry.getfirstlinesource() can be -1, should be 0 on jython
+ line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
+
+ lines = []
+ style = entry._repr_style
+ if style is None:
+ style = self.style
+ if style in ("short", "long"):
+ short = style == "short"
+ reprargs = self.repr_args(entry) if not short else None
+ s = self.get_source(source, line_index, excinfo, short=short)
+ lines.extend(s)
+ if short:
+ message = "in %s" %(entry.name)
+ else:
+ message = excinfo and excinfo.typename or ""
+ path = self._makepath(entry.path)
+ filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
+ localsrepr = None
+ if not short:
+ localsrepr = self.repr_locals(entry.locals)
+ return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
+ if excinfo:
+ lines.extend(self.get_exconly(excinfo, indent=4))
+ return ReprEntry(lines, None, None, None, style)
+
+ def _makepath(self, path):
+ if not self.abspath:
+ try:
+ np = py.path.local().bestrelpath(path)
+ except OSError:
+ return path
+ if len(np) < len(str(path)):
+ path = np
+ return path
+
+ def repr_traceback(self, excinfo):
+ traceback = excinfo.traceback
+ if self.tbfilter:
+ traceback = traceback.filter()
+ recursionindex = None
+ if excinfo.errisinstance(RuntimeError):
+ if "maximum recursion depth exceeded" in str(excinfo.value):
+ recursionindex = traceback.recursionindex()
+ last = traceback[-1]
+ entries = []
+ extraline = None
+ for index, entry in enumerate(traceback):
+ einfo = (last == entry) and excinfo or None
+ reprentry = self.repr_traceback_entry(entry, einfo)
+ entries.append(reprentry)
+ if index == recursionindex:
+ extraline = "!!! Recursion detected (same locals & position)"
+ break
+ return ReprTraceback(entries, extraline, style=self.style)
+
+ def repr_excinfo(self, excinfo):
+ reprtraceback = self.repr_traceback(excinfo)
+ reprcrash = excinfo._getreprcrash()
+ return ReprExceptionInfo(reprtraceback, reprcrash)
+
+class TerminalRepr:
+ def __str__(self):
+ s = self.__unicode__()
+ if sys.version_info[0] < 3:
+ s = s.encode('utf-8')
+ return s
+
+ def __unicode__(self):
+ # FYI this is called from pytest-xdist's serialization of exception
+ # information.
+ io = py.io.TextIO()
+ tw = py.io.TerminalWriter(file=io)
+ self.toterminal(tw)
+ return io.getvalue().strip()
+
+ def __repr__(self):
+ return "<%s instance at %0x>" %(self.__class__, id(self))
+
+
+class ReprExceptionInfo(TerminalRepr):
+ def __init__(self, reprtraceback, reprcrash):
+ self.reprtraceback = reprtraceback
+ self.reprcrash = reprcrash
+ self.sections = []
+
+ def addsection(self, name, content, sep="-"):
+ self.sections.append((name, content, sep))
+
+ def toterminal(self, tw):
+ self.reprtraceback.toterminal(tw)
+ for name, content, sep in self.sections:
+ tw.sep(sep, name)
+ tw.line(content)
+
+class ReprTraceback(TerminalRepr):
+ entrysep = "_ "
+
+ def __init__(self, reprentries, extraline, style):
+ self.reprentries = reprentries
+ self.extraline = extraline
+ self.style = style
+
+ def toterminal(self, tw):
+ # the entries might have different styles
+ for i, entry in enumerate(self.reprentries):
+ if entry.style == "long":
+ tw.line("")
+ entry.toterminal(tw)
+ if i < len(self.reprentries) - 1:
+ next_entry = self.reprentries[i+1]
+ if entry.style == "long" or \
+ entry.style == "short" and next_entry.style == "long":
+ tw.sep(self.entrysep)
+
+ if self.extraline:
+ tw.line(self.extraline)
+
+class ReprTracebackNative(ReprTraceback):
+ def __init__(self, tblines):
+ self.style = "native"
+ self.reprentries = [ReprEntryNative(tblines)]
+ self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+ style = "native"
+
+ def __init__(self, tblines):
+ self.lines = tblines
+
+ def toterminal(self, tw):
+ tw.write("".join(self.lines))
+
+class ReprEntry(TerminalRepr):
+ localssep = "_ "
+
+ def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
+ self.lines = lines
+ self.reprfuncargs = reprfuncargs
+ self.reprlocals = reprlocals
+ self.reprfileloc = filelocrepr
+ self.style = style
+
+ def toterminal(self, tw):
+ if self.style == "short":
+ self.reprfileloc.toterminal(tw)
+ for line in self.lines:
+ red = line.startswith("E ")
+ tw.line(line, bold=True, red=red)
+ #tw.line("")
+ return
+ if self.reprfuncargs:
+ self.reprfuncargs.toterminal(tw)
+ for line in self.lines:
+ red = line.startswith("E ")
+ tw.line(line, bold=True, red=red)
+ if self.reprlocals:
+ #tw.sep(self.localssep, "Locals")
+ tw.line("")
+ self.reprlocals.toterminal(tw)
+ if self.reprfileloc:
+ if self.lines:
+ tw.line("")
+ self.reprfileloc.toterminal(tw)
+
+ def __str__(self):
+ return "%s\n%s\n%s" % ("\n".join(self.lines),
+ self.reprlocals,
+ self.reprfileloc)
+
+class ReprFileLocation(TerminalRepr):
+ def __init__(self, path, lineno, message):
+ self.path = str(path)
+ self.lineno = lineno
+ self.message = message
+
+ def toterminal(self, tw):
+ # filename and lineno output for each entry,
+        # using an output format that most editors understand
+ msg = self.message
+ i = msg.find("\n")
+ if i != -1:
+ msg = msg[:i]
+ tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
+
+class ReprLocals(TerminalRepr):
+ def __init__(self, lines):
+ self.lines = lines
+
+ def toterminal(self, tw):
+ for line in self.lines:
+ tw.line(line)
+
+class ReprFuncArgs(TerminalRepr):
+ def __init__(self, args):
+ self.args = args
+
+ def toterminal(self, tw):
+ if self.args:
+ linesofar = ""
+ for name, value in self.args:
+ ns = "%s = %s" %(name, value)
+ if len(ns) + len(linesofar) + 2 > tw.fullwidth:
+ if linesofar:
+ tw.line(linesofar)
+ linesofar = ns
+ else:
+ if linesofar:
+ linesofar += ", " + ns
+ else:
+ linesofar = ns
+ if linesofar:
+ tw.line(linesofar)
+ tw.line("")
+
+
+
+oldbuiltins = {}
+
+def patch_builtins(assertion=True, compile=True):
+ """ put compile and AssertionError builtins to Python's builtins. """
+ if assertion:
+ from _pytest.assertion import reinterpret
+ l = oldbuiltins.setdefault('AssertionError', [])
+ l.append(py.builtin.builtins.AssertionError)
+ py.builtin.builtins.AssertionError = reinterpret.AssertionError
+ if compile:
+ import _pytest._code
+ l = oldbuiltins.setdefault('compile', [])
+ l.append(py.builtin.builtins.compile)
+ py.builtin.builtins.compile = _pytest._code.compile
+
+def unpatch_builtins(assertion=True, compile=True):
+ """ remove compile and AssertionError builtins from Python builtins. """
+ if assertion:
+ py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
+ if compile:
+ py.builtin.builtins.compile = oldbuiltins['compile'].pop()
+
+def getrawcode(obj, trycall=True):
+ """ return code object for given function. """
+ try:
+ return obj.__code__
+ except AttributeError:
+ obj = getattr(obj, 'im_func', obj)
+ obj = getattr(obj, 'func_code', obj)
+ obj = getattr(obj, 'f_code', obj)
+ obj = getattr(obj, '__code__', obj)
+ if trycall and not hasattr(obj, 'co_firstlineno'):
+ if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+ x = getrawcode(obj.__call__, trycall=False)
+ if hasattr(x, 'co_firstlineno'):
+ return x
+ return obj
+
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/_code/source.py b/testing/web-platform/tests/tools/pytest/_pytest/_code/source.py
new file mode 100644
index 000000000..a1521f8a2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/_code/source.py
@@ -0,0 +1,421 @@
+from __future__ import generators
+
+from bisect import bisect_right
+import sys
+import inspect, tokenize
+import py
+from types import ModuleType
+cpy_compile = compile
+
+try:
+ import _ast
+ from _ast import PyCF_ONLY_AST as _AST_FLAG
+except ImportError:
+ _AST_FLAG = 0
+ _ast = None
+
+
+class Source(object):
+ """ a immutable object holding a source code fragment,
+ possibly deindenting it.
+ """
+ _compilecounter = 0
+ def __init__(self, *parts, **kwargs):
+ self.lines = lines = []
+ de = kwargs.get('deindent', True)
+ rstrip = kwargs.get('rstrip', True)
+ for part in parts:
+ if not part:
+ partlines = []
+ if isinstance(part, Source):
+ partlines = part.lines
+ elif isinstance(part, (tuple, list)):
+ partlines = [x.rstrip("\n") for x in part]
+ elif isinstance(part, py.builtin._basestring):
+ partlines = part.split('\n')
+ if rstrip:
+ while partlines:
+ if partlines[-1].strip():
+ break
+ partlines.pop()
+ else:
+ partlines = getsource(part, deindent=de).lines
+ if de:
+ partlines = deindent(partlines)
+ lines.extend(partlines)
+
+ def __eq__(self, other):
+ try:
+ return self.lines == other.lines
+ except AttributeError:
+ if isinstance(other, str):
+ return str(self) == other
+ return False
+
+ def __getitem__(self, key):
+ if isinstance(key, int):
+ return self.lines[key]
+ else:
+ if key.step not in (None, 1):
+ raise IndexError("cannot slice a Source with a step")
+ return self.__getslice__(key.start, key.stop)
+
+ def __len__(self):
+ return len(self.lines)
+
+ def __getslice__(self, start, end):
+ newsource = Source()
+ newsource.lines = self.lines[start:end]
+ return newsource
+
+ def strip(self):
+ """ return new source object with trailing
+ and leading blank lines removed.
+ """
+ start, end = 0, len(self)
+ while start < end and not self.lines[start].strip():
+ start += 1
+ while end > start and not self.lines[end-1].strip():
+ end -= 1
+ source = Source()
+ source.lines[:] = self.lines[start:end]
+ return source
+
+ def putaround(self, before='', after='', indent=' ' * 4):
+ """ return a copy of the source object with
+ 'before' and 'after' wrapped around it.
+ """
+ before = Source(before)
+ after = Source(after)
+ newsource = Source()
+ lines = [ (indent + line) for line in self.lines]
+ newsource.lines = before.lines + lines + after.lines
+ return newsource
+
+ def indent(self, indent=' ' * 4):
+ """ return a copy of the source object with
+ all lines indented by the given indent-string.
+ """
+ newsource = Source()
+ newsource.lines = [(indent+line) for line in self.lines]
+ return newsource
+
+ def getstatement(self, lineno, assertion=False):
+ """ return Source statement which contains the
+ given linenumber (counted from 0).
+ """
+ start, end = self.getstatementrange(lineno, assertion)
+ return self[start:end]
+
+ def getstatementrange(self, lineno, assertion=False):
+ """ return (start, end) tuple which spans the minimal
+        statement region which contains the given lineno.
+ """
+ if not (0 <= lineno < len(self)):
+ raise IndexError("lineno out of range")
+ ast, start, end = getstatementrange_ast(lineno, self)
+ return start, end
+
+ def deindent(self, offset=None):
+ """ return a new source object deindented by offset.
+ If offset is None then guess an indentation offset from
+ the first non-blank line. Subsequent lines which have a
+ lower indentation offset will be copied verbatim as
+ they are assumed to be part of multilines.
+ """
+ # XXX maybe use the tokenizer to properly handle multiline
+ # strings etc.pp?
+ newsource = Source()
+ newsource.lines[:] = deindent(self.lines, offset)
+ return newsource
+
+ def isparseable(self, deindent=True):
+ """ return True if source is parseable, heuristically
+ deindenting it by default.
+ """
+ try:
+ import parser
+ except ImportError:
+ syntax_checker = lambda x: compile(x, 'asd', 'exec')
+ else:
+ syntax_checker = parser.suite
+
+ if deindent:
+ source = str(self.deindent())
+ else:
+ source = str(self)
+ try:
+ #compile(source+'\n', "x", "exec")
+ syntax_checker(source+'\n')
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ return False
+ else:
+ return True
+
+ def __str__(self):
+ return "\n".join(self.lines)
+
+ def compile(self, filename=None, mode='exec',
+ flag=generators.compiler_flag,
+ dont_inherit=0, _genframe=None):
+ """ return compiled code object. if filename is None
+ invent an artificial filename which displays
+ the source/line position of the caller frame.
+ """
+ if not filename or py.path.local(filename).check(file=0):
+ if _genframe is None:
+ _genframe = sys._getframe(1) # the caller
+ fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
+ base = "<%d-codegen " % self._compilecounter
+ self.__class__._compilecounter += 1
+ if not filename:
+ filename = base + '%s:%d>' % (fn, lineno)
+ else:
+ filename = base + '%r %s:%d>' % (filename, fn, lineno)
+ source = "\n".join(self.lines) + '\n'
+ try:
+ co = cpy_compile(source, filename, mode, flag)
+ except SyntaxError:
+ ex = sys.exc_info()[1]
+ # re-represent syntax errors from parsing python strings
+ msglines = self.lines[:ex.lineno]
+ if ex.offset:
+ msglines.append(" "*ex.offset + '^')
+ msglines.append("(code was compiled probably from here: %s)" % filename)
+ newex = SyntaxError('\n'.join(msglines))
+ newex.offset = ex.offset
+ newex.lineno = ex.lineno
+ newex.text = ex.text
+ raise newex
+ else:
+ if flag & _AST_FLAG:
+ return co
+ lines = [(x + "\n") for x in self.lines]
+ if sys.version_info[0] >= 3:
+ # XXX py3's inspect.getsourcefile() checks for a module
+ # and a pep302 __loader__ ... we don't have a module
+ # at code compile-time so we need to fake it here
+ m = ModuleType("_pycodecompile_pseudo_module")
+ py.std.inspect.modulesbyfile[filename] = None
+ py.std.sys.modules[None] = m
+ m.__loader__ = 1
+ py.std.linecache.cache[filename] = (1, None, lines, filename)
+ return co
+
+#
+# public API shortcut functions
+#
+
+def compile_(source, filename=None, mode='exec', flags=
+ generators.compiler_flag, dont_inherit=0):
+ """ compile the given source to a raw code object,
+ and maintain an internal cache which allows later
+ retrieval of the source code for the code object
+ and any recursively created code objects.
+ """
+ if _ast is not None and isinstance(source, _ast.AST):
+ # XXX should Source support having AST?
+ return cpy_compile(source, filename, mode, flags, dont_inherit)
+ _genframe = sys._getframe(1) # the caller
+ s = Source(source)
+ co = s.compile(filename, mode, flags, _genframe=_genframe)
+ return co
+
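+# Usage sketch (compile_ is re-exported as _pytest._code.compile; tracebacks
+# of the resulting code object point at an artificial "<N-codegen ...>"
+# filename rather than a real file):
+#
+#   co = compile_("assert 2 + 2 == 4\n")
+#   exec(co)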
+
+def getfslineno(obj):
+ """ Return source location (path, lineno) for the given object.
+ If the source cannot be determined return ("", -1)
+ """
+ import _pytest._code
+ try:
+ code = _pytest._code.Code(obj)
+ except TypeError:
+ try:
+ fn = (py.std.inspect.getsourcefile(obj) or
+ py.std.inspect.getfile(obj))
+ except TypeError:
+ return "", -1
+
+ fspath = fn and py.path.local(fn) or None
+ lineno = -1
+ if fspath:
+ try:
+ _, lineno = findsource(obj)
+ except IOError:
+ pass
+ else:
+ fspath = code.path
+ lineno = code.firstlineno
+ assert isinstance(lineno, int)
+ return fspath, lineno
+
+#
+# helper functions
+#
+
+def findsource(obj):
+ try:
+ sourcelines, lineno = py.std.inspect.findsource(obj)
+ except py.builtin._sysex:
+ raise
+ except:
+ return None, -1
+ source = Source()
+ source.lines = [line.rstrip() for line in sourcelines]
+ return source, lineno
+
+def getsource(obj, **kwargs):
+ import _pytest._code
+ obj = _pytest._code.getrawcode(obj)
+ try:
+ strsrc = inspect.getsource(obj)
+ except IndentationError:
+ strsrc = "\"Buggy python version consider upgrading, cannot get source\""
+ assert isinstance(strsrc, str)
+ return Source(strsrc, **kwargs)
+
+def deindent(lines, offset=None):
+ if offset is None:
+ for line in lines:
+ line = line.expandtabs()
+ s = line.lstrip()
+ if s:
+ offset = len(line)-len(s)
+ break
+ else:
+ offset = 0
+ if offset == 0:
+ return list(lines)
+ newlines = []
+ def readline_generator(lines):
+ for line in lines:
+ yield line + '\n'
+ while True:
+ yield ''
+
+ it = readline_generator(lines)
+
+ try:
+ for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
+ if sline > len(lines):
+ break # End of input reached
+ if sline > len(newlines):
+ line = lines[sline - 1].expandtabs()
+ if line.lstrip() and line[:offset].isspace():
+ line = line[offset:] # Deindent
+ newlines.append(line)
+
+ for i in range(sline, eline):
+ # Don't deindent continuing lines of
+ # multiline tokens (i.e. multiline strings)
+ newlines.append(lines[i])
+ except (IndentationError, tokenize.TokenError):
+ pass
+ # Add any lines we didn't see. E.g. if an exception was raised.
+ newlines.extend(lines[len(newlines):])
+ return newlines
+
+
+def get_statement_startend2(lineno, node):
+ import ast
+ # flatten all statements and except handlers into one lineno-list
+ # AST's line numbers start indexing at 1
+ l = []
+ for x in ast.walk(node):
+ if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
+ l.append(x.lineno - 1)
+ for name in "finalbody", "orelse":
+ val = getattr(x, name, None)
+ if val:
+ # treat the finally/orelse part as its own statement
+ l.append(val[0].lineno - 1 - 1)
+ l.sort()
+ insert_index = bisect_right(l, lineno)
+ start = l[insert_index - 1]
+ if insert_index >= len(l):
+ end = None
+ else:
+ end = l[insert_index]
+ return start, end
+
+
+def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
+ if astnode is None:
+ content = str(source)
+ if sys.version_info < (2,7):
+ content += "\n"
+ try:
+ astnode = compile(content, "source", "exec", 1024) # 1024 for AST
+ except ValueError:
+ start, end = getstatementrange_old(lineno, source, assertion)
+ return None, start, end
+ start, end = get_statement_startend2(lineno, astnode)
+ # we need to correct the end:
+ # - ast-parsing strips comments
+ # - there might be empty lines
+ # - we might have lesser indented code blocks at the end
+ if end is None:
+ end = len(source.lines)
+
+ if end > start + 1:
+ # make sure we don't span differently indented code blocks
+        # by using the BlockFinder helper which inspect.getsource() itself uses
+ block_finder = inspect.BlockFinder()
+ # if we start with an indented line, put blockfinder to "started" mode
+ block_finder.started = source.lines[start][0].isspace()
+ it = ((x + "\n") for x in source.lines[start:end])
+ try:
+ for tok in tokenize.generate_tokens(lambda: next(it)):
+ block_finder.tokeneater(*tok)
+ except (inspect.EndOfBlock, IndentationError):
+ end = block_finder.last + start
+ except Exception:
+ pass
+
+ # the end might still point to a comment or empty line, correct it
+ while end:
+ line = source.lines[end - 1].lstrip()
+ if line.startswith("#") or not line:
+ end -= 1
+ else:
+ break
+ return astnode, start, end
+
+
+def getstatementrange_old(lineno, source, assertion=False):
+ """ return (start, end) tuple which spans the minimal
+    statement region which contains the given lineno.
+ raise an IndexError if no such statementrange can be found.
+ """
+ # XXX this logic is only used on python2.4 and below
+ # 1. find the start of the statement
+ from codeop import compile_command
+ for start in range(lineno, -1, -1):
+ if assertion:
+ line = source.lines[start]
+ # the following lines are not fully tested, change with care
+ if 'super' in line and 'self' in line and '__init__' in line:
+ raise IndexError("likely a subclass")
+ if "assert" not in line and "raise" not in line:
+ continue
+ trylines = source.lines[start:lineno+1]
+ # quick hack to prepare parsing an indented line with
+ # compile_command() (which errors on "return" outside defs)
+ trylines.insert(0, 'def xxx():')
+ trysource = '\n '.join(trylines)
+ # ^ space here
+ try:
+ compile_command(trysource)
+ except (SyntaxError, OverflowError, ValueError):
+ continue
+
+ # 2. find the end of the statement
+ for end in range(lineno+1, len(source)+1):
+ trysource = source[start:end]
+ if trysource.isparseable():
+ return start, end
+ raise SyntaxError("no valid source range around line %d " % (lineno,))
+
+
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/_pluggy.py b/testing/web-platform/tests/tools/pytest/_pytest/_pluggy.py
new file mode 100644
index 000000000..87d32cf8d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/_pluggy.py
@@ -0,0 +1,11 @@
+"""
+imports symbols from vendored "pluggy" if available, otherwise
+falls back to importing "pluggy" from the default namespace.
+"""
+
+try:
+ from _pytest.vendored_packages.pluggy import * # noqa
+ from _pytest.vendored_packages.pluggy import __version__ # noqa
+except ImportError:
+ from pluggy import * # noqa
+ from pluggy import __version__ # noqa
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/assertion/__init__.py b/testing/web-platform/tests/tools/pytest/_pytest/assertion/__init__.py
new file mode 100644
index 000000000..6921deb2a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/assertion/__init__.py
@@ -0,0 +1,176 @@
+"""
+support for presenting detailed information in failing assertions.
+"""
+import py
+import os
+import sys
+from _pytest.monkeypatch import monkeypatch
+from _pytest.assertion import util
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("debugconfig")
+ group.addoption('--assert',
+ action="store",
+ dest="assertmode",
+ choices=("rewrite", "reinterp", "plain",),
+ default="rewrite",
+ metavar="MODE",
+ help="""control assertion debugging tools. 'plain'
+ performs no assertion debugging. 'reinterp'
+ reinterprets assert statements after they failed
+ to provide assertion expression information.
+ 'rewrite' (the default) rewrites assert
+ statements in test modules on import to
+ provide assert expression information. """)
+ group.addoption('--no-assert',
+ action="store_true",
+ default=False,
+ dest="noassert",
+ help="DEPRECATED equivalent to --assert=plain")
+ group.addoption('--nomagic', '--no-magic',
+ action="store_true",
+ default=False,
+ help="DEPRECATED equivalent to --assert=plain")
+
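+# Command-line sketch of the modes registered above (invocations assume the
+# usual py.test console script):
+#
+#   py.test                     # default: --assert=rewrite
+#   py.test --assert=reinterp   # re-evaluate failing asserts for details
+#   py.test --assert=plain      # no assertion debugging at all
+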
+
+class AssertionState:
+ """State for the assertion plugin."""
+
+ def __init__(self, config, mode):
+ self.mode = mode
+ self.trace = config.trace.root.get("assertion")
+
+
+def pytest_configure(config):
+ mode = config.getvalue("assertmode")
+ if config.getvalue("noassert") or config.getvalue("nomagic"):
+ mode = "plain"
+ if mode == "rewrite":
+ try:
+ import ast # noqa
+ except ImportError:
+ mode = "reinterp"
+ else:
+ # Both Jython and CPython 2.6.0 have AST bugs that make the
+ # assertion rewriting hook malfunction.
+ if (sys.platform.startswith('java') or
+ sys.version_info[:3] == (2, 6, 0)):
+ mode = "reinterp"
+ if mode != "plain":
+ _load_modules(mode)
+ m = monkeypatch()
+ config._cleanup.append(m.undo)
+ m.setattr(py.builtin.builtins, 'AssertionError',
+ reinterpret.AssertionError) # noqa
+ hook = None
+ if mode == "rewrite":
+ hook = rewrite.AssertionRewritingHook() # noqa
+ sys.meta_path.insert(0, hook)
+ warn_about_missing_assertion(mode)
+ config._assertstate = AssertionState(config, mode)
+ config._assertstate.hook = hook
+ config._assertstate.trace("configured with mode set to %r" % (mode,))
+ def undo():
+ hook = config._assertstate.hook
+ if hook is not None and hook in sys.meta_path:
+ sys.meta_path.remove(hook)
+ config.add_cleanup(undo)
+
+
+def pytest_collection(session):
+ # this hook is only called when test modules are collected
+ # so for example not in the master process of pytest-xdist
+ # (which does not collect test modules)
+ hook = session.config._assertstate.hook
+ if hook is not None:
+ hook.set_session(session)
+
+
+def _running_on_ci():
+ """Check if we're currently running on a CI system."""
+ env_vars = ['CI', 'BUILD_NUMBER']
+ return any(var in os.environ for var in env_vars)
+
+
+def pytest_runtest_setup(item):
+ """Setup the pytest_assertrepr_compare hook
+
+ The newinterpret and rewrite modules will use util._reprcompare if
+ it exists to use custom reporting via the
+ pytest_assertrepr_compare hook. This sets up this custom
+ comparison for the test.
+ """
+ def callbinrepr(op, left, right):
+ """Call the pytest_assertrepr_compare hook and prepare the result
+
+ This uses the first result from the hook and then ensures the
+ following:
+ * Overly verbose explanations are dropped unless -vv was used or
+ running on a CI.
+ * Embedded newlines are escaped to help util.format_explanation()
+ later.
+ * If the rewrite mode is used embedded %-characters are replaced
+ to protect later % formatting.
+
+ The result can be formatted by util.format_explanation() for
+ pretty printing.
+ """
+ hook_result = item.ihook.pytest_assertrepr_compare(
+ config=item.config, op=op, left=left, right=right)
+ for new_expl in hook_result:
+ if new_expl:
+ if (sum(len(p) for p in new_expl[1:]) > 80*8 and
+ item.config.option.verbose < 2 and
+ not _running_on_ci()):
+ show_max = 10
+ truncated_lines = len(new_expl) - show_max
+ new_expl[show_max:] = [py.builtin._totext(
+ 'Detailed information truncated (%d more lines)'
+ ', use "-vv" to show' % truncated_lines)]
+ new_expl = [line.replace("\n", "\\n") for line in new_expl]
+ res = py.builtin._totext("\n~").join(new_expl)
+ if item.config.getvalue("assertmode") == "rewrite":
+ res = res.replace("%", "%%")
+ return res
+ util._reprcompare = callbinrepr
+
+
+def pytest_runtest_teardown(item):
+ util._reprcompare = None
+
+
+def pytest_sessionfinish(session):
+ hook = session.config._assertstate.hook
+ if hook is not None:
+ hook.session = None
+
+
+def _load_modules(mode):
+ """Lazily import assertion related code."""
+ global rewrite, reinterpret
+ from _pytest.assertion import reinterpret # noqa
+ if mode == "rewrite":
+ from _pytest.assertion import rewrite # noqa
+
+
+def warn_about_missing_assertion(mode):
+ try:
+ assert False
+ except AssertionError:
+ pass
+ else:
+ if mode == "rewrite":
+ specifically = ("assertions which are not in test modules "
+ "will be ignored")
+ else:
+ specifically = "failing tests may report as passing"
+
+ sys.stderr.write("WARNING: " + specifically +
+ " because assert statements are not executed "
+ "by the underlying Python interpreter "
+ "(are you using python -O?)\n")
+
+
+# Expose this plugin's implementation for the pytest_assertrepr_compare hook
+pytest_assertrepr_compare = util.assertrepr_compare
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/assertion/reinterpret.py b/testing/web-platform/tests/tools/pytest/_pytest/assertion/reinterpret.py
new file mode 100644
index 000000000..f4262c3ac
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/assertion/reinterpret.py
@@ -0,0 +1,407 @@
+"""
+Find intermediate evaluation results in assert statements through builtin AST.
+"""
+import ast
+import sys
+
+import _pytest._code
+import py
+from _pytest.assertion import util
+u = py.builtin._totext
+
+
+class AssertionError(util.BuiltinAssertionError):
+ def __init__(self, *args):
+ util.BuiltinAssertionError.__init__(self, *args)
+ if args:
+ # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
+ # on Python2.7 and above we always get len(args) == 1
+ # with args[0] being the (x,y) tuple.
+ if len(args) > 1:
+ toprint = args
+ else:
+ toprint = args[0]
+ try:
+ self.msg = u(toprint)
+ except Exception:
+ self.msg = u(
+ "<[broken __repr__] %s at %0xd>"
+ % (toprint.__class__, id(toprint)))
+ else:
+ f = _pytest._code.Frame(sys._getframe(1))
+ try:
+ source = f.code.fullsource
+ if source is not None:
+ try:
+ source = source.getstatement(f.lineno, assertion=True)
+ except IndexError:
+ source = None
+ else:
+ source = str(source.deindent()).strip()
+ except py.error.ENOENT:
+ source = None
+ # this can also occur during reinterpretation, when the
+ # co_filename is set to "<run>".
+ if source:
+ self.msg = reinterpret(source, f, should_fail=True)
+ else:
+ self.msg = "<could not determine information>"
+ if not self.args:
+ self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+ AssertionError.__module__ = "builtins"
+
+if sys.platform.startswith("java"):
+ # See http://bugs.jython.org/issue1497
+ _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+ "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+ "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+ "List", "Tuple")
+ _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+ "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+ "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+ "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+ _expr_nodes = set(getattr(ast, name) for name in _exprs)
+ _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+ def _is_ast_expr(node):
+ return node.__class__ in _expr_nodes
+ def _is_ast_stmt(node):
+ return node.__class__ in _stmt_nodes
+else:
+ def _is_ast_expr(node):
+ return isinstance(node, ast.expr)
+ def _is_ast_stmt(node):
+ return isinstance(node, ast.stmt)
+
+try:
+ _Starred = ast.Starred
+except AttributeError:
+ # Python 2. Define a dummy class so isinstance() will always be False.
+ class _Starred(object): pass
+
+
+class Failure(Exception):
+ """Error found while interpreting AST."""
+
+ def __init__(self, explanation=""):
+ self.cause = sys.exc_info()
+ self.explanation = explanation
+
+
+def reinterpret(source, frame, should_fail=False):
+ mod = ast.parse(source)
+ visitor = DebugInterpreter(frame)
+ try:
+ visitor.visit(mod)
+ except Failure:
+ failure = sys.exc_info()[1]
+ return getfailure(failure)
+ if should_fail:
+ return ("(assertion failed, but when it was re-run for "
+ "printing intermediate values, it did not fail. Suggestions: "
+ "compute assert expression before the assert or use --assert=plain)")
+
+def run(offending_line, frame=None):
+ if frame is None:
+ frame = _pytest._code.Frame(sys._getframe(1))
+ return reinterpret(offending_line, frame)
+
+def getfailure(e):
+ explanation = util.format_explanation(e.explanation)
+ value = e.cause[1]
+ if str(value):
+ lines = explanation.split('\n')
+ lines[0] += " << %s" % (value,)
+ explanation = '\n'.join(lines)
+ text = "%s: %s" % (e.cause[0].__name__, explanation)
+ if text.startswith('AssertionError: assert '):
+ text = text[16:]
+ return text
+
+operator_map = {
+ ast.BitOr : "|",
+ ast.BitXor : "^",
+ ast.BitAnd : "&",
+ ast.LShift : "<<",
+ ast.RShift : ">>",
+ ast.Add : "+",
+ ast.Sub : "-",
+ ast.Mult : "*",
+ ast.Div : "/",
+ ast.FloorDiv : "//",
+ ast.Mod : "%",
+ ast.Eq : "==",
+ ast.NotEq : "!=",
+ ast.Lt : "<",
+ ast.LtE : "<=",
+ ast.Gt : ">",
+ ast.GtE : ">=",
+ ast.Pow : "**",
+ ast.Is : "is",
+ ast.IsNot : "is not",
+ ast.In : "in",
+ ast.NotIn : "not in"
+}
+
+unary_map = {
+ ast.Not : "not %s",
+ ast.Invert : "~%s",
+ ast.USub : "-%s",
+ ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+ """Interpret AST nodes to gleam useful debugging information. """
+
+ def __init__(self, frame):
+ self.frame = frame
+
+ def generic_visit(self, node):
+ # Fallback when we don't have a special implementation.
+ if _is_ast_expr(node):
+ mod = ast.Expression(node)
+ co = self._compile(mod)
+ try:
+ result = self.frame.eval(co)
+ except Exception:
+ raise Failure()
+ explanation = self.frame.repr(result)
+ return explanation, result
+ elif _is_ast_stmt(node):
+ mod = ast.Module([node])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co)
+ except Exception:
+ raise Failure()
+ return None, None
+ else:
+ raise AssertionError("can't handle %s" %(node,))
+
+ def _compile(self, source, mode="eval"):
+ return compile(source, "<assertion interpretation>", mode)
+
+ def visit_Expr(self, expr):
+ return self.visit(expr.value)
+
+ def visit_Module(self, mod):
+ for stmt in mod.body:
+ self.visit(stmt)
+
+ def visit_Name(self, name):
+ explanation, result = self.generic_visit(name)
+ # See if the name is local.
+ source = "%r in locals() is not globals()" % (name.id,)
+ co = self._compile(source)
+ try:
+ local = self.frame.eval(co)
+ except Exception:
+ # have to assume it isn't
+ local = None
+ if local is None or not self.frame.is_true(local):
+ return name.id, result
+ return explanation, result
+
+ def visit_Compare(self, comp):
+ left = comp.left
+ left_explanation, left_result = self.visit(left)
+ for op, next_op in zip(comp.ops, comp.comparators):
+ next_explanation, next_result = self.visit(next_op)
+ op_symbol = operator_map[op.__class__]
+ explanation = "%s %s %s" % (left_explanation, op_symbol,
+ next_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=next_result)
+ except Exception:
+ raise Failure(explanation)
+ try:
+ if not self.frame.is_true(result):
+ break
+ except KeyboardInterrupt:
+ raise
+ except:
+ break
+ left_explanation, left_result = next_explanation, next_result
+
+ if util._reprcompare is not None:
+ res = util._reprcompare(op_symbol, left_result, next_result)
+ if res:
+ explanation = res
+ return explanation, result
+
+ def visit_BoolOp(self, boolop):
+ is_or = isinstance(boolop.op, ast.Or)
+ explanations = []
+ for operand in boolop.values:
+ explanation, result = self.visit(operand)
+ explanations.append(explanation)
+ if result == is_or:
+ break
+ name = is_or and " or " or " and "
+ explanation = "(" + name.join(explanations) + ")"
+ return explanation, result
+
+ def visit_UnaryOp(self, unary):
+ pattern = unary_map[unary.op.__class__]
+ operand_explanation, operand_result = self.visit(unary.operand)
+ explanation = pattern % (operand_explanation,)
+ co = self._compile(pattern % ("__exprinfo_expr",))
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=operand_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_BinOp(self, binop):
+ left_explanation, left_result = self.visit(binop.left)
+ right_explanation, right_result = self.visit(binop.right)
+ symbol = operator_map[binop.op.__class__]
+ explanation = "(%s %s %s)" % (left_explanation, symbol,
+ right_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=right_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_Call(self, call):
+ func_explanation, func = self.visit(call.func)
+ arg_explanations = []
+ ns = {"__exprinfo_func" : func}
+ arguments = []
+ for arg in call.args:
+ arg_explanation, arg_result = self.visit(arg)
+ if isinstance(arg, _Starred):
+ arg_name = "__exprinfo_star"
+ ns[arg_name] = arg_result
+ arguments.append("*%s" % (arg_name,))
+ arg_explanations.append("*%s" % (arg_explanation,))
+ else:
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ ns[arg_name] = arg_result
+ arguments.append(arg_name)
+ arg_explanations.append(arg_explanation)
+ for keyword in call.keywords:
+ arg_explanation, arg_result = self.visit(keyword.value)
+ if keyword.arg:
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ keyword_source = "%s=%%s" % (keyword.arg)
+ arguments.append(keyword_source % (arg_name,))
+ arg_explanations.append(keyword_source % (arg_explanation,))
+ else:
+ arg_name = "__exprinfo_kwds"
+ arguments.append("**%s" % (arg_name,))
+ arg_explanations.append("**%s" % (arg_explanation,))
+
+ ns[arg_name] = arg_result
+
+ if getattr(call, 'starargs', None):
+ arg_explanation, arg_result = self.visit(call.starargs)
+ arg_name = "__exprinfo_star"
+ ns[arg_name] = arg_result
+ arguments.append("*%s" % (arg_name,))
+ arg_explanations.append("*%s" % (arg_explanation,))
+
+ if getattr(call, 'kwargs', None):
+ arg_explanation, arg_result = self.visit(call.kwargs)
+ arg_name = "__exprinfo_kwds"
+ ns[arg_name] = arg_result
+ arguments.append("**%s" % (arg_name,))
+ arg_explanations.append("**%s" % (arg_explanation,))
+ args_explained = ", ".join(arg_explanations)
+ explanation = "%s(%s)" % (func_explanation, args_explained)
+ args = ", ".join(arguments)
+ source = "__exprinfo_func(%s)" % (args,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, **ns)
+ except Exception:
+ raise Failure(explanation)
+ pattern = "%s\n{%s = %s\n}"
+ rep = self.frame.repr(result)
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def _is_builtin_name(self, name):
+ pattern = "%r not in globals() and %r not in locals()"
+ source = pattern % (name.id, name.id)
+ co = self._compile(source)
+ try:
+ return self.frame.eval(co)
+ except Exception:
+ return False
+
+ def visit_Attribute(self, attr):
+ if not isinstance(attr.ctx, ast.Load):
+ return self.generic_visit(attr)
+ source_explanation, source_result = self.visit(attr.value)
+ explanation = "%s.%s" % (source_explanation, attr.attr)
+ source = "__exprinfo_expr.%s" % (attr.attr,)
+ co = self._compile(source)
+ try:
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=source_result)
+ except AttributeError:
+ # Maybe the attribute name needs to be mangled?
+ if not attr.attr.startswith("__") or attr.attr.endswith("__"):
+ raise
+ source = "getattr(__exprinfo_expr.__class__, '__name__', '')"
+ co = self._compile(source)
+ class_name = self.frame.eval(co, __exprinfo_expr=source_result)
+ mangled_attr = "_" + class_name + attr.attr
+ source = "__exprinfo_expr.%s" % (mangled_attr,)
+ co = self._compile(source)
+ result = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ raise Failure(explanation)
+ explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+ self.frame.repr(result),
+ source_explanation, attr.attr)
+ # Check if the attr is from an instance.
+ source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+ source = source % (attr.attr,)
+ co = self._compile(source)
+ try:
+ from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ from_instance = None
+ if from_instance is None or self.frame.is_true(from_instance):
+ rep = self.frame.repr(result)
+ pattern = "%s\n{%s = %s\n}"
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def visit_Assert(self, assrt):
+ test_explanation, test_result = self.visit(assrt.test)
+ explanation = "assert %s" % (test_explanation,)
+ if not self.frame.is_true(test_result):
+ try:
+ raise util.BuiltinAssertionError
+ except Exception:
+ raise Failure(explanation)
+ return explanation, test_result
+
+ def visit_Assign(self, assign):
+ value_explanation, value_result = self.visit(assign.value)
+ explanation = "... = %s" % (value_explanation,)
+ name = ast.Name("__exprinfo_expr", ast.Load(),
+ lineno=assign.value.lineno,
+ col_offset=assign.value.col_offset)
+ new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+ col_offset=assign.col_offset)
+ mod = ast.Module([new_assign])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co, __exprinfo_expr=value_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, value_result
+
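
The reinterpreter above works by re-compiling and re-evaluating each sub-expression of a failing assert in the original frame. The snippet below is a stand-alone, stripped-down sketch of the same idea for a single comparison; it relies only on the stdlib ast module, and explain_compare is an invented name, not part of the module above.

    import ast

    def explain_compare(source, namespace):
        # Toy analogue of DebugInterpreter.visit_Compare: evaluate both sides
        # of one "a <op> b" comparison and report the intermediate values.
        node = ast.parse(source, mode="eval").body
        assert isinstance(node, ast.Compare) and len(node.ops) == 1
        ops = {ast.Eq: "==", ast.NotEq: "!=", ast.Lt: "<", ast.Gt: ">"}
        left = eval(compile(ast.Expression(node.left), "<expl>", "eval"), namespace)
        right = eval(compile(ast.Expression(node.comparators[0]), "<expl>", "eval"),
                     namespace)
        return "assert %r %s %r" % (left, ops[node.ops[0].__class__], right)

    print(explain_compare("x + 1 == y", {"x": 2, "y": 4}))   # assert 3 == 4
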
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/assertion/rewrite.py b/testing/web-platform/tests/tools/pytest/_pytest/assertion/rewrite.py
new file mode 100644
index 000000000..14b8e49db
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/assertion/rewrite.py
@@ -0,0 +1,885 @@
+"""Rewrite assertion AST to produce nice error messages"""
+
+import ast
+import errno
+import itertools
+import imp
+import marshal
+import os
+import re
+import struct
+import sys
+import types
+
+import py
+from _pytest.assertion import util
+
+
+# pytest caches rewritten pycs in __pycache__.
+if hasattr(imp, "get_tag"):
+ PYTEST_TAG = imp.get_tag() + "-PYTEST"
+else:
+ if hasattr(sys, "pypy_version_info"):
+ impl = "pypy"
+ elif sys.platform == "java":
+ impl = "jython"
+ else:
+ impl = "cpython"
+ ver = sys.version_info
+ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
+ del ver, impl
+
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
+PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
+
+REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
+
+if sys.version_info >= (3,5):
+ ast_Call = ast.Call
+else:
+ ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
+
+
+class AssertionRewritingHook(object):
+ """PEP302 Import hook which rewrites asserts."""
+
+ def __init__(self):
+ self.session = None
+ self.modules = {}
+ self._register_with_pkg_resources()
+
+ def set_session(self, session):
+ self.fnpats = session.config.getini("python_files")
+ self.session = session
+
+ def find_module(self, name, path=None):
+ if self.session is None:
+ return None
+ sess = self.session
+ state = sess.config._assertstate
+ state.trace("find_module called for: %s" % name)
+ names = name.rsplit(".", 1)
+ lastname = names[-1]
+ pth = None
+ if path is not None:
+ # Starting with Python 3.3, path is a _NamespacePath(), which
+ # causes problems if not converted to list.
+ path = list(path)
+ if len(path) == 1:
+ pth = path[0]
+ if pth is None:
+ try:
+ fd, fn, desc = imp.find_module(lastname, path)
+ except ImportError:
+ return None
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ if tp == imp.PY_COMPILED:
+ if hasattr(imp, "source_from_cache"):
+ fn = imp.source_from_cache(fn)
+ else:
+ fn = fn[:-1]
+ elif tp != imp.PY_SOURCE:
+ # Don't know what this is.
+ return None
+ else:
+ fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
+ fn_pypath = py.path.local(fn)
+ # Is this a test file?
+ if not sess.isinitpath(fn):
+ # We have to be very careful here because imports in this code can
+ # trigger a cycle.
+ self.session = None
+ try:
+ for pat in self.fnpats:
+ if fn_pypath.fnmatch(pat):
+ state.trace("matched test file %r" % (fn,))
+ break
+ else:
+ return None
+ finally:
+ self.session = sess
+ else:
+ state.trace("matched test file (was specified on cmdline): %r" %
+ (fn,))
+ # The requested module looks like a test file, so rewrite it. This is
+ # the most magical part of the process: load the source, rewrite the
+ # asserts, and load the rewritten source. We also cache the rewritten
+ # module code in a special pyc. We must be aware of the possibility of
+ # concurrent pytest processes rewriting and loading pycs. To avoid
+ # tricky race conditions, we maintain the following invariant: The
+ # cached pyc is always a complete, valid pyc. Operations on it must be
+ # atomic. POSIX's atomic rename comes in handy.
+ write = not sys.dont_write_bytecode
+ cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
+ if write:
+ try:
+ os.mkdir(cache_dir)
+ except OSError:
+ e = sys.exc_info()[1].errno
+ if e == errno.EEXIST:
+ # Either the __pycache__ directory already exists (the
+ # common case) or it's blocked by a non-dir node. In the
+ # latter case, we'll ignore it in _write_pyc.
+ pass
+ elif e in [errno.ENOENT, errno.ENOTDIR]:
+ # One of the path components was not a directory, likely
+ # because we're in a zip file.
+ write = False
+ elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
+ state.trace("read only directory: %r" % fn_pypath.dirname)
+ write = False
+ else:
+ raise
+ cache_name = fn_pypath.basename[:-3] + PYC_TAIL
+ pyc = os.path.join(cache_dir, cache_name)
+ # Notice that even if we're in a read-only directory, I'm going
+ # to check for a cached pyc. This may not be optimal...
+ co = _read_pyc(fn_pypath, pyc, state.trace)
+ if co is None:
+ state.trace("rewriting %r" % (fn,))
+ source_stat, co = _rewrite_test(state, fn_pypath)
+ if co is None:
+ # Probably a SyntaxError in the test.
+ return None
+ if write:
+ _make_rewritten_pyc(state, source_stat, pyc, co)
+ else:
+ state.trace("found cached rewritten pyc for %r" % (fn,))
+ self.modules[name] = co, pyc
+ return self
+
+ def load_module(self, name):
+ # If there is an existing module object named 'fullname' in
+ # sys.modules, the loader must use that existing module. (Otherwise,
+ # the reload() builtin will not work correctly.)
+ if name in sys.modules:
+ return sys.modules[name]
+
+ co, pyc = self.modules.pop(name)
+ # I wish I could just call imp.load_compiled here, but __file__ has to
+ # be set properly. In Python 3.2+, this all would be handled correctly
+ # by load_compiled.
+ mod = sys.modules[name] = imp.new_module(name)
+ try:
+ mod.__file__ = co.co_filename
+ # Normally, this attribute is 3.2+.
+ mod.__cached__ = pyc
+ mod.__loader__ = self
+ py.builtin.exec_(co, mod.__dict__)
+ except:
+ del sys.modules[name]
+ raise
+ return sys.modules[name]
+
+
+
+ def is_package(self, name):
+ try:
+ fd, fn, desc = imp.find_module(name)
+ except ImportError:
+ return False
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ return tp == imp.PKG_DIRECTORY
+
+ @classmethod
+ def _register_with_pkg_resources(cls):
+ """
+ Ensure package resources can be loaded from this loader. May be called
+ multiple times, as the operation is idempotent.
+ """
+ try:
+ import pkg_resources
+ # access an attribute in case a deferred importer is present
+ pkg_resources.__name__
+ except ImportError:
+ return
+
+ # Since pytest tests are always located in the file system, the
+ # DefaultProvider is appropriate.
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+ def get_data(self, pathname):
+ """Optional PEP302 get_data API.
+ """
+ with open(pathname, 'rb') as f:
+ return f.read()
+
+
+def _write_pyc(state, co, source_stat, pyc):
+ # Technically, we don't have to have the same pyc format as
+ # (C)Python, since these "pycs" should never be seen by builtin
+    # import. However, there's little reason to deviate, and I hope
+ # sometime to be able to use imp.load_compiled to load them. (See
+ # the comment in load_module above.)
+ try:
+ fp = open(pyc, "wb")
+ except IOError:
+ err = sys.exc_info()[1].errno
+ state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+ # we ignore any failure to write the cache file
+ # there are many reasons, permission-denied, __pycache__ being a
+ # file etc.
+ return False
+ try:
+ fp.write(imp.get_magic())
+ mtime = int(source_stat.mtime)
+ size = source_stat.size & 0xFFFFFFFF
+ fp.write(struct.pack("<ll", mtime, size))
+ marshal.dump(co, fp)
+ finally:
+ fp.close()
+ return True
+
+RN = "\r\n".encode("utf-8")
+N = "\n".encode("utf-8")
+
+cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
+BOM_UTF8 = '\xef\xbb\xbf'
+
+def _rewrite_test(state, fn):
+ """Try to read and rewrite *fn* and return the code object."""
+ try:
+ stat = fn.stat()
+ source = fn.read("rb")
+ except EnvironmentError:
+ return None, None
+ if ASCII_IS_DEFAULT_ENCODING:
+ # ASCII is the default encoding in Python 2. Without a coding
+ # declaration, Python 2 will complain about any bytes in the file
+ # outside the ASCII range. Sadly, this behavior does not extend to
+ # compile() or ast.parse(), which prefer to interpret the bytes as
+ # latin-1. (At least they properly handle explicit coding cookies.) To
+ # preserve this error behavior, we could force ast.parse() to use ASCII
+ # as the encoding by inserting a coding cookie. Unfortunately, that
+ # messes up line numbers. Thus, we have to check ourselves if anything
+ # is outside the ASCII range in the case no encoding is explicitly
+ # declared. For more context, see issue #269. Yay for Python 3 which
+ # gets this right.
+ end1 = source.find("\n")
+ end2 = source.find("\n", end1 + 1)
+ if (not source.startswith(BOM_UTF8) and
+ cookie_re.match(source[0:end1]) is None and
+ cookie_re.match(source[end1 + 1:end2]) is None):
+ if hasattr(state, "_indecode"):
+ # encodings imported us again, so don't rewrite.
+ return None, None
+ state._indecode = True
+ try:
+ try:
+ source.decode("ascii")
+ except UnicodeDecodeError:
+ # Let it fail in real import.
+ return None, None
+ finally:
+ del state._indecode
+ # On Python versions which are not 2.7 and less than or equal to 3.1, the
+ # parser expects *nix newlines.
+ if REWRITE_NEWLINES:
+ source = source.replace(RN, N) + N
+ try:
+ tree = ast.parse(source)
+ except SyntaxError:
+ # Let this pop up again in the real import.
+ state.trace("failed to parse: %r" % (fn,))
+ return None, None
+ rewrite_asserts(tree)
+ try:
+ co = compile(tree, fn.strpath, "exec")
+ except SyntaxError:
+ # It's possible that this error is from some bug in the
+ # assertion rewriting, but I don't know of a fast way to tell.
+ state.trace("failed to compile: %r" % (fn,))
+ return None, None
+ return stat, co
+
+def _make_rewritten_pyc(state, source_stat, pyc, co):
+ """Try to dump rewritten code to *pyc*."""
+ if sys.platform.startswith("win"):
+ # Windows grants exclusive access to open files and doesn't have atomic
+ # rename, so just write into the final file.
+ _write_pyc(state, co, source_stat, pyc)
+ else:
+ # When not on windows, assume rename is atomic. Dump the code object
+ # into a file specific to this process and atomically replace it.
+ proc_pyc = pyc + "." + str(os.getpid())
+ if _write_pyc(state, co, source_stat, proc_pyc):
+ os.rename(proc_pyc, pyc)
+
+def _read_pyc(source, pyc, trace=lambda x: None):
+ """Possibly read a pytest pyc containing rewritten code.
+
+ Return rewritten code if successful or None if not.
+ """
+ try:
+ fp = open(pyc, "rb")
+ except IOError:
+ return None
+ with fp:
+ try:
+ mtime = int(source.mtime())
+ size = source.size()
+ data = fp.read(12)
+ except EnvironmentError as e:
+ trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
+ return None
+ # Check for invalid or out of date pyc file.
+ if (len(data) != 12 or data[:4] != imp.get_magic() or
+ struct.unpack("<ll", data[4:]) != (mtime, size)):
+ trace('_read_pyc(%s): invalid or out of date pyc' % source)
+ return None
+ try:
+ co = marshal.load(fp)
+ except Exception as e:
+ trace('_read_pyc(%s): marshal.load error %s' % (source, e))
+ return None
+ if not isinstance(co, types.CodeType):
+ trace('_read_pyc(%s): not a code object' % source)
+ return None
+ return co
+
+
+def rewrite_asserts(mod):
+ """Rewrite the assert statements in mod."""
+ AssertionRewriter().run(mod)
+
+
+def _saferepr(obj):
+ """Get a safe repr of an object for assertion error messages.
+
+ The assertion formatting (util.format_explanation()) requires
+ newlines to be escaped since they are a special character for it.
+    Normally assertion.util.format_explanation() does this, but a
+    custom repr may itself contain one of the special escape
+    sequences; in particular '\n{' and '\n}' are likely to appear in
+    JSON reprs.
+
+ """
+ repr = py.io.saferepr(obj)
+ if py.builtin._istext(repr):
+ t = py.builtin.text
+ else:
+ t = py.builtin.bytes
+ return repr.replace(t("\n"), t("\\n"))
+
+
+from _pytest.assertion.util import format_explanation as _format_explanation # noqa
+
+def _format_assertmsg(obj):
+ """Format the custom assertion message given.
+
+ For strings this simply replaces newlines with '\n~' so that
+ util.format_explanation() will preserve them instead of escaping
+ newlines. For other objects py.io.saferepr() is used first.
+
+ """
+ # reprlib appears to have a bug which means that if a string
+ # contains a newline it gets escaped, however if an object has a
+ # .__repr__() which contains newlines it does not get escaped.
+ # However in either case we want to preserve the newline.
+ if py.builtin._istext(obj) or py.builtin._isbytes(obj):
+ s = obj
+ is_repr = False
+ else:
+ s = py.io.saferepr(obj)
+ is_repr = True
+ if py.builtin._istext(s):
+ t = py.builtin.text
+ else:
+ t = py.builtin.bytes
+ s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
+ if is_repr:
+ s = s.replace(t("\\n"), t("\n~"))
+ return s
+
+def _should_repr_global_name(obj):
+ return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
+
+def _format_boolop(explanations, is_or):
+ explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
+ if py.builtin._istext(explanation):
+ t = py.builtin.text
+ else:
+ t = py.builtin.bytes
+ return explanation.replace(t('%'), t('%%'))
+
+def _call_reprcompare(ops, results, expls, each_obj):
+ for i, res, expl in zip(range(len(ops)), results, expls):
+ try:
+ done = not res
+ except Exception:
+ done = True
+ if done:
+ break
+ if util._reprcompare is not None:
+ custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
+ if custom is not None:
+ return custom
+ return expl
+
+
+unary_map = {
+ ast.Not: "not %s",
+ ast.Invert: "~%s",
+ ast.USub: "-%s",
+ ast.UAdd: "+%s"
+}
+
+binop_map = {
+ ast.BitOr: "|",
+ ast.BitXor: "^",
+ ast.BitAnd: "&",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.FloorDiv: "//",
+ ast.Mod: "%%", # escaped for string formatting
+ ast.Eq: "==",
+ ast.NotEq: "!=",
+ ast.Lt: "<",
+ ast.LtE: "<=",
+ ast.Gt: ">",
+ ast.GtE: ">=",
+ ast.Pow: "**",
+ ast.Is: "is",
+ ast.IsNot: "is not",
+ ast.In: "in",
+ ast.NotIn: "not in"
+}
+# Python 3.5+ compatibility
+try:
+ binop_map[ast.MatMult] = "@"
+except AttributeError:
+ pass
+
+# Python 3.4+ compatibility
+if hasattr(ast, "NameConstant"):
+ _NameConstant = ast.NameConstant
+else:
+ def _NameConstant(c):
+ return ast.Name(str(c), ast.Load())
+
+
+def set_location(node, lineno, col_offset):
+ """Set node location information recursively."""
+ def _fix(node, lineno, col_offset):
+ if "lineno" in node._attributes:
+ node.lineno = lineno
+ if "col_offset" in node._attributes:
+ node.col_offset = col_offset
+ for child in ast.iter_child_nodes(node):
+ _fix(child, lineno, col_offset)
+ _fix(node, lineno, col_offset)
+ return node
+
+
+class AssertionRewriter(ast.NodeVisitor):
+ """Assertion rewriting implementation.
+
+ The main entrypoint is to call .run() with an ast.Module instance,
+ this will then find all the assert statements and re-write them to
+ provide intermediate values and a detailed assertion error. See
+ http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
+ for an overview of how this works.
+
+ The entry point here is .run() which will iterate over all the
+ statements in an ast.Module and for each ast.Assert statement it
+ finds call .visit() with it. Then .visit_Assert() takes over and
+ is responsible for creating new ast statements to replace the
+ original assert statement: it re-writes the test of an assertion
+ to provide intermediate values and replace it with an if statement
+ which raises an assertion error with a detailed explanation in
+ case the expression is false.
+
+ For this .visit_Assert() uses the visitor pattern to visit all the
+ AST nodes of the ast.Assert.test field, each visit call returning
+    an AST node and the corresponding explanation string.  During this,
+ state is kept in several instance attributes:
+
+ :statements: All the AST statements which will replace the assert
+ statement.
+
+ :variables: This is populated by .variable() with each variable
+ used by the statements so that they can all be set to None at
+ the end of the statements.
+
+ :variable_counter: Counter to create new unique variables needed
+ by statements. Variables are created using .variable() and
+ have the form of "@py_assert0".
+
+ :on_failure: The AST statements which will be executed if the
+ assertion test fails. This is the code which will construct
+ the failure message and raises the AssertionError.
+
+ :explanation_specifiers: A dict filled by .explanation_param()
+ with %-formatting placeholders and their corresponding
+ expressions to use in the building of an assertion message.
+ This is used by .pop_format_context() to build a message.
+
+ :stack: A stack of the explanation_specifiers dicts maintained by
+       .push_format_context() and .pop_format_context(), which allows
+       building another %-formatted string while already building one.
+
+ This state is reset on every new assert statement visited and used
+ by the other visitors.
+
+ """
+
+ def run(self, mod):
+ """Find all assert statements in *mod* and rewrite them."""
+ if not mod.body:
+ # Nothing to do.
+ return
+ # Insert some special imports at the top of the module but after any
+ # docstrings and __future__ imports.
+ aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
+ ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
+ expect_docstring = True
+ pos = 0
+ lineno = 0
+ for item in mod.body:
+ if (expect_docstring and isinstance(item, ast.Expr) and
+ isinstance(item.value, ast.Str)):
+ doc = item.value.s
+ if "PYTEST_DONT_REWRITE" in doc:
+ # The module has disabled assertion rewriting.
+ return
+ lineno += len(doc) - 1
+ expect_docstring = False
+ elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
+ item.module != "__future__"):
+ lineno = item.lineno
+ break
+ pos += 1
+ imports = [ast.Import([alias], lineno=lineno, col_offset=0)
+ for alias in aliases]
+ mod.body[pos:pos] = imports
+ # Collect asserts.
+ nodes = [mod]
+ while nodes:
+ node = nodes.pop()
+ for name, field in ast.iter_fields(node):
+ if isinstance(field, list):
+ new = []
+ for i, child in enumerate(field):
+ if isinstance(child, ast.Assert):
+ # Transform assert.
+ new.extend(self.visit(child))
+ else:
+ new.append(child)
+ if isinstance(child, ast.AST):
+ nodes.append(child)
+ setattr(node, name, new)
+ elif (isinstance(field, ast.AST) and
+ # Don't recurse into expressions as they can't contain
+ # asserts.
+ not isinstance(field, ast.expr)):
+ nodes.append(field)
+
+ def variable(self):
+ """Get a new variable."""
+ # Use a character invalid in python identifiers to avoid clashing.
+ name = "@py_assert" + str(next(self.variable_counter))
+ self.variables.append(name)
+ return name
+
+ def assign(self, expr):
+ """Give *expr* a name."""
+ name = self.variable()
+ self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
+ return ast.Name(name, ast.Load())
+
+ def display(self, expr):
+ """Call py.io.saferepr on the expression."""
+ return self.helper("saferepr", expr)
+
+ def helper(self, name, *args):
+ """Call a helper in this module."""
+ py_name = ast.Name("@pytest_ar", ast.Load())
+ attr = ast.Attribute(py_name, "_" + name, ast.Load())
+ return ast_Call(attr, list(args), [])
+
+ def builtin(self, name):
+ """Return the builtin called *name*."""
+ builtin_name = ast.Name("@py_builtins", ast.Load())
+ return ast.Attribute(builtin_name, name, ast.Load())
+
+ def explanation_param(self, expr):
+ """Return a new named %-formatting placeholder for expr.
+
+ This creates a %-formatting placeholder for expr in the
+ current formatting context, e.g. ``%(py0)s``. The placeholder
+ and expr are placed in the current format context so that it
+ can be used on the next call to .pop_format_context().
+
+ """
+ specifier = "py" + str(next(self.variable_counter))
+ self.explanation_specifiers[specifier] = expr
+ return "%(" + specifier + ")s"
+
+ def push_format_context(self):
+ """Create a new formatting context.
+
+ The format context is used for when an explanation wants to
+ have a variable value formatted in the assertion message. In
+ this case the value required can be added using
+ .explanation_param(). Finally .pop_format_context() is used
+ to format a string of %-formatted values as added by
+ .explanation_param().
+
+ """
+ self.explanation_specifiers = {}
+ self.stack.append(self.explanation_specifiers)
+
+ def pop_format_context(self, expl_expr):
+ """Format the %-formatted string with current format context.
+
+ The expl_expr should be an ast.Str instance constructed from
+ the %-placeholders created by .explanation_param(). This will
+ add the required code to format said string to .on_failure and
+ return the ast.Name instance of the formatted string.
+
+ """
+ current = self.stack.pop()
+ if self.stack:
+ self.explanation_specifiers = self.stack[-1]
+ keys = [ast.Str(key) for key in current.keys()]
+ format_dict = ast.Dict(keys, list(current.values()))
+ form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
+ name = "@py_format" + str(next(self.variable_counter))
+ self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
+ return ast.Name(name, ast.Load())
+
+ def generic_visit(self, node):
+ """Handle expressions we don't have custom code for."""
+ assert isinstance(node, ast.expr)
+ res = self.assign(node)
+ return res, self.explanation_param(self.display(res))
+
+ def visit_Assert(self, assert_):
+ """Return the AST statements to replace the ast.Assert instance.
+
+ This re-writes the test of an assertion to provide
+ intermediate values and replace it with an if statement which
+ raises an assertion error with a detailed explanation in case
+ the expression is false.
+
+ """
+ self.statements = []
+ self.variables = []
+ self.variable_counter = itertools.count()
+ self.stack = []
+ self.on_failure = []
+ self.push_format_context()
+ # Rewrite assert into a bunch of statements.
+ top_condition, explanation = self.visit(assert_.test)
+ # Create failure message.
+ body = self.on_failure
+ negation = ast.UnaryOp(ast.Not(), top_condition)
+ self.statements.append(ast.If(negation, body, []))
+ if assert_.msg:
+ assertmsg = self.helper('format_assertmsg', assert_.msg)
+ explanation = "\n>assert " + explanation
+ else:
+ assertmsg = ast.Str("")
+ explanation = "assert " + explanation
+ template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
+ msg = self.pop_format_context(template)
+ fmt = self.helper("format_explanation", msg)
+ err_name = ast.Name("AssertionError", ast.Load())
+ exc = ast_Call(err_name, [fmt], [])
+ if sys.version_info[0] >= 3:
+ raise_ = ast.Raise(exc, None)
+ else:
+ raise_ = ast.Raise(exc, None, None)
+ body.append(raise_)
+ # Clear temporary variables by setting them to None.
+ if self.variables:
+ variables = [ast.Name(name, ast.Store())
+ for name in self.variables]
+ clear = ast.Assign(variables, _NameConstant(None))
+ self.statements.append(clear)
+ # Fix line numbers.
+ for stmt in self.statements:
+ set_location(stmt, assert_.lineno, assert_.col_offset)
+ return self.statements
+
+ def visit_Name(self, name):
+ # Display the repr of the name if it's a local variable or
+ # _should_repr_global_name() thinks it's acceptable.
+ locs = ast_Call(self.builtin("locals"), [], [])
+ inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+ dorepr = self.helper("should_repr_global_name", name)
+ test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
+ expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
+ return name, self.explanation_param(expr)
+
+ def visit_BoolOp(self, boolop):
+ res_var = self.variable()
+ expl_list = self.assign(ast.List([], ast.Load()))
+ app = ast.Attribute(expl_list, "append", ast.Load())
+ is_or = int(isinstance(boolop.op, ast.Or))
+ body = save = self.statements
+ fail_save = self.on_failure
+ levels = len(boolop.values) - 1
+ self.push_format_context()
+        # Process each operand, short-circuiting if needed.
+ for i, v in enumerate(boolop.values):
+ if i:
+ fail_inner = []
+ # cond is set in a prior loop iteration below
+ self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
+ self.on_failure = fail_inner
+ self.push_format_context()
+ res, expl = self.visit(v)
+ body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
+ expl_format = self.pop_format_context(ast.Str(expl))
+ call = ast_Call(app, [expl_format], [])
+ self.on_failure.append(ast.Expr(call))
+ if i < levels:
+ cond = res
+ if is_or:
+ cond = ast.UnaryOp(ast.Not(), cond)
+ inner = []
+ self.statements.append(ast.If(cond, inner, []))
+ self.statements = body = inner
+ self.statements = save
+ self.on_failure = fail_save
+ expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
+ expl = self.pop_format_context(expl_template)
+ return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
+
+ def visit_UnaryOp(self, unary):
+ pattern = unary_map[unary.op.__class__]
+ operand_res, operand_expl = self.visit(unary.operand)
+ res = self.assign(ast.UnaryOp(unary.op, operand_res))
+ return res, pattern % (operand_expl,)
+
+ def visit_BinOp(self, binop):
+ symbol = binop_map[binop.op.__class__]
+ left_expr, left_expl = self.visit(binop.left)
+ right_expr, right_expl = self.visit(binop.right)
+ explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
+ res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
+ return res, explanation
+
+ def visit_Call_35(self, call):
+ """
+        visit `ast.Call` nodes on Python 3.5 and after
+ """
+ new_func, func_expl = self.visit(call.func)
+ arg_expls = []
+ new_args = []
+ new_kwargs = []
+ for arg in call.args:
+ res, expl = self.visit(arg)
+ arg_expls.append(expl)
+ new_args.append(res)
+ for keyword in call.keywords:
+ res, expl = self.visit(keyword.value)
+ new_kwargs.append(ast.keyword(keyword.arg, res))
+ if keyword.arg:
+ arg_expls.append(keyword.arg + "=" + expl)
+            else:  # **kwargs appear as ast.keyword nodes with .arg set to None
+ arg_expls.append("**" + expl)
+
+ expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
+ new_call = ast.Call(new_func, new_args, new_kwargs)
+ res = self.assign(new_call)
+ res_expl = self.explanation_param(self.display(res))
+ outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
+ return res, outer_expl
+
+ def visit_Starred(self, starred):
+ # From Python 3.5, a Starred node can appear in a function call
+ res, expl = self.visit(starred.value)
+ return starred, '*' + expl
+
+ def visit_Call_legacy(self, call):
+ """
+        visit `ast.Call` nodes on 3.4 and below
+ """
+ new_func, func_expl = self.visit(call.func)
+ arg_expls = []
+ new_args = []
+ new_kwargs = []
+ new_star = new_kwarg = None
+ for arg in call.args:
+ res, expl = self.visit(arg)
+ new_args.append(res)
+ arg_expls.append(expl)
+ for keyword in call.keywords:
+ res, expl = self.visit(keyword.value)
+ new_kwargs.append(ast.keyword(keyword.arg, res))
+ arg_expls.append(keyword.arg + "=" + expl)
+ if call.starargs:
+ new_star, expl = self.visit(call.starargs)
+ arg_expls.append("*" + expl)
+ if call.kwargs:
+ new_kwarg, expl = self.visit(call.kwargs)
+ arg_expls.append("**" + expl)
+ expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
+ new_call = ast.Call(new_func, new_args, new_kwargs,
+ new_star, new_kwarg)
+ res = self.assign(new_call)
+ res_expl = self.explanation_param(self.display(res))
+ outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
+ return res, outer_expl
+
+ # ast.Call signature changed on 3.5,
+    # conditionally change which method is named
+ # visit_Call depending on Python version
+ if sys.version_info >= (3, 5):
+ visit_Call = visit_Call_35
+ else:
+ visit_Call = visit_Call_legacy
+
+
+ def visit_Attribute(self, attr):
+ if not isinstance(attr.ctx, ast.Load):
+ return self.generic_visit(attr)
+ value, value_expl = self.visit(attr.value)
+ res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
+ res_expl = self.explanation_param(self.display(res))
+ pat = "%s\n{%s = %s.%s\n}"
+ expl = pat % (res_expl, res_expl, value_expl, attr.attr)
+ return res, expl
+
+ def visit_Compare(self, comp):
+ self.push_format_context()
+ left_res, left_expl = self.visit(comp.left)
+ res_variables = [self.variable() for i in range(len(comp.ops))]
+ load_names = [ast.Name(v, ast.Load()) for v in res_variables]
+ store_names = [ast.Name(v, ast.Store()) for v in res_variables]
+ it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
+ expls = []
+ syms = []
+ results = [left_res]
+ for i, op, next_operand in it:
+ next_res, next_expl = self.visit(next_operand)
+ results.append(next_res)
+ sym = binop_map[op.__class__]
+ syms.append(ast.Str(sym))
+ expl = "%s %s %s" % (left_expl, sym, next_expl)
+ expls.append(ast.Str(expl))
+ res_expr = ast.Compare(left_res, [op], [next_res])
+ self.statements.append(ast.Assign([store_names[i]], res_expr))
+ left_res, left_expl = next_res, next_expl
+ # Use pytest.assertion.util._reprcompare if that's available.
+ expl_call = self.helper("call_reprcompare",
+ ast.Tuple(syms, ast.Load()),
+ ast.Tuple(load_names, ast.Load()),
+ ast.Tuple(expls, ast.Load()),
+ ast.Tuple(results, ast.Load()))
+ if len(comp.ops) > 1:
+ res = ast.BoolOp(ast.And(), load_names)
+ else:
+ res = load_names[0]
+ return res, self.explanation_param(self.pop_format_context(expl_call))
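
To give a feel for what the rewriter in this file produces, here is a deliberately tiny stand-alone transformer in the same spirit: it turns assert a == b into an if/raise carrying the operand values. It is a hedged sketch for Python 3.8+ (it uses ast.Constant), not the real AssertionRewriter, which additionally handles boolean operators, calls, attributes and the %-formatting contexts described in its docstring.

    import ast

    class TinyAssertRewriter(ast.NodeTransformer):
        # Toy counterpart of AssertionRewriter: only handles "assert a == b".
        # Unlike the real rewriter it re-evaluates the operands in the message
        # instead of storing them in @py_assert* temporaries.
        def visit_Assert(self, node):
            test = node.test
            if not (isinstance(test, ast.Compare) and len(test.ops) == 1
                    and isinstance(test.ops[0], ast.Eq)):
                return node
            msg = ast.BinOp(ast.Constant("assert %r == %r"), ast.Mod(),
                            ast.Tuple([test.left, test.comparators[0]], ast.Load()))
            raise_ = ast.Raise(
                ast.Call(ast.Name("AssertionError", ast.Load()), [msg], []), None)
            new = ast.If(ast.UnaryOp(ast.Not(), test), [raise_], [])
            return ast.fix_missing_locations(ast.copy_location(new, node))

    tree = TinyAssertRewriter().visit(ast.parse("x = 2\nassert x == 3\n"))
    exec(compile(tree, "<rewritten>", "exec"), {})  # AssertionError: assert 2 == 3
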
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/assertion/util.py b/testing/web-platform/tests/tools/pytest/_pytest/assertion/util.py
new file mode 100644
index 000000000..f2f23efea
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/assertion/util.py
@@ -0,0 +1,332 @@
+"""Utilities for assertion debugging"""
+import pprint
+
+import _pytest._code
+import py
+try:
+ from collections import Sequence
+except ImportError:
+ Sequence = list
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
+
+# The _reprcompare attribute on the util module is used by the new assertion
+# interpretation code and assertion rewriter to detect that this plugin was
+# loaded and in turn call the hooks defined here as part of the
+# DebugInterpreter.
+_reprcompare = None
+
+
+# the re-encoding is needed for python2 repr
+# with non-ascii characters (see issue 877 and 1379)
+def ecu(s):
+ try:
+ return u(s, 'utf-8', 'replace')
+ except TypeError:
+ return s
+
+
+def format_explanation(explanation):
+ """This formats an explanation
+
+ Normally all embedded newlines are escaped, however there are
+    three exceptions: \n{, \n} and \n~.  The first two are intended to
+    cover nested explanations, see function and attribute explanations
+ for examples (.visit_Call(), visit_Attribute()). The last one is
+ for when one explanation needs to span multiple lines, e.g. when
+ displaying diffs.
+ """
+ explanation = ecu(explanation)
+ explanation = _collapse_false(explanation)
+ lines = _split_explanation(explanation)
+ result = _format_lines(lines)
+ return u('\n').join(result)
+
+
+def _collapse_false(explanation):
+ """Collapse expansions of False
+
+ So this strips out any "assert False\n{where False = ...\n}"
+ blocks.
+ """
+ where = 0
+ while True:
+ start = where = explanation.find("False\n{False = ", where)
+ if where == -1:
+ break
+ level = 0
+ prev_c = explanation[start]
+ for i, c in enumerate(explanation[start:]):
+ if prev_c + c == "\n{":
+ level += 1
+ elif prev_c + c == "\n}":
+ level -= 1
+ if not level:
+ break
+ prev_c = c
+ else:
+ raise AssertionError("unbalanced braces: %r" % (explanation,))
+ end = start + i
+ where = end
+ if explanation[end - 1] == '\n':
+ explanation = (explanation[:start] + explanation[start+15:end-1] +
+ explanation[end+1:])
+ where -= 17
+ return explanation
+
+
+def _split_explanation(explanation):
+ """Return a list of individual lines in the explanation
+
+ This will return a list of lines split on '\n{', '\n}' and '\n~'.
+ Any other newlines will be escaped and appear in the line as the
+ literal '\n' characters.
+ """
+ raw_lines = (explanation or u('')).split('\n')
+ lines = [raw_lines[0]]
+ for l in raw_lines[1:]:
+ if l and l[0] in ['{', '}', '~', '>']:
+ lines.append(l)
+ else:
+ lines[-1] += '\\n' + l
+ return lines
+
+
+def _format_lines(lines):
+ """Format the individual lines
+
+ This will replace the '{', '}' and '~' characters of our mini
+ formatting language with the proper 'where ...', 'and ...' and ' +
+ ...' text, taking care of indentation along the way.
+
+ Return a list of formatted lines.
+ """
+ result = lines[:1]
+ stack = [0]
+ stackcnt = [0]
+ for line in lines[1:]:
+ if line.startswith('{'):
+ if stackcnt[-1]:
+ s = u('and ')
+ else:
+ s = u('where ')
+ stack.append(len(result))
+ stackcnt[-1] += 1
+ stackcnt.append(0)
+ result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
+ elif line.startswith('}'):
+ stack.pop()
+ stackcnt.pop()
+ result[stack[-1]] += line[1:]
+ else:
+ assert line[0] in ['~', '>']
+ stack[-1] += 1
+ indent = len(stack) if line.startswith('~') else len(stack) - 1
+ result.append(u(' ')*indent + line[1:])
+ assert len(stack) == 1
+ return result
+
+
+# Provide basestring in python3
+try:
+ basestring = basestring
+except NameError:
+ basestring = str
+
+
+def assertrepr_compare(config, op, left, right):
+ """Return specialised explanations for some operators/operands"""
+ width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+ left_repr = py.io.saferepr(left, maxsize=int(width/2))
+ right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
+
+ summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
+
+ issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
+ not isinstance(x, basestring))
+ istext = lambda x: isinstance(x, basestring)
+ isdict = lambda x: isinstance(x, dict)
+ isset = lambda x: isinstance(x, (set, frozenset))
+
+ def isiterable(obj):
+ try:
+ iter(obj)
+ return not istext(obj)
+ except TypeError:
+ return False
+
+ verbose = config.getoption('verbose')
+ explanation = None
+ try:
+ if op == '==':
+ if istext(left) and istext(right):
+ explanation = _diff_text(left, right, verbose)
+ else:
+ if issequence(left) and issequence(right):
+ explanation = _compare_eq_sequence(left, right, verbose)
+ elif isset(left) and isset(right):
+ explanation = _compare_eq_set(left, right, verbose)
+ elif isdict(left) and isdict(right):
+ explanation = _compare_eq_dict(left, right, verbose)
+ if isiterable(left) and isiterable(right):
+ expl = _compare_eq_iterable(left, right, verbose)
+ if explanation is not None:
+ explanation.extend(expl)
+ else:
+ explanation = expl
+ elif op == 'not in':
+ if istext(left) and istext(right):
+ explanation = _notin_text(left, right, verbose)
+ except Exception:
+ explanation = [
+ u('(pytest_assertion plugin: representation of details failed. '
+ 'Probably an object has a faulty __repr__.)'),
+ u(_pytest._code.ExceptionInfo())]
+
+ if not explanation:
+ return None
+
+ return [summary] + explanation
+
+
+def _diff_text(left, right, verbose=False):
+ """Return the explanation for the diff between text or bytes
+
+ Unless --verbose is used this will skip leading and trailing
+ characters which are identical to keep the diff minimal.
+
+    If the inputs are bytes they will be safely converted to text.
+ """
+ from difflib import ndiff
+ explanation = []
+ if isinstance(left, py.builtin.bytes):
+ left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+ if isinstance(right, py.builtin.bytes):
+ right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+ if not verbose:
+ i = 0 # just in case left or right has zero length
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation = [u('Skipping %s identical leading '
+ 'characters in diff, use -v to show') % i]
+ left = left[i:]
+ right = right[i:]
+ if len(left) == len(right):
+ for i in range(len(left)):
+ if left[-i] != right[-i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation += [u('Skipping %s identical trailing '
+ 'characters in diff, use -v to show') % i]
+ left = left[:-i]
+ right = right[:-i]
+ explanation += [line.strip('\n')
+ for line in ndiff(left.splitlines(),
+ right.splitlines())]
+ return explanation
+
+
+def _compare_eq_iterable(left, right, verbose=False):
+ if not verbose:
+ return [u('Use -v to get the full diff')]
+ # dynamic import to speedup pytest
+ import difflib
+
+ try:
+ left_formatting = pprint.pformat(left).splitlines()
+ right_formatting = pprint.pformat(right).splitlines()
+ explanation = [u('Full diff:')]
+ except Exception:
+ # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
+ # sorted() on a list would raise. See issue #718.
+ # As a workaround, the full diff is generated by using the repr() string of each item of each container.
+ left_formatting = sorted(repr(x) for x in left)
+ right_formatting = sorted(repr(x) for x in right)
+ explanation = [u('Full diff (fallback to calling repr on each item):')]
+ explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
+ return explanation
+
+
+def _compare_eq_sequence(left, right, verbose=False):
+ explanation = []
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
+ explanation += [u('At index %s diff: %r != %r')
+ % (i, left[i], right[i])]
+ break
+ if len(left) > len(right):
+ explanation += [u('Left contains more items, first extra item: %s')
+ % py.io.saferepr(left[len(right)],)]
+ elif len(left) < len(right):
+ explanation += [
+ u('Right contains more items, first extra item: %s') %
+ py.io.saferepr(right[len(left)],)]
+ return explanation
+
+
+def _compare_eq_set(left, right, verbose=False):
+ explanation = []
+ diff_left = left - right
+ diff_right = right - left
+ if diff_left:
+ explanation.append(u('Extra items in the left set:'))
+ for item in diff_left:
+ explanation.append(py.io.saferepr(item))
+ if diff_right:
+ explanation.append(u('Extra items in the right set:'))
+ for item in diff_right:
+ explanation.append(py.io.saferepr(item))
+ return explanation
+
+
+def _compare_eq_dict(left, right, verbose=False):
+ explanation = []
+ common = set(left).intersection(set(right))
+ same = dict((k, left[k]) for k in common if left[k] == right[k])
+ if same and not verbose:
+ explanation += [u('Omitting %s identical items, use -v to show') %
+ len(same)]
+ elif same:
+ explanation += [u('Common items:')]
+ explanation += pprint.pformat(same).splitlines()
+ diff = set(k for k in common if left[k] != right[k])
+ if diff:
+ explanation += [u('Differing items:')]
+ for k in diff:
+ explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+ py.io.saferepr({k: right[k]})]
+ extra_left = set(left) - set(right)
+ if extra_left:
+ explanation.append(u('Left contains more items:'))
+ explanation.extend(pprint.pformat(
+ dict((k, left[k]) for k in extra_left)).splitlines())
+ extra_right = set(right) - set(left)
+ if extra_right:
+ explanation.append(u('Right contains more items:'))
+ explanation.extend(pprint.pformat(
+ dict((k, right[k]) for k in extra_right)).splitlines())
+ return explanation
+
+
+def _notin_text(term, text, verbose=False):
+ index = text.find(term)
+ head = text[:index]
+ tail = text[index+len(term):]
+ correct_text = head + tail
+ diff = _diff_text(correct_text, text, verbose)
+ newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
+ for line in diff:
+ if line.startswith(u('Skipping')):
+ continue
+ if line.startswith(u('- ')):
+ continue
+ if line.startswith(u('+ ')):
+ newdiff.append(u(' ') + line[2:])
+ else:
+ newdiff.append(line)
+ return newdiff
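
As an illustration of the \n{ / \n} / \n~ mini-language handled above, the short sketch below feeds format_explanation() an explanation string built with the same "%s\n{%s = %s\n}" pattern that visit_Call() and visit_Attribute() emit. It assumes _pytest.assertion.util from this tree is importable; the printed output is indicative only.

    from _pytest.assertion.util import format_explanation

    explanation = "assert result\n{result = f(10)\n}"
    print(format_explanation(explanation))
    # prints roughly:
    #   assert result
    #    + where result = f(10)
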
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/cacheprovider.py b/testing/web-platform/tests/tools/pytest/_pytest/cacheprovider.py
new file mode 100755
index 000000000..0657001f2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/cacheprovider.py
@@ -0,0 +1,245 @@
+"""
+merged implementation of the cache provider
+
+the name cache was deliberately not chosen, to ensure that pluggy
+automatically ignores the external pytest-cache plugin
+"""
+
+import py
+import pytest
+import json
+from os.path import sep as _sep, altsep as _altsep
+
+
+class Cache(object):
+ def __init__(self, config):
+ self.config = config
+ self._cachedir = config.rootdir.join(".cache")
+ self.trace = config.trace.root.get("cache")
+ if config.getvalue("cacheclear"):
+ self.trace("clearing cachedir")
+ if self._cachedir.check():
+ self._cachedir.remove()
+ self._cachedir.mkdir()
+
+ def makedir(self, name):
+ """ return a directory path object with the given name. If the
+ directory does not yet exist, it will be created. You can use it
+        to manage files, e.g. to store/retrieve database
+ dumps across test sessions.
+
+ :param name: must be a string not containing a ``/`` separator.
+ Make sure the name contains your plugin or application
+ identifiers to prevent clashes with other cache users.
+ """
+ if _sep in name or _altsep is not None and _altsep in name:
+ raise ValueError("name is not allowed to contain path separators")
+ return self._cachedir.ensure_dir("d", name)
+
+ def _getvaluepath(self, key):
+ return self._cachedir.join('v', *key.split('/'))
+
+ def get(self, key, default):
+ """ return cached value for the given key. If no value
+ was yet cached or the value cannot be read, the specified
+ default is returned.
+
+ :param key: must be a ``/`` separated value. Usually the first
+ name is the name of your plugin or your application.
+ :param default: must be provided in case of a cache-miss or
+ invalid cache values.
+
+ """
+ path = self._getvaluepath(key)
+ if path.check():
+ try:
+ with path.open("r") as f:
+ return json.load(f)
+ except ValueError:
+ self.trace("cache-invalid at %s" % (path,))
+ return default
+
+ def set(self, key, value):
+ """ save value for the given key.
+
+ :param key: must be a ``/`` separated value. Usually the first
+ name is the name of your plugin or your application.
+ :param value: must be of any combination of basic
+ python types, including nested types
+               like e.g. lists of dictionaries.
+ """
+ path = self._getvaluepath(key)
+ try:
+ path.dirpath().ensure_dir()
+ except (py.error.EEXIST, py.error.EACCES):
+ self.config.warn(
+ code='I9', message='could not create cache path %s' % (path,)
+ )
+ return
+ try:
+ f = path.open('w')
+ except py.error.ENOTDIR:
+ self.config.warn(
+ code='I9', message='cache could not write path %s' % (path,))
+ else:
+ with f:
+ self.trace("cache-write %s: %r" % (key, value,))
+ json.dump(value, f, indent=2, sort_keys=True)
+
+
+class LFPlugin:
+ """ Plugin which implements the --lf (run last-failing) option """
+ def __init__(self, config):
+ self.config = config
+ active_keys = 'lf', 'failedfirst'
+ self.active = any(config.getvalue(key) for key in active_keys)
+ if self.active:
+ self.lastfailed = config.cache.get("cache/lastfailed", {})
+ else:
+ self.lastfailed = {}
+
+ def pytest_report_header(self):
+ if self.active:
+ if not self.lastfailed:
+ mode = "run all (no recorded failures)"
+ else:
+ mode = "rerun last %d failures%s" % (
+ len(self.lastfailed),
+ " first" if self.config.getvalue("failedfirst") else "")
+ return "run-last-failure: %s" % mode
+
+ def pytest_runtest_logreport(self, report):
+ if report.failed and "xfail" not in report.keywords:
+ self.lastfailed[report.nodeid] = True
+ elif not report.failed:
+ if report.when == "call":
+ self.lastfailed.pop(report.nodeid, None)
+
+ def pytest_collectreport(self, report):
+ passed = report.outcome in ('passed', 'skipped')
+ if passed:
+ if report.nodeid in self.lastfailed:
+ self.lastfailed.pop(report.nodeid)
+ self.lastfailed.update(
+ (item.nodeid, True)
+ for item in report.result)
+ else:
+ self.lastfailed[report.nodeid] = True
+
+ def pytest_collection_modifyitems(self, session, config, items):
+ if self.active and self.lastfailed:
+ previously_failed = []
+ previously_passed = []
+ for item in items:
+ if item.nodeid in self.lastfailed:
+ previously_failed.append(item)
+ else:
+ previously_passed.append(item)
+ if not previously_failed and previously_passed:
+ # running a subset of all tests with recorded failures outside
+ # of the set of tests currently executing
+ pass
+ elif self.config.getvalue("failedfirst"):
+ items[:] = previously_failed + previously_passed
+ else:
+ items[:] = previously_failed
+ config.hook.pytest_deselected(items=previously_passed)
+
+ def pytest_sessionfinish(self, session):
+ config = self.config
+ if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
+ return
+ prev_failed = config.cache.get("cache/lastfailed", None) is not None
+ if (session.testscollected and prev_failed) or self.lastfailed:
+ config.cache.set("cache/lastfailed", self.lastfailed)
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption(
+ '--lf', '--last-failed', action='store_true', dest="lf",
+ help="rerun only the tests that failed "
+ "at the last run (or all if none failed)")
+ group.addoption(
+ '--ff', '--failed-first', action='store_true', dest="failedfirst",
+ help="run all tests but run the last failures first. "
+ "This may re-order tests and thus lead to "
+ "repeated fixture setup/teardown")
+ group.addoption(
+ '--cache-show', action='store_true', dest="cacheshow",
+ help="show cache contents, don't perform collection or tests")
+ group.addoption(
+ '--cache-clear', action='store_true', dest="cacheclear",
+ help="remove all cache contents at start of test run.")
+
+
+def pytest_cmdline_main(config):
+ if config.option.cacheshow:
+ from _pytest.main import wrap_session
+ return wrap_session(config, cacheshow)
+
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_configure(config):
+ config.cache = Cache(config)
+ config.pluginmanager.register(LFPlugin(config), "lfplugin")
+
+
+@pytest.fixture
+def cache(request):
+ """
+ Return a cache object that can persist state between testing sessions.
+
+ cache.get(key, default)
+ cache.set(key, value)
+
+ Keys must be a ``/`` separated value, where the first part is usually the
+ name of your plugin or application to avoid clashes with other cache users.
+
+ Values can be any object handled by the json stdlib module.
+ """
+ return request.config.cache
+
+
+def pytest_report_header(config):
+ if config.option.verbose:
+ relpath = py.path.local().bestrelpath(config.cache._cachedir)
+ return "cachedir: %s" % relpath
+
+
+def cacheshow(config, session):
+ from pprint import pprint
+ tw = py.io.TerminalWriter()
+ tw.line("cachedir: " + str(config.cache._cachedir))
+ if not config.cache._cachedir.check():
+ tw.line("cache is empty")
+ return 0
+ dummy = object()
+ basedir = config.cache._cachedir
+ vdir = basedir.join("v")
+ tw.sep("-", "cache values")
+ for valpath in vdir.visit(lambda x: x.isfile()):
+ key = valpath.relto(vdir).replace(valpath.sep, "/")
+ val = config.cache.get(key, dummy)
+ if val is dummy:
+ tw.line("%s contains unreadable content, "
+ "will be ignored" % key)
+ else:
+ tw.line("%s contains:" % key)
+ stream = py.io.TextIO()
+ pprint(val, stream=stream)
+ for line in stream.getvalue().splitlines():
+ tw.line(" " + line)
+
+ ddir = basedir.join("d")
+ if ddir.isdir() and ddir.listdir():
+ tw.sep("-", "cache directories")
+ for p in basedir.join("d").visit():
+ #if p.check(dir=1):
+ # print("%s/" % p.relto(basedir))
+ if p.isfile():
+ key = p.relto(basedir)
+ tw.line("%s is a file of length %d" % (
+ key, p.size()))
+ return 0
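
As a usage note for the provider above, the cache fixture it registers can be exercised from a test such as the sketch below; the key name example/runs is arbitrary. Running pytest with --cache-show afterwards lists the stored value, and --cache-clear removes it before the next run.

    # test_cache_sketch.py -- hedged example using the cache fixture defined above
    def test_counts_runs(cache):
        runs = cache.get("example/runs", 0)   # the default is returned on a cache miss
        cache.set("example/runs", runs + 1)   # stored as JSON under .cache/v/example/runs
        assert runs >= 0
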
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/capture.py b/testing/web-platform/tests/tools/pytest/_pytest/capture.py
new file mode 100644
index 000000000..3895a714a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/capture.py
@@ -0,0 +1,472 @@
+"""
+per-test stdout/stderr capturing mechanism.
+
+"""
+from __future__ import with_statement
+
+import sys
+import os
+from tempfile import TemporaryFile
+
+import py
+import pytest
+
+from py.io import TextIO
+unicode = py.builtin.text
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption(
+ '--capture', action="store",
+ default="fd" if hasattr(os, "dup") else "sys",
+ metavar="method", choices=['fd', 'sys', 'no'],
+ help="per-test capturing method: one of fd|sys|no.")
+ group._addoption(
+ '-s', action="store_const", const="no", dest="capture",
+ help="shortcut for --capture=no.")
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_load_initial_conftests(early_config, parser, args):
+ _readline_workaround()
+ ns = early_config.known_args_namespace
+ pluginmanager = early_config.pluginmanager
+ capman = CaptureManager(ns.capture)
+ pluginmanager.register(capman, "capturemanager")
+
+ # make sure that capturemanager is properly reset at final shutdown
+ early_config.add_cleanup(capman.reset_capturings)
+
+ # make sure logging does not raise exceptions at the end
+ def silence_logging_at_shutdown():
+ if "logging" in sys.modules:
+ sys.modules["logging"].raiseExceptions = False
+ early_config.add_cleanup(silence_logging_at_shutdown)
+
+ # finally trigger conftest loading but while capturing (issue93)
+ capman.init_capturings()
+ outcome = yield
+ out, err = capman.suspendcapture()
+ if outcome.excinfo is not None:
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+
+
+class CaptureManager:
+ def __init__(self, method):
+ self._method = method
+
+ def _getcapture(self, method):
+ if method == "fd":
+ return MultiCapture(out=True, err=True, Capture=FDCapture)
+ elif method == "sys":
+ return MultiCapture(out=True, err=True, Capture=SysCapture)
+ elif method == "no":
+ return MultiCapture(out=False, err=False, in_=False)
+ else:
+ raise ValueError("unknown capturing method: %r" % method)
+
+ def init_capturings(self):
+ assert not hasattr(self, "_capturing")
+ self._capturing = self._getcapture(self._method)
+ self._capturing.start_capturing()
+
+ def reset_capturings(self):
+ cap = self.__dict__.pop("_capturing", None)
+ if cap is not None:
+ cap.pop_outerr_to_orig()
+ cap.stop_capturing()
+
+ def resumecapture(self):
+ self._capturing.resume_capturing()
+
+ def suspendcapture(self, in_=False):
+ self.deactivate_funcargs()
+ cap = getattr(self, "_capturing", None)
+ if cap is not None:
+ try:
+ outerr = cap.readouterr()
+ finally:
+ cap.suspend_capturing(in_=in_)
+ return outerr
+
+ def activate_funcargs(self, pyfuncitem):
+ capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None)
+ if capfuncarg is not None:
+ capfuncarg._start()
+ self._capfuncarg = capfuncarg
+
+ def deactivate_funcargs(self):
+ capfuncarg = self.__dict__.pop("_capfuncarg", None)
+ if capfuncarg is not None:
+ capfuncarg.close()
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_make_collect_report(self, collector):
+ if isinstance(collector, pytest.File):
+ self.resumecapture()
+ outcome = yield
+ out, err = self.suspendcapture()
+ rep = outcome.get_result()
+ if out:
+ rep.sections.append(("Captured stdout", out))
+ if err:
+ rep.sections.append(("Captured stderr", err))
+ else:
+ yield
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_runtest_setup(self, item):
+ self.resumecapture()
+ yield
+ self.suspendcapture_item(item, "setup")
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_runtest_call(self, item):
+ self.resumecapture()
+ self.activate_funcargs(item)
+ yield
+ #self.deactivate_funcargs() called from suspendcapture()
+ self.suspendcapture_item(item, "call")
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_runtest_teardown(self, item):
+ self.resumecapture()
+ yield
+ self.suspendcapture_item(item, "teardown")
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_keyboard_interrupt(self, excinfo):
+ self.reset_capturings()
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_internalerror(self, excinfo):
+ self.reset_capturings()
+
+ def suspendcapture_item(self, item, when):
+ out, err = self.suspendcapture()
+ item.add_report_section(when, "stdout", out)
+ item.add_report_section(when, "stderr", err)
+
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
+@pytest.fixture
+def capsys(request):
+ """enables capturing of writes to sys.stdout/sys.stderr and makes
+ captured output available via ``capsys.readouterr()`` method calls
+ which return a ``(out, err)`` tuple.
+ """
+ if "capfd" in request._funcargs:
+ raise request.raiseerror(error_capsysfderror)
+ request.node._capfuncarg = c = CaptureFixture(SysCapture)
+ return c
+
+@pytest.fixture
+def capfd(request):
+ """enables capturing of writes to file descriptors 1 and 2 and makes
+ captured output available via ``capfd.readouterr()`` method calls
+ which return a ``(out, err)`` tuple.
+ """
+ if "capsys" in request._funcargs:
+ request.raiseerror(error_capsysfderror)
+ if not hasattr(os, 'dup'):
+ pytest.skip("capfd funcarg needs os.dup")
+ request.node._capfuncarg = c = CaptureFixture(FDCapture)
+ return c
+
+
+class CaptureFixture:
+ def __init__(self, captureclass):
+ self.captureclass = captureclass
+
+ def _start(self):
+ self._capture = MultiCapture(out=True, err=True, in_=False,
+ Capture=self.captureclass)
+ self._capture.start_capturing()
+
+ def close(self):
+ cap = self.__dict__.pop("_capture", None)
+ if cap is not None:
+ self._outerr = cap.pop_outerr_to_orig()
+ cap.stop_capturing()
+
+ def readouterr(self):
+ try:
+ return self._capture.readouterr()
+ except AttributeError:
+ return self._outerr
+
+
+def safe_text_dupfile(f, mode, default_encoding="UTF8"):
+ """ return a open text file object that's a duplicate of f on the
+ FD-level if possible.
+ """
+ encoding = getattr(f, "encoding", None)
+ try:
+ fd = f.fileno()
+ except Exception:
+ if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"):
+ # we seem to have a text stream, let's just use it
+ return f
+ else:
+ newfd = os.dup(fd)
+ if "b" not in mode:
+ mode += "b"
+ f = os.fdopen(newfd, mode, 0) # no buffering
+ return EncodedFile(f, encoding or default_encoding)
+
+
+class EncodedFile(object):
+ errors = "strict" # possibly needed by py3 code (issue555)
+ def __init__(self, buffer, encoding):
+ self.buffer = buffer
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding, "replace")
+ self.buffer.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(object.__getattribute__(self, "buffer"), name)
+
+
+class MultiCapture(object):
+ out = err = in_ = None
+
+ def __init__(self, out=True, err=True, in_=True, Capture=None):
+ if in_:
+ self.in_ = Capture(0)
+ if out:
+ self.out = Capture(1)
+ if err:
+ self.err = Capture(2)
+
+ def start_capturing(self):
+ if self.in_:
+ self.in_.start()
+ if self.out:
+ self.out.start()
+ if self.err:
+ self.err.start()
+
+ def pop_outerr_to_orig(self):
+ """ pop current snapshot out/err capture and flush to orig streams. """
+ out, err = self.readouterr()
+ if out:
+ self.out.writeorg(out)
+ if err:
+ self.err.writeorg(err)
+ return out, err
+
+ def suspend_capturing(self, in_=False):
+ if self.out:
+ self.out.suspend()
+ if self.err:
+ self.err.suspend()
+ if in_ and self.in_:
+ self.in_.suspend()
+ self._in_suspended = True
+
+ def resume_capturing(self):
+ if self.out:
+ self.out.resume()
+ if self.err:
+ self.err.resume()
+ if hasattr(self, "_in_suspended"):
+ self.in_.resume()
+ del self._in_suspended
+
+ def stop_capturing(self):
+ """ stop capturing and reset capturing streams """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already stopped")
+ self._reset = True
+ if self.out:
+ self.out.done()
+ if self.err:
+ self.err.done()
+ if self.in_:
+ self.in_.done()
+
+ def readouterr(self):
+ """ return snapshot unicode value of stdout/stderr capturings. """
+ return (self.out.snap() if self.out is not None else "",
+ self.err.snap() if self.err is not None else "")
+
+class NoCapture:
+ __init__ = start = done = suspend = resume = lambda *args: None
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None):
+ self.targetfd = targetfd
+ try:
+ self.targetfd_save = os.dup(self.targetfd)
+ except OSError:
+ self.start = lambda: None
+ self.done = lambda: None
+ else:
+ if targetfd == 0:
+ assert not tmpfile, "cannot set tmpfile with stdin"
+ tmpfile = open(os.devnull, "r")
+ self.syscapture = SysCapture(targetfd)
+ else:
+ if tmpfile is None:
+ f = TemporaryFile()
+ with f:
+ tmpfile = safe_text_dupfile(f, mode="wb+")
+ if targetfd in patchsysdict:
+ self.syscapture = SysCapture(targetfd, tmpfile)
+ else:
+ self.syscapture = NoCapture()
+ self.tmpfile = tmpfile
+ self.tmpfile_fd = tmpfile.fileno()
+
+ def __repr__(self):
+ return "<FDCapture %s oldfd=%s>" % (self.targetfd, self.targetfd_save)
+
+ def start(self):
+ """ Start capturing on targetfd using memorized tmpfile. """
+ try:
+ os.fstat(self.targetfd_save)
+ except (AttributeError, OSError):
+ raise ValueError("saved filedescriptor not valid anymore")
+ os.dup2(self.tmpfile_fd, self.targetfd)
+ self.syscapture.start()
+
+ def snap(self):
+ f = self.tmpfile
+ f.seek(0)
+ res = f.read()
+ if res:
+ enc = getattr(f, "encoding", None)
+ if enc and isinstance(res, bytes):
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+ return ''
+
+ def done(self):
+ """ stop capturing, restore streams, return original capture file,
+ seeked to position zero. """
+ targetfd_save = self.__dict__.pop("targetfd_save")
+ os.dup2(targetfd_save, self.targetfd)
+ os.close(targetfd_save)
+ self.syscapture.done()
+ self.tmpfile.close()
+
+ def suspend(self):
+ self.syscapture.suspend()
+ os.dup2(self.targetfd_save, self.targetfd)
+
+ def resume(self):
+ self.syscapture.resume()
+ os.dup2(self.tmpfile_fd, self.targetfd)
+
+ def writeorg(self, data):
+ """ write to original file descriptor. """
+ if py.builtin._istext(data):
+ data = data.encode("utf8") # XXX use encoding of original stream
+ os.write(self.targetfd_save, data)
+
+
+class SysCapture:
+ def __init__(self, fd, tmpfile=None):
+ name = patchsysdict[fd]
+ self._old = getattr(sys, name)
+ self.name = name
+ if tmpfile is None:
+ if name == "stdin":
+ tmpfile = DontReadFromInput()
+ else:
+ tmpfile = TextIO()
+ self.tmpfile = tmpfile
+
+ def start(self):
+ setattr(sys, self.name, self.tmpfile)
+
+ def snap(self):
+ f = self.tmpfile
+ res = f.getvalue()
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+ def done(self):
+ setattr(sys, self.name, self._old)
+ del self._old
+ self.tmpfile.close()
+
+ def suspend(self):
+ setattr(sys, self.name, self._old)
+
+ def resume(self):
+ setattr(sys, self.name, self.tmpfile)
+
+ def writeorg(self, data):
+ self._old.write(data)
+ self._old.flush()
+
+
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+
+ encoding = None
+
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+
+ def isatty(self):
+ return False
+
+ def close(self):
+ pass
+
+
+def _readline_workaround():
+ """
+ Ensure readline is imported so that it attaches to the correct stdio
+ handles on Windows.
+
+ Pdb uses readline support where available--when not running from the Python
+ prompt, the readline module is not imported until running the pdb REPL. If
+ running py.test with the --pdb option this means the readline module is not
+ imported until after I/O capture has been started.
+
+ This is a problem for pyreadline, which is often used to implement readline
+ support on Windows, as it does not attach to the correct handles for stdout
+ and/or stdin if they have been redirected by the FDCapture mechanism. This
+ workaround ensures that readline is imported before I/O capture is set up so
+ that it can attach to the actual stdin/out for the console.
+
+ See https://github.com/pytest-dev/pytest/pull/1281
+ """
+
+ if not sys.platform.startswith('win32'):
+ return
+ try:
+ import readline # noqa
+ except ImportError:
+ pass
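For reference, the ``capsys`` and ``capfd`` fixtures defined above are used like this (a minimal sketch; the function under test is invented for illustration, and ``capfd`` works the same way but captures at the file-descriptor level):

    # hypothetical test exercising the capsys fixture from capture.py
    import sys

    def greet():
        print("hello")
        sys.stderr.write("warning\n")

    def test_greet_output(capsys):
        greet()
        out, err = capsys.readouterr()  # snapshot of captured stdout/stderr
        assert out == "hello\n"
        assert err == "warning\n"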
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/config.py b/testing/web-platform/tests/tools/pytest/_pytest/config.py
new file mode 100644
index 000000000..fb7b1774f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/config.py
@@ -0,0 +1,1192 @@
+""" command line options, ini-file and conftest.py processing. """
+import argparse
+import shlex
+import traceback
+import types
+import warnings
+
+import py
+# DON'T import pytest here because it causes import cycle troubles
+import sys, os
+import _pytest._code
+import _pytest.hookspec # the extension point definitions
+from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
+
+hookimpl = HookimplMarker("pytest")
+hookspec = HookspecMarker("pytest")
+
+# pytest startup
+#
+
+
+class ConftestImportFailure(Exception):
+ def __init__(self, path, excinfo):
+ Exception.__init__(self, path, excinfo)
+ self.path = path
+ self.excinfo = excinfo
+
+
+def main(args=None, plugins=None):
+ """ return exit code, after performing an in-process test run.
+
+ :arg args: list of command line arguments.
+
+ :arg plugins: list of plugin objects to be auto-registered during
+ initialization.
+ """
+ try:
+ try:
+ config = _prepareconfig(args, plugins)
+ except ConftestImportFailure as e:
+ tw = py.io.TerminalWriter(sys.stderr)
+ for line in traceback.format_exception(*e.excinfo):
+ tw.line(line.rstrip(), red=True)
+ tw.line("ERROR: could not load %s\n" % (e.path), red=True)
+ return 4
+ else:
+ try:
+ config.pluginmanager.check_pending()
+ return config.hook.pytest_cmdline_main(config=config)
+ finally:
+ config._ensure_unconfigure()
+ except UsageError as e:
+ for msg in e.args:
+ sys.stderr.write("ERROR: %s\n" %(msg,))
+ return 4
+
+class cmdline: # compatibility namespace
+ main = staticmethod(main)
+
+class UsageError(Exception):
+ """ error in pytest usage or invocation"""
+
+_preinit = []
+
+default_plugins = (
+ "mark main terminal runner python pdb unittest capture skipping "
+ "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
+ "junitxml resultlog doctest cacheprovider").split()
+
+builtin_plugins = set(default_plugins)
+builtin_plugins.add("pytester")
+
+
+def _preloadplugins():
+ assert not _preinit
+ _preinit.append(get_config())
+
+def get_config():
+ if _preinit:
+ return _preinit.pop(0)
+ # subsequent calls to main will create a fresh instance
+ pluginmanager = PytestPluginManager()
+ config = Config(pluginmanager)
+ for spec in default_plugins:
+ pluginmanager.import_plugin(spec)
+ return config
+
+def get_plugin_manager():
+ """
+ Obtain a new instance of the
+ :py:class:`_pytest.config.PytestPluginManager`, with default plugins
+ already loaded.
+
+ This function can be used by integrations with other tools, for example
+ hooking into pytest to run tests from within an IDE.
+ """
+ return get_config().pluginmanager
+
+def _prepareconfig(args=None, plugins=None):
+ if args is None:
+ args = sys.argv[1:]
+ elif isinstance(args, py.path.local):
+ args = [str(args)]
+ elif not isinstance(args, (tuple, list)):
+ if not isinstance(args, str):
+ raise ValueError("not a string or argument list: %r" % (args,))
+ args = shlex.split(args)
+ config = get_config()
+ pluginmanager = config.pluginmanager
+ try:
+ if plugins:
+ for plugin in plugins:
+ if isinstance(plugin, py.builtin._basestring):
+ pluginmanager.consider_pluginarg(plugin)
+ else:
+ pluginmanager.register(plugin)
+ return pluginmanager.hook.pytest_cmdline_parse(
+ pluginmanager=pluginmanager, args=args)
+ except BaseException:
+ config._ensure_unconfigure()
+ raise
+
+
+class PytestPluginManager(PluginManager):
+ """
+ Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific
+ functionality:
+
+ * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and
+ ``pytest_plugins`` global variables found in plugins being loaded;
+ * ``conftest.py`` loading during start-up;
+ """
+ def __init__(self):
+ super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
+ self._conftest_plugins = set()
+
+ # state related to local conftest plugins
+ self._path2confmods = {}
+ self._conftestpath2mod = {}
+ self._confcutdir = None
+ self._noconftest = False
+
+ self.add_hookspecs(_pytest.hookspec)
+ self.register(self)
+ if os.environ.get('PYTEST_DEBUG'):
+ err = sys.stderr
+ encoding = getattr(err, 'encoding', 'utf8')
+ try:
+ err = py.io.dupfile(err, encoding=encoding)
+ except Exception:
+ pass
+ self.trace.root.setwriter(err.write)
+ self.enable_tracing()
+
+ def addhooks(self, module_or_class):
+ """
+ .. deprecated:: 2.8
+
+ Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead.
+ """
+ warning = dict(code="I2",
+ fslocation=_pytest._code.getfslineno(sys._getframe(1)),
+ nodeid=None,
+ message="use pluginmanager.add_hookspecs instead of "
+ "deprecated addhooks() method.")
+ self._warn(warning)
+ return self.add_hookspecs(module_or_class)
+
+ def parse_hookimpl_opts(self, plugin, name):
+ # pytest hooks are always prefixed with pytest_
+ # so we avoid accessing possibly non-readable attributes
+ # (see issue #1073)
+ if not name.startswith("pytest_"):
+ return
+ # ignore some historic special names which can not be hooks anyway
+ if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
+ return
+
+ method = getattr(plugin, name)
+ opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
+ if opts is not None:
+ for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
+ opts.setdefault(name, hasattr(method, name))
+ return opts
+
+ def parse_hookspec_opts(self, module_or_class, name):
+ opts = super(PytestPluginManager, self).parse_hookspec_opts(
+ module_or_class, name)
+ if opts is None:
+ method = getattr(module_or_class, name)
+ if name.startswith("pytest_"):
+ opts = {"firstresult": hasattr(method, "firstresult"),
+ "historic": hasattr(method, "historic")}
+ return opts
+
+ def _verify_hook(self, hook, hookmethod):
+ super(PytestPluginManager, self)._verify_hook(hook, hookmethod)
+ if "__multicall__" in hookmethod.argnames:
+ fslineno = _pytest._code.getfslineno(hookmethod.function)
+ warning = dict(code="I1",
+ fslocation=fslineno,
+ nodeid=None,
+ message="%r hook uses deprecated __multicall__ "
+ "argument" % (hook.name))
+ self._warn(warning)
+
+ def register(self, plugin, name=None):
+ ret = super(PytestPluginManager, self).register(plugin, name)
+ if ret:
+ self.hook.pytest_plugin_registered.call_historic(
+ kwargs=dict(plugin=plugin, manager=self))
+ return ret
+
+ def getplugin(self, name):
+ # support deprecated naming because plugins (e.g. xdist) use it
+ return self.get_plugin(name)
+
+ def hasplugin(self, name):
+ """Return True if the plugin with the given name is registered."""
+ return bool(self.get_plugin(name))
+
+ def pytest_configure(self, config):
+ # XXX now that the pluginmanager exposes hookimpl(tryfirst...)
+ # we should remove tryfirst/trylast as markers
+ config.addinivalue_line("markers",
+ "tryfirst: mark a hook implementation function such that the "
+ "plugin machinery will try to call it first/as early as possible.")
+ config.addinivalue_line("markers",
+ "trylast: mark a hook implementation function such that the "
+ "plugin machinery will try to call it last/as late as possible.")
+
+ def _warn(self, message):
+ kwargs = message if isinstance(message, dict) else {
+ 'code': 'I1',
+ 'message': message,
+ 'fslocation': None,
+ 'nodeid': None,
+ }
+ self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
+
+ #
+ # internal API for local conftest plugin handling
+ #
+ def _set_initial_conftests(self, namespace):
+ """ load initial conftest files given a preparsed "namespace".
+ As conftest files may add their own command line options
+ which have arguments ('--my-opt somepath') we might get some
+ false positives. All builtin and 3rd party plugins will have
+ been loaded, however, so common options will not confuse our logic
+ here.
+ """
+ current = py.path.local()
+ self._confcutdir = current.join(namespace.confcutdir, abs=True) \
+ if namespace.confcutdir else None
+ self._noconftest = namespace.noconftest
+ testpaths = namespace.file_or_dir
+ foundanchor = False
+ for path in testpaths:
+ path = str(path)
+ # remove node-id syntax
+ i = path.find("::")
+ if i != -1:
+ path = path[:i]
+ anchor = current.join(path, abs=1)
+ if exists(anchor): # we found some file object
+ self._try_load_conftest(anchor)
+ foundanchor = True
+ if not foundanchor:
+ self._try_load_conftest(current)
+
+ def _try_load_conftest(self, anchor):
+ self._getconftestmodules(anchor)
+ # let's also consider test* subdirs
+ if anchor.check(dir=1):
+ for x in anchor.listdir("test*"):
+ if x.check(dir=1):
+ self._getconftestmodules(x)
+
+ def _getconftestmodules(self, path):
+ if self._noconftest:
+ return []
+ try:
+ return self._path2confmods[path]
+ except KeyError:
+ if path.isfile():
+ clist = self._getconftestmodules(path.dirpath())
+ else:
+ # XXX these days we may rather want to use config.rootdir
+ # and allow users to opt into looking into the rootdir parent
+ # directories instead of requiring to specify confcutdir
+ clist = []
+ for parent in path.parts():
+ if self._confcutdir and self._confcutdir.relto(parent):
+ continue
+ conftestpath = parent.join("conftest.py")
+ if conftestpath.isfile():
+ mod = self._importconftest(conftestpath)
+ clist.append(mod)
+
+ self._path2confmods[path] = clist
+ return clist
+
+ def _rget_with_confmod(self, name, path):
+ modules = self._getconftestmodules(path)
+ for mod in reversed(modules):
+ try:
+ return mod, getattr(mod, name)
+ except AttributeError:
+ continue
+ raise KeyError(name)
+
+ def _importconftest(self, conftestpath):
+ try:
+ return self._conftestpath2mod[conftestpath]
+ except KeyError:
+ pkgpath = conftestpath.pypkgpath()
+ if pkgpath is None:
+ _ensure_removed_sysmodule(conftestpath.purebasename)
+ try:
+ mod = conftestpath.pyimport()
+ except Exception:
+ raise ConftestImportFailure(conftestpath, sys.exc_info())
+
+ self._conftest_plugins.add(mod)
+ self._conftestpath2mod[conftestpath] = mod
+ dirpath = conftestpath.dirpath()
+ if dirpath in self._path2confmods:
+ for path, mods in self._path2confmods.items():
+ if path and path.relto(dirpath) or path == dirpath:
+ assert mod not in mods
+ mods.append(mod)
+ self.trace("loaded conftestmodule %r" %(mod))
+ self.consider_conftest(mod)
+ return mod
+
+ #
+ # API for bootstrapping plugin loading
+ #
+ #
+
+ def consider_preparse(self, args):
+ for opt1,opt2 in zip(args, args[1:]):
+ if opt1 == "-p":
+ self.consider_pluginarg(opt2)
+
+ def consider_pluginarg(self, arg):
+ if arg.startswith("no:"):
+ name = arg[3:]
+ self.set_blocked(name)
+ if not name.startswith("pytest_"):
+ self.set_blocked("pytest_" + name)
+ else:
+ self.import_plugin(arg)
+
+ def consider_conftest(self, conftestmodule):
+ if self.register(conftestmodule, name=conftestmodule.__file__):
+ self.consider_module(conftestmodule)
+
+ def consider_env(self):
+ self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
+
+ def consider_module(self, mod):
+ self._import_plugin_specs(getattr(mod, "pytest_plugins", None))
+
+ def _import_plugin_specs(self, spec):
+ if spec:
+ if isinstance(spec, str):
+ spec = spec.split(",")
+ for import_spec in spec:
+ self.import_plugin(import_spec)
+
+ def import_plugin(self, modname):
+ # most often modname refers to builtin modules, e.g. "pytester",
+ # "terminal" or "capture". Those plugins are registered under their
+ # basename for historic purposes but must be imported with the
+ # _pytest prefix.
+ assert isinstance(modname, str)
+ if self.get_plugin(modname) is not None:
+ return
+ if modname in builtin_plugins:
+ importspec = "_pytest." + modname
+ else:
+ importspec = modname
+ try:
+ __import__(importspec)
+ except ImportError as e:
+ new_exc = ImportError('Error importing plugin "%s": %s' % (modname, e))
+ # copy over name and path attributes
+ for attr in ('name', 'path'):
+ if hasattr(e, attr):
+ setattr(new_exc, attr, getattr(e, attr))
+ raise new_exc
+ except Exception as e:
+ import pytest
+ if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
+ raise
+ self._warn("skipped plugin %r: %s" %((modname, e.msg)))
+ else:
+ mod = sys.modules[importspec]
+ self.register(mod, modname)
+ self.consider_module(mod)
+
+
+class Parser:
+ """ Parser for command line arguments and ini-file values.
+
+ :ivar extra_info: dict of generic param -> value to display in case
+ there's an error processing the command line arguments.
+ """
+
+ def __init__(self, usage=None, processopt=None):
+ self._anonymous = OptionGroup("custom options", parser=self)
+ self._groups = []
+ self._processopt = processopt
+ self._usage = usage
+ self._inidict = {}
+ self._ininames = []
+ self.extra_info = {}
+
+ def processoption(self, option):
+ if self._processopt:
+ if option.dest:
+ self._processopt(option)
+
+ def getgroup(self, name, description="", after=None):
+ """ get (or create) a named option Group.
+
+ :name: name of the option group.
+ :description: long description for --help output.
+ :after: name of other group, used for ordering --help output.
+
+ The returned group object has an ``addoption`` method with the same
+ signature as :py:func:`parser.addoption
+ <_pytest.config.Parser.addoption>` but will be shown in the
+ respective group in the output of ``pytest --help``.
+ """
+ for group in self._groups:
+ if group.name == name:
+ return group
+ group = OptionGroup(name, description, parser=self)
+ i = 0
+ for i, grp in enumerate(self._groups):
+ if grp.name == after:
+ break
+ self._groups.insert(i+1, group)
+ return group
+
+ def addoption(self, *opts, **attrs):
+ """ register a command line option.
+
+ :opts: option names, can be short or long options.
+ :attrs: same attributes which the ``add_argument()`` function of the
+ `argparse library
+ <http://docs.python.org/2/library/argparse.html>`_
+ accepts.
+
+ After command line parsing options are available on the pytest config
+ object via ``config.option.NAME`` where ``NAME`` is usually set
+ by passing a ``dest`` attribute, for example
+ ``addoption("--long", dest="NAME", ...)``.
+ """
+ self._anonymous.addoption(*opts, **attrs)
+
+ def parse(self, args, namespace=None):
+ from _pytest._argcomplete import try_argcomplete
+ self.optparser = self._getparser()
+ try_argcomplete(self.optparser)
+ return self.optparser.parse_args([str(x) for x in args], namespace=namespace)
+
+ def _getparser(self):
+ from _pytest._argcomplete import filescompleter
+ optparser = MyOptionParser(self, self.extra_info)
+ groups = self._groups + [self._anonymous]
+ for group in groups:
+ if group.options:
+ desc = group.description or group.name
+ arggroup = optparser.add_argument_group(desc)
+ for option in group.options:
+ n = option.names()
+ a = option.attrs()
+ arggroup.add_argument(*n, **a)
+ # bash like autocompletion for dirs (appending '/')
+ optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter
+ return optparser
+
+ def parse_setoption(self, args, option, namespace=None):
+ parsedoption = self.parse(args, namespace=namespace)
+ for name, value in parsedoption.__dict__.items():
+ setattr(option, name, value)
+ return getattr(parsedoption, FILE_OR_DIR)
+
+ def parse_known_args(self, args, namespace=None):
+ """parses and returns a namespace object with known arguments at this
+ point.
+ """
+ return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
+
+ def parse_known_and_unknown_args(self, args, namespace=None):
+ """parses and returns a namespace object with known arguments, and
+ the remaining arguments unknown at this point.
+ """
+ optparser = self._getparser()
+ args = [str(x) for x in args]
+ return optparser.parse_known_args(args, namespace=namespace)
+
+ def addini(self, name, help, type=None, default=None):
+ """ register an ini-file option.
+
+ :name: name of the ini-variable
+ :type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
+ or ``bool``.
+ :default: default value if no ini-file option exists but is queried.
+
+ The value of ini-variables can be retrieved via a call to
+ :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
+ """
+ assert type in (None, "pathlist", "args", "linelist", "bool")
+ self._inidict[name] = (help, type, default)
+ self._ininames.append(name)
+
+
+class ArgumentError(Exception):
+ """
+ Raised if an Argument instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+
+class Argument:
+ """class that mimics the necessary behaviour of optparse.Option """
+ _typ_map = {
+ 'int': int,
+ 'string': str,
+ }
+ # enable after some grace period for plugin writers
+ TYPE_WARN = False
+
+ def __init__(self, *names, **attrs):
+ """store parms in private vars for use in add_argument"""
+ self._attrs = attrs
+ self._short_opts = []
+ self._long_opts = []
+ self.dest = attrs.get('dest')
+ if self.TYPE_WARN:
+ try:
+ help = attrs['help']
+ if '%default' in help:
+ warnings.warn(
+ 'pytest now uses argparse. "%default" should be'
+ ' changed to "%(default)s" ',
+ FutureWarning,
+ stacklevel=3)
+ except KeyError:
+ pass
+ try:
+ typ = attrs['type']
+ except KeyError:
+ pass
+ else:
+ # this might raise a keyerror as well, don't want to catch that
+ if isinstance(typ, py.builtin._basestring):
+ if typ == 'choice':
+ if self.TYPE_WARN:
+ warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this is optional and when supplied '
+ ' should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ # argparse expects a type here; take it from
+ # the type of the first element
+ attrs['type'] = type(attrs['choices'][0])
+ else:
+ if self.TYPE_WARN:
+ warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ attrs['type'] = Argument._typ_map[typ]
+ # used in test_parseopt -> test_parse_defaultgetter
+ self.type = attrs['type']
+ else:
+ self.type = typ
+ try:
+ # attribute existence is tested in Config._processopt
+ self.default = attrs['default']
+ except KeyError:
+ pass
+ self._set_opt_strings(names)
+ if not self.dest:
+ if self._long_opts:
+ self.dest = self._long_opts[0][2:].replace('-', '_')
+ else:
+ try:
+ self.dest = self._short_opts[0][1:]
+ except IndexError:
+ raise ArgumentError(
+ 'need a long or short option', self)
+
+ def names(self):
+ return self._short_opts + self._long_opts
+
+ def attrs(self):
+ # update any attributes set by processopt
+ attrs = 'default dest help'.split()
+ if self.dest:
+ attrs.append(self.dest)
+ for attr in attrs:
+ try:
+ self._attrs[attr] = getattr(self, attr)
+ except AttributeError:
+ pass
+ if self._attrs.get('help'):
+ a = self._attrs['help']
+ a = a.replace('%default', '%(default)s')
+ #a = a.replace('%prog', '%(prog)s')
+ self._attrs['help'] = a
+ return self._attrs
+
+ def _set_opt_strings(self, opts):
+ """directly from optparse
+
+ might not be necessary as this is passed to argparse later on"""
+ for opt in opts:
+ if len(opt) < 2:
+ raise ArgumentError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise ArgumentError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise ArgumentError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def __repr__(self):
+ retval = 'Argument('
+ if self._short_opts:
+ retval += '_short_opts: ' + repr(self._short_opts) + ', '
+ if self._long_opts:
+ retval += '_long_opts: ' + repr(self._long_opts) + ', '
+ retval += 'dest: ' + repr(self.dest) + ', '
+ if hasattr(self, 'type'):
+ retval += 'type: ' + repr(self.type) + ', '
+ if hasattr(self, 'default'):
+ retval += 'default: ' + repr(self.default) + ', '
+ if retval[-2:] == ', ': # always long enough to test ("Argument(" )
+ retval = retval[:-2]
+ retval += ')'
+ return retval
+
+
+class OptionGroup:
+ def __init__(self, name, description="", parser=None):
+ self.name = name
+ self.description = description
+ self.options = []
+ self.parser = parser
+
+ def addoption(self, *optnames, **attrs):
+ """ add an option to this group.
+
+ If a shortened version of a long option is specified, it will
+ be suppressed in the help. addoption('--twowords', '--two-words')
+ results in help showing '--two-words' only, but --twowords gets
+ accepted **and** the automatic destination is in args.twowords
+ """
+ option = Argument(*optnames, **attrs)
+ self._addoption_instance(option, shortupper=False)
+
+ def _addoption(self, *optnames, **attrs):
+ option = Argument(*optnames, **attrs)
+ self._addoption_instance(option, shortupper=True)
+
+ def _addoption_instance(self, option, shortupper=False):
+ if not shortupper:
+ for opt in option._short_opts:
+ if opt[0] == '-' and opt[1].islower():
+ raise ValueError("lowercase shortoptions reserved")
+ if self.parser:
+ self.parser.processoption(option)
+ self.options.append(option)
+
+
+class MyOptionParser(argparse.ArgumentParser):
+ def __init__(self, parser, extra_info=None):
+ if not extra_info:
+ extra_info = {}
+ self._parser = parser
+ argparse.ArgumentParser.__init__(self, usage=parser._usage,
+ add_help=False, formatter_class=DropShorterLongHelpFormatter)
+ # extra_info is a dict of (param -> value) to display if there's
+ # a usage error to provide more contextual information to the user
+ self.extra_info = extra_info
+
+ def parse_args(self, args=None, namespace=None):
+ """allow splitting of positional arguments"""
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ for arg in argv:
+ if arg and arg[0] == '-':
+ lines = ['unrecognized arguments: %s' % (' '.join(argv))]
+ for k, v in sorted(self.extra_info.items()):
+ lines.append(' %s: %s' % (k, v))
+ self.error('\n'.join(lines))
+ getattr(args, FILE_OR_DIR).extend(argv)
+ return args
+
+
+class DropShorterLongHelpFormatter(argparse.HelpFormatter):
+ """shorten help for long options that differ only in extra hyphens
+
+ - collapse **long** options that are the same except for extra hyphens
+ - special action attribute map_long_option allows suppressing additional
+ long options
+ - shortcut if there are only two options and one of them is a short one
+ - cache result on action object as this is called at least 2 times
+ """
+ def _format_action_invocation(self, action):
+ orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
+ if orgstr and orgstr[0] != '-': # only optional arguments
+ return orgstr
+ res = getattr(action, '_formatted_action_invocation', None)
+ if res:
+ return res
+ options = orgstr.split(', ')
+ if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
+ # a shortcut for '-h, --help' or '--abc', '-a'
+ action._formatted_action_invocation = orgstr
+ return orgstr
+ return_list = []
+ option_map = getattr(action, 'map_long_option', {})
+ if option_map is None:
+ option_map = {}
+ short_long = {}
+ for option in options:
+ if len(option) == 2 or option[2] == ' ':
+ continue
+ if not option.startswith('--'):
+ raise ArgumentError('long optional argument without "--": [%s]'
+ % (option), self)
+ xxoption = option[2:]
+ if xxoption.split()[0] not in option_map:
+ shortened = xxoption.replace('-', '')
+ if shortened not in short_long or \
+ len(short_long[shortened]) < len(xxoption):
+ short_long[shortened] = xxoption
+ # now short_long has been filled out to the longest with dashes
+ # **and** we keep the right option ordering from add_argument
+ for option in options:
+ if len(option) == 2 or option[2] == ' ':
+ return_list.append(option)
+ if option[2:] == short_long.get(option.replace('-', '')):
+ return_list.append(option.replace(' ', '='))
+ action._formatted_action_invocation = ', '.join(return_list)
+ return action._formatted_action_invocation
+
+
+
+def _ensure_removed_sysmodule(modname):
+ try:
+ del sys.modules[modname]
+ except KeyError:
+ pass
+
+class CmdOptions(object):
+ """ holds cmdline options as attributes."""
+ def __init__(self, values=()):
+ self.__dict__.update(values)
+ def __repr__(self):
+ return "<CmdOptions %r>" %(self.__dict__,)
+ def copy(self):
+ return CmdOptions(self.__dict__)
+
+class Notset:
+ def __repr__(self):
+ return "<NOTSET>"
+
+notset = Notset()
+FILE_OR_DIR = 'file_or_dir'
+
+class Config(object):
+ """ access to configuration values, pluginmanager and plugin hooks. """
+
+ def __init__(self, pluginmanager):
+ #: access to command line option as attributes.
+ #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
+ self.option = CmdOptions()
+ _a = FILE_OR_DIR
+ self._parser = Parser(
+ usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
+ processopt=self._processopt,
+ )
+ #: a pluginmanager instance
+ self.pluginmanager = pluginmanager
+ self.trace = self.pluginmanager.trace.root.get("config")
+ self.hook = self.pluginmanager.hook
+ self._inicache = {}
+ self._opt2dest = {}
+ self._cleanup = []
+ self._warn = self.pluginmanager._warn
+ self.pluginmanager.register(self, "pytestconfig")
+ self._configured = False
+ def do_setns(dic):
+ import pytest
+ setns(pytest, dic)
+ self.hook.pytest_namespace.call_historic(do_setns, {})
+ self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
+
+ def add_cleanup(self, func):
+ """ Add a function to be called when the config object gets out of
+ use (usually coinciding with pytest_unconfigure)."""
+ self._cleanup.append(func)
+
+ def _do_configure(self):
+ assert not self._configured
+ self._configured = True
+ self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
+
+ def _ensure_unconfigure(self):
+ if self._configured:
+ self._configured = False
+ self.hook.pytest_unconfigure(config=self)
+ self.hook.pytest_configure._call_history = []
+ while self._cleanup:
+ fin = self._cleanup.pop()
+ fin()
+
+ def warn(self, code, message, fslocation=None):
+ """ generate a warning for this test session. """
+ self.hook.pytest_logwarning.call_historic(kwargs=dict(
+ code=code, message=message,
+ fslocation=fslocation, nodeid=None))
+
+ def get_terminal_writer(self):
+ return self.pluginmanager.get_plugin("terminalreporter")._tw
+
+ def pytest_cmdline_parse(self, pluginmanager, args):
+ # REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
+ self.parse(args)
+ return self
+
+ def notify_exception(self, excinfo, option=None):
+ if option and option.fulltrace:
+ style = "long"
+ else:
+ style = "native"
+ excrepr = excinfo.getrepr(funcargs=True,
+ showlocals=getattr(option, 'showlocals', False),
+ style=style,
+ )
+ res = self.hook.pytest_internalerror(excrepr=excrepr,
+ excinfo=excinfo)
+ if not py.builtin.any(res):
+ for line in str(excrepr).split("\n"):
+ sys.stderr.write("INTERNALERROR> %s\n" %line)
+ sys.stderr.flush()
+
+ def cwd_relative_nodeid(self, nodeid):
+ # nodeid's are relative to the rootpath, compute relative to cwd
+ if self.invocation_dir != self.rootdir:
+ fullpath = self.rootdir.join(nodeid)
+ nodeid = self.invocation_dir.bestrelpath(fullpath)
+ return nodeid
+
+ @classmethod
+ def fromdictargs(cls, option_dict, args):
+ """ constructor useable for subprocesses. """
+ config = get_config()
+ config.option.__dict__.update(option_dict)
+ config.parse(args, addopts=False)
+ for x in config.option.plugins:
+ config.pluginmanager.consider_pluginarg(x)
+ return config
+
+ def _processopt(self, opt):
+ for name in opt._short_opts + opt._long_opts:
+ self._opt2dest[name] = opt.dest
+
+ if hasattr(opt, 'default') and opt.dest:
+ if not hasattr(self.option, opt.dest):
+ setattr(self.option, opt.dest, opt.default)
+
+ @hookimpl(trylast=True)
+ def pytest_load_initial_conftests(self, early_config):
+ self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
+
+ def _initini(self, args):
+ ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy())
+ r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args)
+ self.rootdir, self.inifile, self.inicfg = r
+ self._parser.extra_info['rootdir'] = self.rootdir
+ self._parser.extra_info['inifile'] = self.inifile
+ self.invocation_dir = py.path.local()
+ self._parser.addini('addopts', 'extra command line options', 'args')
+ self._parser.addini('minversion', 'minimally required pytest version')
+
+ def _preparse(self, args, addopts=True):
+ self._initini(args)
+ if addopts:
+ args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
+ args[:] = self.getini("addopts") + args
+ self._checkversion()
+ self.pluginmanager.consider_preparse(args)
+ try:
+ self.pluginmanager.load_setuptools_entrypoints("pytest11")
+ except ImportError as e:
+ self.warn("I2", "could not load setuptools entry import: %s" % (e,))
+ self.pluginmanager.consider_env()
+ self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
+ if self.known_args_namespace.confcutdir is None and self.inifile:
+ confcutdir = py.path.local(self.inifile).dirname
+ self.known_args_namespace.confcutdir = confcutdir
+ try:
+ self.hook.pytest_load_initial_conftests(early_config=self,
+ args=args, parser=self._parser)
+ except ConftestImportFailure:
+ e = sys.exc_info()[1]
+ if ns.help or ns.version:
+ # we don't want to prevent --help/--version from working,
+ # so just let it pass and print a warning at the end
+ self._warn("could not load initial conftests (%s)\n" % e.path)
+ else:
+ raise
+
+ def _checkversion(self):
+ import pytest
+ minver = self.inicfg.get('minversion', None)
+ if minver:
+ ver = minver.split(".")
+ myver = pytest.__version__.split(".")
+ if myver < ver:
+ raise pytest.UsageError(
+ "%s:%d: requires pytest-%s, actual pytest-%s'" %(
+ self.inicfg.config.path, self.inicfg.lineof('minversion'),
+ minver, pytest.__version__))
+
+ def parse(self, args, addopts=True):
+ # parse given cmdline arguments into this config object.
+ assert not hasattr(self, 'args'), (
+ "can only parse cmdline args at most once per Config object")
+ self._origargs = args
+ self.hook.pytest_addhooks.call_historic(
+ kwargs=dict(pluginmanager=self.pluginmanager))
+ self._preparse(args, addopts=addopts)
+ # XXX deprecated hook:
+ self.hook.pytest_cmdline_preparse(config=self, args=args)
+ args = self._parser.parse_setoption(args, self.option, namespace=self.option)
+ if not args:
+ cwd = os.getcwd()
+ if cwd == self.rootdir:
+ args = self.getini('testpaths')
+ if not args:
+ args = [cwd]
+ self.args = args
+
+ def addinivalue_line(self, name, line):
+ """ add a line to an ini-file option. The option must have been
+ declared but might not yet be set, in which case the line becomes
+ the first line in its value. """
+ x = self.getini(name)
+ assert isinstance(x, list)
+ x.append(line) # modifies the cached list inline
+
+ def getini(self, name):
+ """ return configuration value from an :ref:`ini file <inifiles>`. If the
+ specified name hasn't been registered through a prior
+ :py:func:`parser.addini <pytest.config.Parser.addini>`
+ call (usually from a plugin), a ValueError is raised. """
+ try:
+ return self._inicache[name]
+ except KeyError:
+ self._inicache[name] = val = self._getini(name)
+ return val
+
+ def _getini(self, name):
+ try:
+ description, type, default = self._parser._inidict[name]
+ except KeyError:
+ raise ValueError("unknown configuration value: %r" %(name,))
+ try:
+ value = self.inicfg[name]
+ except KeyError:
+ if default is not None:
+ return default
+ if type is None:
+ return ''
+ return []
+ if type == "pathlist":
+ dp = py.path.local(self.inicfg.config.path).dirpath()
+ l = []
+ for relpath in shlex.split(value):
+ l.append(dp.join(relpath, abs=True))
+ return l
+ elif type == "args":
+ return shlex.split(value)
+ elif type == "linelist":
+ return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
+ elif type == "bool":
+ return bool(_strtobool(value.strip()))
+ else:
+ assert type is None
+ return value
+
+ def _getconftest_pathlist(self, name, path):
+ try:
+ mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
+ except KeyError:
+ return None
+ modpath = py.path.local(mod.__file__).dirpath()
+ l = []
+ for relroot in relroots:
+ if not isinstance(relroot, py.path.local):
+ relroot = relroot.replace("/", py.path.local.sep)
+ relroot = modpath.join(relroot, abs=True)
+ l.append(relroot)
+ return l
+
+ def getoption(self, name, default=notset, skip=False):
+ """ return command line option value.
+
+ :arg name: name of the option. You may also specify
+ the literal ``--OPT`` option instead of the "dest" option name.
+ :arg default: default value if no option of that name exists.
+ :arg skip: if True, raise pytest.skip if the option does not exist
+ or has a None value.
+ """
+ name = self._opt2dest.get(name, name)
+ try:
+ val = getattr(self.option, name)
+ if val is None and skip:
+ raise AttributeError(name)
+ return val
+ except AttributeError:
+ if default is not notset:
+ return default
+ if skip:
+ import pytest
+ pytest.skip("no %r option found" %(name,))
+ raise ValueError("no option named %r" % (name,))
+
+ def getvalue(self, name, path=None):
+ """ (deprecated, use getoption()) """
+ return self.getoption(name)
+
+ def getvalueorskip(self, name, path=None):
+ """ (deprecated, use getoption(skip=True)) """
+ return self.getoption(name, skip=True)
+
+def exists(path, ignore=EnvironmentError):
+ try:
+ return path.check()
+ except ignore:
+ return False
+
+def getcfg(args, inibasenames):
+ args = [x for x in args if not str(x).startswith("-")]
+ if not args:
+ args = [py.path.local()]
+ for arg in args:
+ arg = py.path.local(arg)
+ for base in arg.parts(reverse=True):
+ for inibasename in inibasenames:
+ p = base.join(inibasename)
+ if exists(p):
+ iniconfig = py.iniconfig.IniConfig(p)
+ if 'pytest' in iniconfig.sections:
+ return base, p, iniconfig['pytest']
+ elif inibasename == "pytest.ini":
+ # allowed to be empty
+ return base, p, {}
+ return None, None, None
+
+
+def get_common_ancestor(args):
+ # args are what we get after early command line parsing (usually
+ # strings, but can be py.path.local objects as well)
+ common_ancestor = None
+ for arg in args:
+ if str(arg)[0] == "-":
+ continue
+ p = py.path.local(arg)
+ if common_ancestor is None:
+ common_ancestor = p
+ else:
+ if p.relto(common_ancestor) or p == common_ancestor:
+ continue
+ elif common_ancestor.relto(p):
+ common_ancestor = p
+ else:
+ shared = p.common(common_ancestor)
+ if shared is not None:
+ common_ancestor = shared
+ if common_ancestor is None:
+ common_ancestor = py.path.local()
+ elif not common_ancestor.isdir():
+ common_ancestor = common_ancestor.dirpath()
+ return common_ancestor
+
+
+def determine_setup(inifile, args):
+ if inifile:
+ iniconfig = py.iniconfig.IniConfig(inifile)
+ try:
+ inicfg = iniconfig["pytest"]
+ except KeyError:
+ inicfg = None
+ rootdir = get_common_ancestor(args)
+ else:
+ ancestor = get_common_ancestor(args)
+ rootdir, inifile, inicfg = getcfg(
+ [ancestor], ["pytest.ini", "tox.ini", "setup.cfg"])
+ if rootdir is None:
+ for rootdir in ancestor.parts(reverse=True):
+ if rootdir.join("setup.py").exists():
+ break
+ else:
+ rootdir = ancestor
+ return rootdir, inifile, inicfg or {}
+
+
+def setns(obj, dic):
+ import pytest
+ for name, value in dic.items():
+ if isinstance(value, dict):
+ mod = getattr(obj, name, None)
+ if mod is None:
+ modname = "pytest.%s" % name
+ mod = types.ModuleType(modname)
+ sys.modules[modname] = mod
+ mod.__all__ = []
+ setattr(obj, name, mod)
+ obj.__all__.append(name)
+ setns(mod, value)
+ else:
+ setattr(obj, name, value)
+ obj.__all__.append(name)
+ #if obj != pytest:
+ # pytest.__all__.append(name)
+ setattr(pytest, name, value)
+
+
+def create_terminal_writer(config, *args, **kwargs):
+ """Create a TerminalWriter instance configured according to the options
+ in the config object. All code that requires a TerminalWriter object
+ and has access to a config object should use this function.
+ """
+ tw = py.io.TerminalWriter(*args, **kwargs)
+ if config.option.color == 'yes':
+ tw.hasmarkup = True
+ if config.option.color == 'no':
+ tw.hasmarkup = False
+ return tw
+
+
+def _strtobool(val):
+ """Convert a string representation of truth to true (1) or false (0).
+
+ True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+ are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+ 'val' is anything else.
+
+ .. note:: copied from distutils.util
+ """
+ val = val.lower()
+ if val in ('y', 'yes', 't', 'true', 'on', '1'):
+ return 1
+ elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+ return 0
+ else:
+ raise ValueError("invalid truth value %r" % (val,))
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/doctest.py b/testing/web-platform/tests/tools/pytest/_pytest/doctest.py
new file mode 100644
index 000000000..a57f7a494
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/doctest.py
@@ -0,0 +1,290 @@
+""" discover and run doctests in modules and test files."""
+from __future__ import absolute_import
+
+import traceback
+
+import pytest
+from _pytest._code.code import TerminalRepr, ReprFileLocation, ExceptionInfo
+from _pytest.python import FixtureRequest
+
+
+
+def pytest_addoption(parser):
+ parser.addini('doctest_optionflags', 'option flags for doctests',
+ type="args", default=["ELLIPSIS"])
+ group = parser.getgroup("collect")
+ group.addoption("--doctest-modules",
+ action="store_true", default=False,
+ help="run doctests in all .py modules",
+ dest="doctestmodules")
+ group.addoption("--doctest-glob",
+ action="append", default=[], metavar="pat",
+ help="doctests file matching pattern, default: test*.txt",
+ dest="doctestglob")
+ group.addoption("--doctest-ignore-import-errors",
+ action="store_true", default=False,
+ help="ignore doctest ImportErrors",
+ dest="doctest_ignore_import_errors")
+
+
+def pytest_collect_file(path, parent):
+ config = parent.config
+ if path.ext == ".py":
+ if config.option.doctestmodules:
+ return DoctestModule(path, parent)
+ elif _is_doctest(config, path, parent):
+ return DoctestTextfile(path, parent)
+
+
+def _is_doctest(config, path, parent):
+ if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
+ return True
+ globs = config.getoption("doctestglob") or ['test*.txt']
+ for glob in globs:
+ if path.check(fnmatch=glob):
+ return True
+ return False
+
+
+class ReprFailDoctest(TerminalRepr):
+
+ def __init__(self, reprlocation, lines):
+ self.reprlocation = reprlocation
+ self.lines = lines
+
+ def toterminal(self, tw):
+ for line in self.lines:
+ tw.line(line)
+ self.reprlocation.toterminal(tw)
+
+
+class DoctestItem(pytest.Item):
+
+ def __init__(self, name, parent, runner=None, dtest=None):
+ super(DoctestItem, self).__init__(name, parent)
+ self.runner = runner
+ self.dtest = dtest
+ self.obj = None
+ self.fixture_request = None
+
+ def setup(self):
+ if self.dtest is not None:
+ self.fixture_request = _setup_fixtures(self)
+ globs = dict(getfixture=self.fixture_request.getfuncargvalue)
+ self.dtest.globs.update(globs)
+
+ def runtest(self):
+ _check_all_skipped(self.dtest)
+ self.runner.run(self.dtest)
+
+ def repr_failure(self, excinfo):
+ import doctest
+ if excinfo.errisinstance((doctest.DocTestFailure,
+ doctest.UnexpectedException)):
+ doctestfailure = excinfo.value
+ example = doctestfailure.example
+ test = doctestfailure.test
+ filename = test.filename
+ if test.lineno is None:
+ lineno = None
+ else:
+ lineno = test.lineno + example.lineno + 1
+ message = excinfo.type.__name__
+ reprlocation = ReprFileLocation(filename, lineno, message)
+ checker = _get_checker()
+ REPORT_UDIFF = doctest.REPORT_UDIFF
+ if lineno is not None:
+ lines = doctestfailure.test.docstring.splitlines(False)
+ # add line numbers to the left of the error message
+ lines = ["%03d %s" % (i + test.lineno + 1, x)
+ for (i, x) in enumerate(lines)]
+ # trim docstring error lines to 10
+ lines = lines[example.lineno - 9:example.lineno + 1]
+ else:
+ lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
+ indent = '>>>'
+ for line in example.source.splitlines():
+ lines.append('??? %s %s' % (indent, line))
+ indent = '...'
+ if excinfo.errisinstance(doctest.DocTestFailure):
+ lines += checker.output_difference(example,
+ doctestfailure.got, REPORT_UDIFF).split("\n")
+ else:
+ inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
+ lines += ["UNEXPECTED EXCEPTION: %s" %
+ repr(inner_excinfo.value)]
+ lines += traceback.format_exception(*excinfo.value.exc_info)
+ return ReprFailDoctest(reprlocation, lines)
+ else:
+ return super(DoctestItem, self).repr_failure(excinfo)
+
+ def reportinfo(self):
+ return self.fspath, None, "[doctest] %s" % self.name
+
+
+def _get_flag_lookup():
+ import doctest
+ return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
+ DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
+ NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
+ ELLIPSIS=doctest.ELLIPSIS,
+ IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
+ COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
+ ALLOW_UNICODE=_get_allow_unicode_flag(),
+ ALLOW_BYTES=_get_allow_bytes_flag(),
+ )
+
+
+def get_optionflags(parent):
+ optionflags_str = parent.config.getini("doctest_optionflags")
+ flag_lookup_table = _get_flag_lookup()
+ flag_acc = 0
+ for flag in optionflags_str:
+ flag_acc |= flag_lookup_table[flag]
+ return flag_acc
+
+
+class DoctestTextfile(DoctestItem, pytest.Module):
+
+ def runtest(self):
+ import doctest
+ fixture_request = _setup_fixtures(self)
+
+ # inspired by doctest.testfile; ideally we would use it directly,
+ # but it doesn't support passing a custom checker
+ text = self.fspath.read()
+ filename = str(self.fspath)
+ name = self.fspath.basename
+ globs = dict(getfixture=fixture_request.getfuncargvalue)
+ if '__name__' not in globs:
+ globs['__name__'] = '__main__'
+
+ optionflags = get_optionflags(self)
+ runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
+ checker=_get_checker())
+
+ parser = doctest.DocTestParser()
+ test = parser.get_doctest(text, globs, name, filename, 0)
+ _check_all_skipped(test)
+ runner.run(test)
+
+
+def _check_all_skipped(test):
+ """raises pytest.skip() if all examples in the given DocTest have the SKIP
+ option set.
+ """
+ import doctest
+ all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
+ if all_skipped:
+ pytest.skip('all tests skipped by +SKIP option')
+
+
+class DoctestModule(pytest.Module):
+ def collect(self):
+ import doctest
+ if self.fspath.basename == "conftest.py":
+ module = self.config.pluginmanager._importconftest(self.fspath)
+ else:
+ try:
+ module = self.fspath.pyimport()
+ except ImportError:
+ if self.config.getvalue('doctest_ignore_import_errors'):
+ pytest.skip('unable to import module %r' % self.fspath)
+ else:
+ raise
+ # uses internal doctest module parsing mechanism
+ finder = doctest.DocTestFinder()
+ optionflags = get_optionflags(self)
+ runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
+ checker=_get_checker())
+ for test in finder.find(module, module.__name__):
+ if test.examples: # skip empty doctests
+ yield DoctestItem(test.name, self, runner, test)
+
+
+def _setup_fixtures(doctest_item):
+ """
+ Used by DoctestTextfile and DoctestItem to set up fixture information.
+ """
+ def func():
+ pass
+
+ doctest_item.funcargs = {}
+ fm = doctest_item.session._fixturemanager
+ doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
+ cls=None, funcargs=False)
+ fixture_request = FixtureRequest(doctest_item)
+ fixture_request._fillfixtures()
+ return fixture_request
+
+
+def _get_checker():
+ """
+ Returns a doctest.OutputChecker subclass that takes into account the
+ ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
+ to strip b'' prefixes.
+ Useful when the same doctest should run in Python 2 and Python 3.
+
+ An inner class is used to avoid importing "doctest" at the module
+ level.
+ """
+ if hasattr(_get_checker, 'LiteralsOutputChecker'):
+ return _get_checker.LiteralsOutputChecker()
+
+ import doctest
+ import re
+
+ class LiteralsOutputChecker(doctest.OutputChecker):
+ """
+ Copied from doctest_nose_plugin.py from the nltk project:
+ https://github.com/nltk/nltk
+
+ Further extended to also support byte literals.
+ """
+
+ _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+ _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+
+ def check_output(self, want, got, optionflags):
+ res = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if res:
+ return True
+
+ allow_unicode = optionflags & _get_allow_unicode_flag()
+ allow_bytes = optionflags & _get_allow_bytes_flag()
+ if not allow_unicode and not allow_bytes:
+ return False
+
+ else: # pragma: no cover
+ def remove_prefixes(regex, txt):
+ return re.sub(regex, r'\1\2', txt)
+
+ if allow_unicode:
+ want = remove_prefixes(self._unicode_literal_re, want)
+ got = remove_prefixes(self._unicode_literal_re, got)
+ if allow_bytes:
+ want = remove_prefixes(self._bytes_literal_re, want)
+ got = remove_prefixes(self._bytes_literal_re, got)
+ res = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ return res
+
+ _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
+ return _get_checker.LiteralsOutputChecker()
+
+
+def _get_allow_unicode_flag():
+ """
+ Registers and returns the ALLOW_UNICODE flag.
+ """
+ import doctest
+ return doctest.register_optionflag('ALLOW_UNICODE')
+
+
+def _get_allow_bytes_flag():
+ """
+ Registers and returns the ALLOW_BYTES flag.
+ """
+ import doctest
+ return doctest.register_optionflag('ALLOW_BYTES')
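
The ALLOW_UNICODE and ALLOW_BYTES flags registered above let a single doctest
pass on both Python 2 and Python 3 by stripping u''/b'' literal prefixes from
the expected and the actual output. A minimal sketch, assuming the module is
collected by pytest's doctest support (module and function names are
illustrative)::

    def greet():
        """
        >>> greet()  # doctest: +ALLOW_UNICODE
        u'hello'
        """
        return u'hello'

The flags can also be enabled globally through the doctest_optionflags ini
value read by get_optionflags() above, for example
doctest_optionflags = ALLOW_UNICODE ELLIPSIS in an ini file.
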
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/genscript.py b/testing/web-platform/tests/tools/pytest/_pytest/genscript.py
new file mode 100755
index 000000000..d2962d8fc
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/genscript.py
@@ -0,0 +1,132 @@
+""" (deprecated) generate a single-file self-contained version of pytest """
+import os
+import sys
+import pkgutil
+
+import py
+import _pytest
+
+
+
+def find_toplevel(name):
+ for syspath in sys.path:
+ base = py.path.local(syspath)
+ lib = base/name
+ if lib.check(dir=1):
+ return lib
+ mod = base.join("%s.py" % name)
+ if mod.check(file=1):
+ return mod
+ raise LookupError(name)
+
+def pkgname(toplevel, rootpath, path):
+ parts = path.parts()[len(rootpath.parts()):]
+ return '.'.join([toplevel] + [x.purebasename for x in parts])
+
+def pkg_to_mapping(name):
+ toplevel = find_toplevel(name)
+ name2src = {}
+ if toplevel.check(file=1): # module
+ name2src[toplevel.purebasename] = toplevel.read()
+ else: # package
+ for pyfile in toplevel.visit('*.py'):
+ pkg = pkgname(name, toplevel, pyfile)
+ name2src[pkg] = pyfile.read()
+    # with wheels py source code might not be installed
+ # and the resulting genscript is useless, just bail out.
+ assert name2src, "no source code found for %r at %r" %(name, toplevel)
+ return name2src
+
+def compress_mapping(mapping):
+ import base64, pickle, zlib
+ data = pickle.dumps(mapping, 2)
+ data = zlib.compress(data, 9)
+ data = base64.encodestring(data)
+ data = data.decode('ascii')
+ return data
+
+
+def compress_packages(names):
+ mapping = {}
+ for name in names:
+ mapping.update(pkg_to_mapping(name))
+ return compress_mapping(mapping)
+
+def generate_script(entry, packages):
+ data = compress_packages(packages)
+ tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py')
+ exe = tmpl.read()
+ exe = exe.replace('@SOURCES@', data)
+ exe = exe.replace('@ENTRY@', entry)
+ return exe
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("debugconfig")
+ group.addoption("--genscript", action="store", default=None,
+ dest="genscript", metavar="path",
+ help="create standalone pytest script at given target path.")
+
+def pytest_cmdline_main(config):
+ import _pytest.config
+ genscript = config.getvalue("genscript")
+ if genscript:
+ tw = _pytest.config.create_terminal_writer(config)
+ tw.line("WARNING: usage of genscript is deprecated.",
+ red=True)
+ deps = ['py', '_pytest', 'pytest'] # pluggy is vendored
+ if sys.version_info < (2,7):
+ deps.append("argparse")
+ tw.line("generated script will run on python2.6-python3.3++")
+ else:
+ tw.line("WARNING: generated script will not run on python2.6 "
+ "due to 'argparse' dependency. Use python2.6 "
+ "to generate a python2.6 compatible script", red=True)
+ script = generate_script(
+ 'import pytest; raise SystemExit(pytest.cmdline.main())',
+ deps,
+ )
+ genscript = py.path.local(genscript)
+ genscript.write(script)
+ tw.line("generated pytest standalone script: %s" % genscript,
+ bold=True)
+ return 0
+
+
+def pytest_namespace():
+ return {'freeze_includes': freeze_includes}
+
+
+def freeze_includes():
+ """
+ Returns a list of module names used by py.test that should be
+ included by cx_freeze.
+ """
+ result = list(_iter_all_modules(py))
+ result += list(_iter_all_modules(_pytest))
+ return result
+
+
+def _iter_all_modules(package, prefix=''):
+ """
+ Iterates over the names of all modules that can be found in the given
+ package, recursively.
+
+ Example:
+ _iter_all_modules(_pytest) ->
+ ['_pytest.assertion.newinterpret',
+ '_pytest.capture',
+ '_pytest.core',
+ ...
+ ]
+ """
+ if type(package) is not str:
+ path, prefix = package.__path__[0], package.__name__ + '.'
+ else:
+ path = package
+ for _, name, is_package in pkgutil.iter_modules([path]):
+ if is_package:
+ for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
+ yield prefix + m
+ else:
+ yield prefix + name
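
generate_script() above splices the base64 blob built by compress_mapping()
into standalonetemplate.py, which is not part of this diff and has to reverse
the packing at run time. A minimal sketch of that inverse step, assuming the
template receives the blob as a string named sources_blob::

    import base64, pickle, zlib

    def unpack_sources(sources_blob):
        # inverse of compress_mapping(): base64 -> zlib -> pickle
        data = base64.decodestring(sources_blob.encode('ascii'))
        data = zlib.decompress(data)
        return pickle.loads(data)  # dict: dotted module name -> source text
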
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/helpconfig.py b/testing/web-platform/tests/tools/pytest/_pytest/helpconfig.py
new file mode 100644
index 000000000..1df0c56ac
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/helpconfig.py
@@ -0,0 +1,139 @@
+""" version info, help messages, tracing configuration. """
+import py
+import pytest
+import os, sys
+
+def pytest_addoption(parser):
+ group = parser.getgroup('debugconfig')
+ group.addoption('--version', action="store_true",
+ help="display pytest lib version and import information.")
+ group._addoption("-h", "--help", action="store_true", dest="help",
+ help="show help message and configuration info")
+ group._addoption('-p', action="append", dest="plugins", default = [],
+ metavar="name",
+ help="early-load given plugin (multi-allowed). "
+ "To avoid loading of plugins, use the `no:` prefix, e.g. "
+ "`no:doctest`.")
+ group.addoption('--traceconfig', '--trace-config',
+ action="store_true", default=False,
+ help="trace considerations of conftest.py files."),
+ group.addoption('--debug',
+ action="store_true", dest="debug", default=False,
+ help="store internal tracing debug information in 'pytestdebug.log'.")
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_cmdline_parse():
+ outcome = yield
+ config = outcome.get_result()
+ if config.option.debug:
+ path = os.path.abspath("pytestdebug.log")
+ debugfile = open(path, 'w')
+ debugfile.write("versions pytest-%s, py-%s, "
+ "python-%s\ncwd=%s\nargs=%s\n\n" %(
+ pytest.__version__, py.__version__,
+ ".".join(map(str, sys.version_info)),
+ os.getcwd(), config._origargs))
+ config.trace.root.setwriter(debugfile.write)
+ undo_tracing = config.pluginmanager.enable_tracing()
+ sys.stderr.write("writing pytestdebug information to %s\n" % path)
+ def unset_tracing():
+ debugfile.close()
+ sys.stderr.write("wrote pytestdebug information to %s\n" %
+ debugfile.name)
+ config.trace.root.setwriter(None)
+ undo_tracing()
+ config.add_cleanup(unset_tracing)
+
+def pytest_cmdline_main(config):
+ if config.option.version:
+ p = py.path.local(pytest.__file__)
+ sys.stderr.write("This is pytest version %s, imported from %s\n" %
+ (pytest.__version__, p))
+ plugininfo = getpluginversioninfo(config)
+ if plugininfo:
+ for line in plugininfo:
+ sys.stderr.write(line + "\n")
+ return 0
+ elif config.option.help:
+ config._do_configure()
+ showhelp(config)
+ config._ensure_unconfigure()
+ return 0
+
+def showhelp(config):
+ reporter = config.pluginmanager.get_plugin('terminalreporter')
+ tw = reporter._tw
+ tw.write(config._parser.optparser.format_help())
+ tw.line()
+ tw.line()
+ #tw.sep( "=", "config file settings")
+ tw.line("[pytest] ini-options in the next "
+ "pytest.ini|tox.ini|setup.cfg file:")
+ tw.line()
+
+ for name in config._parser._ininames:
+ help, type, default = config._parser._inidict[name]
+ if type is None:
+ type = "string"
+ spec = "%s (%s)" % (name, type)
+ line = " %-24s %s" %(spec, help)
+ tw.line(line[:tw.fullwidth])
+
+ tw.line()
+ tw.line("environment variables:")
+ vars = [
+ ("PYTEST_ADDOPTS", "extra command line options"),
+ ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
+ ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
+ ]
+ for name, help in vars:
+ tw.line(" %-24s %s" % (name, help))
+ tw.line()
+ tw.line()
+
+ tw.line("to see available markers type: py.test --markers")
+ tw.line("to see available fixtures type: py.test --fixtures")
+ tw.line("(shown according to specified file_or_dir or current dir "
+ "if not specified)")
+
+ for warningreport in reporter.stats.get('warnings', []):
+ tw.line("warning : " + warningreport.message, red=True)
+ return
+
+
+conftest_options = [
+ ('pytest_plugins', 'list of plugin names to load'),
+]
+
+def getpluginversioninfo(config):
+ lines = []
+ plugininfo = config.pluginmanager.list_plugin_distinfo()
+ if plugininfo:
+ lines.append("setuptools registered plugins:")
+ for plugin, dist in plugininfo:
+ loc = getattr(plugin, '__file__', repr(plugin))
+ content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
+ lines.append(" " + content)
+ return lines
+
+def pytest_report_header(config):
+ lines = []
+ if config.option.debug or config.option.traceconfig:
+ lines.append("using: pytest-%s pylib-%s" %
+ (pytest.__version__,py.__version__))
+
+ verinfo = getpluginversioninfo(config)
+ if verinfo:
+ lines.extend(verinfo)
+
+ if config.option.traceconfig:
+ lines.append("active plugins:")
+ items = config.pluginmanager.list_name_plugin()
+ for name, plugin in items:
+ if hasattr(plugin, '__file__'):
+ r = plugin.__file__
+ else:
+ r = repr(plugin)
+ lines.append(" %-20s: %s" %(name, r))
+ return lines
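
showhelp() above prints every ini value registered through parser.addini(),
together with its type (reported as "string" when none was given) and help
text. A minimal conftest.py sketch with an illustrative option name::

    def pytest_addoption(parser):
        # listed under the "[pytest] ini-options" block of `py.test -h`
        parser.addini("default_timeout",
                      "illustrative per-test timeout in seconds",
                      default="0")
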
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/hookspec.py b/testing/web-platform/tests/tools/pytest/_pytest/hookspec.py
new file mode 100644
index 000000000..60e9b47d2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/hookspec.py
@@ -0,0 +1,295 @@
+""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
+
+from _pytest._pluggy import HookspecMarker
+
+hookspec = HookspecMarker("pytest")
+
+# -------------------------------------------------------------------------
+# Initialization hooks called for every plugin
+# -------------------------------------------------------------------------
+
+@hookspec(historic=True)
+def pytest_addhooks(pluginmanager):
+ """called at plugin registration time to allow adding new hooks via a call to
+ pluginmanager.add_hookspecs(module_or_class, prefix)."""
+
+
+@hookspec(historic=True)
+def pytest_namespace():
+ """return dict of name->object to be made globally available in
+ the pytest namespace. This hook is called at plugin registration
+ time.
+ """
+
+@hookspec(historic=True)
+def pytest_plugin_registered(plugin, manager):
+ """ a new pytest plugin got registered. """
+
+
+@hookspec(historic=True)
+def pytest_addoption(parser):
+ """register argparse-style options and ini-style config values,
+ called once at the beginning of a test run.
+
+ .. note::
+
+ This function should be implemented only in plugins or ``conftest.py``
+ files situated at the tests root directory due to how py.test
+ :ref:`discovers plugins during startup <pluginorder>`.
+
+ :arg parser: To add command line options, call
+ :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
+ To add ini-file values call :py:func:`parser.addini(...)
+ <_pytest.config.Parser.addini>`.
+
+ Options can later be accessed through the
+ :py:class:`config <_pytest.config.Config>` object, respectively:
+
+ - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
+ retrieve the value of a command line option.
+
+ - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
+ a value read from an ini-style file.
+
+ The config object is passed around on many internal objects via the ``.config``
+ attribute or can be retrieved as the ``pytestconfig`` fixture or accessed
+ via (deprecated) ``pytest.config``.
+ """
+
+@hookspec(historic=True)
+def pytest_configure(config):
+ """ called after command line options have been parsed
+ and all plugins and initial conftest files been loaded.
+ This hook is called for every plugin.
+ """
+
+# -------------------------------------------------------------------------
+# Bootstrapping hooks called for plugins registered early enough:
+# internal and 3rd party plugins as well as directly
+# discoverable conftest.py local plugins.
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_cmdline_parse(pluginmanager, args):
+ """return initialized config object, parsing the specified args. """
+
+def pytest_cmdline_preparse(config, args):
+ """(deprecated) modify command line arguments before option parsing. """
+
+@hookspec(firstresult=True)
+def pytest_cmdline_main(config):
+ """ called for performing the main command line action. The default
+ implementation will invoke the configure hooks and runtest_mainloop. """
+
+def pytest_load_initial_conftests(early_config, parser, args):
+ """ implements the loading of initial conftest files ahead
+ of command line option parsing. """
+
+
+# -------------------------------------------------------------------------
+# collection hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_collection(session):
+ """ perform the collection protocol for the given session. """
+
+def pytest_collection_modifyitems(session, config, items):
+ """ called after collection has been performed, may filter or re-order
+ the items in-place."""
+
+def pytest_collection_finish(session):
+ """ called after collection has been performed and modified. """
+
+@hookspec(firstresult=True)
+def pytest_ignore_collect(path, config):
+ """ return True to prevent considering this path for collection.
+ This hook is consulted for all files and directories prior to calling
+ more specific hooks.
+ """
+
+@hookspec(firstresult=True)
+def pytest_collect_directory(path, parent):
+ """ called before traversing a directory for collection files. """
+
+def pytest_collect_file(path, parent):
+ """ return collection Node or None for the given path. Any new node
+ needs to have the specified ``parent`` as a parent."""
+
+# logging hooks for collection
+def pytest_collectstart(collector):
+ """ collector starts collecting. """
+
+def pytest_itemcollected(item):
+ """ we just collected a test item. """
+
+def pytest_collectreport(report):
+ """ collector finished collecting. """
+
+def pytest_deselected(items):
+ """ called for test items deselected by keyword. """
+
+@hookspec(firstresult=True)
+def pytest_make_collect_report(collector):
+ """ perform ``collector.collect()`` and return a CollectReport. """
+
+# -------------------------------------------------------------------------
+# Python test function related hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_pycollect_makemodule(path, parent):
+ """ return a Module collector or None for the given path.
+ This hook will be called for each matching test module path.
+ The pytest_collect_file hook needs to be used if you want to
+ create test modules for files that do not match as a test module.
+ """
+
+@hookspec(firstresult=True)
+def pytest_pycollect_makeitem(collector, name, obj):
+ """ return custom item/collector for a python object in a module, or None. """
+
+@hookspec(firstresult=True)
+def pytest_pyfunc_call(pyfuncitem):
+ """ call underlying test function. """
+
+def pytest_generate_tests(metafunc):
+ """ generate (multiple) parametrized calls to a test function."""
+
+# -------------------------------------------------------------------------
+# generic runtest related hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_runtestloop(session):
+ """ called for performing the main runtest loop
+ (after collection finished). """
+
+def pytest_itemstart(item, node):
+ """ (deprecated, use pytest_runtest_logstart). """
+
+@hookspec(firstresult=True)
+def pytest_runtest_protocol(item, nextitem):
+ """ implements the runtest_setup/call/teardown protocol for
+ the given test item, including capturing exceptions and calling
+ reporting hooks.
+
+ :arg item: test item for which the runtest protocol is performed.
+
+ :arg nextitem: the scheduled-to-be-next test item (or None if this
+ is the end my friend). This argument is passed on to
+ :py:func:`pytest_runtest_teardown`.
+
+ :return boolean: True if no further hook implementations should be invoked.
+ """
+
+def pytest_runtest_logstart(nodeid, location):
+ """ signal the start of running a single test item. """
+
+def pytest_runtest_setup(item):
+ """ called before ``pytest_runtest_call(item)``. """
+
+def pytest_runtest_call(item):
+ """ called to execute the test ``item``. """
+
+def pytest_runtest_teardown(item, nextitem):
+ """ called after ``pytest_runtest_call``.
+
+ :arg nextitem: the scheduled-to-be-next test item (None if no further
+ test item is scheduled). This argument can be used to
+ perform exact teardowns, i.e. calling just enough finalizers
+ so that nextitem only needs to call setup-functions.
+ """
+
+@hookspec(firstresult=True)
+def pytest_runtest_makereport(item, call):
+ """ return a :py:class:`_pytest.runner.TestReport` object
+ for the given :py:class:`pytest.Item` and
+ :py:class:`_pytest.runner.CallInfo`.
+ """
+
+def pytest_runtest_logreport(report):
+ """ process a test setup/call/teardown report relating to
+ the respective phase of executing a test. """
+
+# -------------------------------------------------------------------------
+# test session related hooks
+# -------------------------------------------------------------------------
+
+def pytest_sessionstart(session):
+ """ before session.main() is called. """
+
+def pytest_sessionfinish(session, exitstatus):
+ """ whole test run finishes. """
+
+def pytest_unconfigure(config):
+ """ called before test process is exited. """
+
+
+# -------------------------------------------------------------------------
+# hooks for customising the assert methods
+# -------------------------------------------------------------------------
+
+def pytest_assertrepr_compare(config, op, left, right):
+ """return explanation for comparisons in failing assert expressions.
+
+ Return None for no custom explanation, otherwise return a list
+ of strings. The strings will be joined by newlines but any newlines
+ *in* a string will be escaped. Note that all but the first line will
+    be indented slightly; the intention is for the first line to be a summary.
+ """
+
+# -------------------------------------------------------------------------
+# hooks for influencing reporting (invoked from _pytest_terminal)
+# -------------------------------------------------------------------------
+
+def pytest_report_header(config, startdir):
+ """ return a string to be displayed as header info for terminal reporting."""
+
+@hookspec(firstresult=True)
+def pytest_report_teststatus(report):
+ """ return result-category, shortletter and verbose word for reporting."""
+
+def pytest_terminal_summary(terminalreporter):
+ """ add additional section in terminal summary reporting. """
+
+
+@hookspec(historic=True)
+def pytest_logwarning(message, code, nodeid, fslocation):
+ """ process a warning specified by a message, a code string,
+ a nodeid and fslocation (both of which may be None
+    if the warning is not tied to a particular node/location)."""
+
+# -------------------------------------------------------------------------
+# doctest hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_doctest_prepare_content(content):
+ """ return processed content for a given doctest"""
+
+# -------------------------------------------------------------------------
+# error handling and internal debugging hooks
+# -------------------------------------------------------------------------
+
+def pytest_internalerror(excrepr, excinfo):
+ """ called for internal errors. """
+
+def pytest_keyboard_interrupt(excinfo):
+ """ called for keyboard interrupt. """
+
+def pytest_exception_interact(node, call, report):
+ """called when an exception was raised which can potentially be
+ interactively handled.
+
+ This hook is only called if an exception was raised
+ that is not an internal exception like ``skip.Exception``.
+ """
+
+def pytest_enter_pdb(config):
+ """ called upon pdb.set_trace(), can be used by plugins to take special
+ action just before the python debugger enters in interactive mode.
+
+ :arg config: pytest config object
+ :type config: _pytest.config.Config
+ """
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/impl b/testing/web-platform/tests/tools/pytest/_pytest/impl
new file mode 100644
index 000000000..889e37e5a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/impl
@@ -0,0 +1,254 @@
+Sorting per-resource
+-----------------------------
+
+for any given set of items:
+
+- collect items per session-scoped parametrized funcarg
+- re-order items so that no parametrizations are mixed
+
+ examples:
+
+ test()
+ test1(s1)
+ test1(s2)
+ test2()
+ test3(s1)
+ test3(s2)
+
+ gets sorted to:
+
+ test()
+ test2()
+ test1(s1)
+ test3(s1)
+ test1(s2)
+ test3(s2)
+
+
+the new @setup functions
+--------------------------------------
+
+Consider a given @setup-marked function::
+
+ @pytest.mark.setup(maxscope=SCOPE)
+ def mysetup(request, arg1, arg2, ...)
+ ...
+ request.addfinalizer(fin)
+ ...
+
+then FUNCARGSET denotes the set of (arg1, arg2, ...) funcargs and
+all of its dependent funcargs. The mysetup function will execute
+for any matching test item once per scope.
+
+The scope is determined as the minimum scope of all scopes of the args
+in FUNCARGSET and the given "maxscope".
+
+If mysetup has been called and no finalizers have been called it is
+called "active".
+
+Furthermore the following rules apply:
+
+- if an arg value in FUNCARGSET is about to be torn down, the
+ mysetup-registered finalizers will execute as well.
+
+- There will never be two active mysetup invocations.
+
+Example 1, session scope::
+
+ @pytest.mark.funcarg(scope="session", params=[1,2])
+ def db(request):
+ request.addfinalizer(db_finalize)
+
+ @pytest.mark.setup
+ def mysetup(request, db):
+ request.addfinalizer(mysetup_finalize)
+ ...
+
+And a given test module:
+
+ def test_something():
+ ...
+ def test_otherthing():
+ pass
+
+Here is what happens::
+
+ db(request) executes with request.param == 1
+ mysetup(request, db) executes
+ test_something() executes
+ test_otherthing() executes
+ mysetup_finalize() executes
+ db_finalize() executes
+ db(request) executes with request.param == 2
+ mysetup(request, db) executes
+ test_something() executes
+ test_otherthing() executes
+ mysetup_finalize() executes
+ db_finalize() executes
+
+Example 2, session/function scope::
+
+ @pytest.mark.funcarg(scope="session", params=[1,2])
+ def db(request):
+ request.addfinalizer(db_finalize)
+
+ @pytest.mark.setup(scope="function")
+ def mysetup(request, db):
+ ...
+ request.addfinalizer(mysetup_finalize)
+ ...
+
+And a given test module:
+
+ def test_something():
+ ...
+ def test_otherthing():
+ pass
+
+Here is what happens::
+
+ db(request) executes with request.param == 1
+ mysetup(request, db) executes
+ test_something() executes
+ mysetup_finalize() executes
+ mysetup(request, db) executes
+ test_otherthing() executes
+ mysetup_finalize() executes
+ db_finalize() executes
+ db(request) executes with request.param == 2
+ mysetup(request, db) executes
+ test_something() executes
+ mysetup_finalize() executes
+ mysetup(request, db) executes
+ test_otherthing() executes
+ mysetup_finalize() executes
+ db_finalize() executes
+
+
+Example 3 - funcargs session-mix
+----------------------------------------
+
+Similar with funcargs, an example::
+
+ @pytest.mark.funcarg(scope="session", params=[1,2])
+ def db(request):
+ request.addfinalizer(db_finalize)
+
+ @pytest.mark.funcarg(scope="function")
+ def table(request, db):
+ ...
+ request.addfinalizer(table_finalize)
+ ...
+
+And a given test module:
+
+ def test_something(table):
+ ...
+ def test_otherthing(table):
+ pass
+ def test_thirdthing():
+ pass
+
+Here is what happens::
+
+ db(request) executes with param == 1
+ table(request, db)
+ test_something(table)
+ table_finalize()
+ table(request, db)
+ test_otherthing(table)
+ table_finalize()
+ db_finalize
+ db(request) executes with param == 2
+ table(request, db)
+ test_something(table)
+ table_finalize()
+ table(request, db)
+ test_otherthing(table)
+ table_finalize()
+ db_finalize
+ test_thirdthing()
+
+Data structures
+--------------------
+
+pytest internally maintains a dict of active funcargs with cache, param,
+finalizer, (scopeitem?) information:
+
+ active_funcargs = dict()
+
+if a parametrized "db" is activated:
+
+ active_funcargs["db"] = FuncargInfo(dbvalue, paramindex,
+ FuncargFinalize(...), scopeitem)
+
+if a test is torn down and the next test requires a differently
+parametrized "db":
+
+ for argname in item.callspec.params:
+ if argname in active_funcargs:
+ funcarginfo = active_funcargs[argname]
+ if funcarginfo.param != item.callspec.params[argname]:
+ funcarginfo.callfinalizer()
+ del node2funcarg[funcarginfo.scopeitem]
+ del active_funcargs[argname]
+ nodes_to_be_torn_down = ...
+ for node in nodes_to_be_torn_down:
+ if node in node2funcarg:
+ argname = node2funcarg[node]
+ active_funcargs[argname].callfinalizer()
+ del node2funcarg[node]
+ del active_funcargs[argname]
+
+if a test is setup requiring a "db" funcarg:
+
+ if "db" in active_funcargs:
+ return active_funcargs["db"][0]
+ funcarginfo = setup_funcarg()
+ active_funcargs["db"] = funcarginfo
+ node2funcarg[funcarginfo.scopeitem] = "db"
+
+Implementation plan for resources
+------------------------------------------
+
+1. Revert FuncargRequest to the old form, unmerge item/request
+ (done)
+2. make funcarg factories be discovered at collection time
+3. Introduce funcarg marker
+4. Introduce funcarg scope parameter
+5. Introduce funcarg parametrize parameter
+6. make setup functions be discovered at collection time
+7. (Introduce a pytest_fixture_protocol/setup_funcargs hook)
+
+methods and data structures
+--------------------------------
+
+A FuncargManager holds all information about funcarg definitions
+including parametrization and scope definitions. It implements
+a pytest_generate_tests hook which performs parametrization as appropriate.
+
+as a simple example, let's consider a tree where a test function requires
+an "abc" funcarg and its factory defines it as parametrized and scoped
+for Modules. When collection hits the function item, it creates
+the metafunc object, and calls funcargdb.pytest_generate_tests(metafunc)
+which looks up available funcarg factories and their scope and parametrization.
+This information is equivalent to what can be provided today directly
+at the function site, and it should thus be relatively straightforward
+to implement the additional way of defining parametrization/scoping.
+
+conftest loading:
+ each funcarg-factory will populate the session.funcargmanager
+
+When a test item is collected, it grows a dictionary
+(funcargname2factorycalllist). A factory lookup is performed
+for each required funcarg. The resulting factory call is stored
+with the item. If a function is parametrized, multiple items are
+created with respective factory calls. Otherwise, if a factory is
+parametrized, multiple items and calls to the factory function are created as well.
+
+At setup time, an item populates a funcargs mapping, mapping names
+to values. If a value is missing, funcarg factories are queried for the
+given item. Test functions and setup functions are put in a class
+which looks up required funcarg factories.
+
+
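
The re-ordering described under "Sorting per-resource" amounts to keeping
unparametrized items first and then forming one contiguous group per parameter
value of the session-scoped funcarg. A minimal sketch, assuming the
parametrization is visible as item.callspec.params as in the data-structure
notes above::

    def sort_by_session_param(items, argname):
        order = {}                        # parameter value -> first-seen index
        def key(item):
            callspec = getattr(item, "callspec", None)
            if callspec is None or argname not in callspec.params:
                return (0, 0)             # unparametrized items come first
            param = callspec.params[argname]
            return (1, order.setdefault(param, len(order)))
        return sorted(items, key=key)     # stable sort keeps in-group order

Applied to the first example this yields test(), test2(), test1(s1),
test3(s1), test1(s2), test3(s2).
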
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/junitxml.py b/testing/web-platform/tests/tools/pytest/_pytest/junitxml.py
new file mode 100644
index 000000000..660d718a6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/junitxml.py
@@ -0,0 +1,387 @@
+"""
+ report test results in JUnit-XML format,
+ for use with Jenkins and build integration servers.
+
+
+Based on initial code from Ross Lawley.
+"""
+# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
+# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+
+import py
+import os
+import re
+import sys
+import time
+import pytest
+
+# Python 2.X and 3.X compatibility
+if sys.version_info[0] < 3:
+ from codecs import open
+else:
+ unichr = chr
+ unicode = str
+ long = int
+
+
+class Junit(py.xml.Namespace):
+ pass
+
+# We need to get the subset of the invalid unicode ranges according to
+# XML 1.0 which are valid in this python build. Hence we calculate
+# this dynamically instead of hardcoding it. The spec range of valid
+# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
+# | [#x10000-#x10FFFF]
+_legal_chars = (0x09, 0x0A, 0x0d)
+_legal_ranges = (
+ (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
+)
+_legal_xml_re = [
+ unicode("%s-%s") % (unichr(low), unichr(high))
+ for (low, high) in _legal_ranges if low < sys.maxunicode
+]
+_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
+illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
+del _legal_chars
+del _legal_ranges
+del _legal_xml_re
+
+_py_ext_re = re.compile(r"\.py$")
+
+
+def bin_xml_escape(arg):
+ def repl(matchobj):
+ i = ord(matchobj.group())
+ if i <= 0xFF:
+ return unicode('#x%02X') % i
+ else:
+ return unicode('#x%04X') % i
+
+ return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
+
+
+class _NodeReporter(object):
+ def __init__(self, nodeid, xml):
+
+ self.id = nodeid
+ self.xml = xml
+ self.add_stats = self.xml.add_stats
+ self.duration = 0
+ self.properties = []
+ self.nodes = []
+ self.testcase = None
+ self.attrs = {}
+
+ def append(self, node):
+ self.xml.add_stats(type(node).__name__)
+ self.nodes.append(node)
+
+ def add_property(self, name, value):
+ self.properties.append((str(name), bin_xml_escape(value)))
+
+ def make_properties_node(self):
+ """Return a Junit node containing custom properties, if any.
+ """
+ if self.properties:
+ return Junit.properties([
+ Junit.property(name=name, value=value)
+ for name, value in self.properties
+ ])
+ return ''
+
+ def record_testreport(self, testreport):
+ assert not self.testcase
+ names = mangle_test_address(testreport.nodeid)
+ classnames = names[:-1]
+ if self.xml.prefix:
+ classnames.insert(0, self.xml.prefix)
+ attrs = {
+ "classname": ".".join(classnames),
+ "name": bin_xml_escape(names[-1]),
+ "file": testreport.location[0],
+ }
+ if testreport.location[1] is not None:
+ attrs["line"] = testreport.location[1]
+ self.attrs = attrs
+
+ def to_xml(self):
+ testcase = Junit.testcase(time=self.duration, **self.attrs)
+ testcase.append(self.make_properties_node())
+ for node in self.nodes:
+ testcase.append(node)
+ return testcase
+
+ def _add_simple(self, kind, message, data=None):
+ data = bin_xml_escape(data)
+ node = kind(data, message=message)
+ self.append(node)
+
+ def _write_captured_output(self, report):
+ for capname in ('out', 'err'):
+ allcontent = ""
+ for name, content in report.get_sections("Captured std%s" %
+ capname):
+ allcontent += content
+ if allcontent:
+ tag = getattr(Junit, 'system-' + capname)
+ self.append(tag(bin_xml_escape(allcontent)))
+
+ def append_pass(self, report):
+ self.add_stats('passed')
+ self._write_captured_output(report)
+
+ def append_failure(self, report):
+ # msg = str(report.longrepr.reprtraceback.extraline)
+ if hasattr(report, "wasxfail"):
+ self._add_simple(
+ Junit.skipped,
+ "xfail-marked test passes unexpectedly")
+ else:
+ if hasattr(report.longrepr, "reprcrash"):
+ message = report.longrepr.reprcrash.message
+ elif isinstance(report.longrepr, (unicode, str)):
+ message = report.longrepr
+ else:
+ message = str(report.longrepr)
+ message = bin_xml_escape(message)
+ fail = Junit.failure(message=message)
+ fail.append(bin_xml_escape(report.longrepr))
+ self.append(fail)
+ self._write_captured_output(report)
+
+ def append_collect_error(self, report):
+ # msg = str(report.longrepr.reprtraceback.extraline)
+ self.append(Junit.error(bin_xml_escape(report.longrepr),
+ message="collection failure"))
+
+ def append_collect_skipped(self, report):
+ self._add_simple(
+ Junit.skipped, "collection skipped", report.longrepr)
+
+ def append_error(self, report):
+ self._add_simple(
+ Junit.error, "test setup failure", report.longrepr)
+ self._write_captured_output(report)
+
+ def append_skipped(self, report):
+ if hasattr(report, "wasxfail"):
+ self._add_simple(
+ Junit.skipped, "expected test failure", report.wasxfail
+ )
+ else:
+ filename, lineno, skipreason = report.longrepr
+ if skipreason.startswith("Skipped: "):
+ skipreason = bin_xml_escape(skipreason[9:])
+ self.append(
+ Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
+ type="pytest.skip",
+ message=skipreason))
+ self._write_captured_output(report)
+
+ def finalize(self):
+ data = self.to_xml().unicode(indent=0)
+ self.__dict__.clear()
+ self.to_xml = lambda: py.xml.raw(data)
+
+
+@pytest.fixture
+def record_xml_property(request):
+ """Fixture that adds extra xml properties to the tag for the calling test.
+ The fixture is callable with (name, value), with value being automatically
+ xml-encoded.
+ """
+ request.node.warn(
+ code='C3',
+ message='record_xml_property is an experimental feature',
+ )
+ xml = getattr(request.config, "_xml", None)
+ if xml is not None:
+ node_reporter = xml.node_reporter(request.node.nodeid)
+ return node_reporter.add_property
+ else:
+ def add_property_noop(name, value):
+ pass
+
+ return add_property_noop
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting")
+ group.addoption(
+ '--junitxml', '--junit-xml',
+ action="store",
+ dest="xmlpath",
+ metavar="path",
+ default=None,
+ help="create junit-xml style report file at given path.")
+ group.addoption(
+ '--junitprefix', '--junit-prefix',
+ action="store",
+ metavar="str",
+ default=None,
+ help="prepend prefix to classnames in junit-xml output")
+
+
+def pytest_configure(config):
+ xmlpath = config.option.xmlpath
+ # prevent opening xmllog on slave nodes (xdist)
+ if xmlpath and not hasattr(config, 'slaveinput'):
+ config._xml = LogXML(xmlpath, config.option.junitprefix)
+ config.pluginmanager.register(config._xml)
+
+
+def pytest_unconfigure(config):
+ xml = getattr(config, '_xml', None)
+ if xml:
+ del config._xml
+ config.pluginmanager.unregister(xml)
+
+
+def mangle_test_address(address):
+ path, possible_open_bracket, params = address.partition('[')
+ names = path.split("::")
+ try:
+ names.remove('()')
+ except ValueError:
+ pass
+ # convert file path to dotted path
+ names[0] = names[0].replace("/", '.')
+ names[0] = _py_ext_re.sub("", names[0])
+ # put any params back
+ names[-1] += possible_open_bracket + params
+ return names
+
+
+class LogXML(object):
+ def __init__(self, logfile, prefix):
+ logfile = os.path.expanduser(os.path.expandvars(logfile))
+ self.logfile = os.path.normpath(os.path.abspath(logfile))
+ self.prefix = prefix
+ self.stats = dict.fromkeys([
+ 'error',
+ 'passed',
+ 'failure',
+ 'skipped',
+ ], 0)
+ self.node_reporters = {} # nodeid -> _NodeReporter
+ self.node_reporters_ordered = []
+
+ def finalize(self, report):
+ nodeid = getattr(report, 'nodeid', report)
+ # local hack to handle xdist report order
+ slavenode = getattr(report, 'node', None)
+ reporter = self.node_reporters.pop((nodeid, slavenode))
+ if reporter is not None:
+ reporter.finalize()
+
+ def node_reporter(self, report):
+ nodeid = getattr(report, 'nodeid', report)
+ # local hack to handle xdist report order
+ slavenode = getattr(report, 'node', None)
+
+ key = nodeid, slavenode
+
+ if key in self.node_reporters:
+            # TODO: breaks for --dist=each
+ return self.node_reporters[key]
+ reporter = _NodeReporter(nodeid, self)
+ self.node_reporters[key] = reporter
+ self.node_reporters_ordered.append(reporter)
+ return reporter
+
+ def add_stats(self, key):
+ if key in self.stats:
+ self.stats[key] += 1
+
+ def _opentestcase(self, report):
+ reporter = self.node_reporter(report)
+ reporter.record_testreport(report)
+ return reporter
+
+ def pytest_runtest_logreport(self, report):
+ """handle a setup/call/teardown report, generating the appropriate
+ xml tags as necessary.
+
+ note: due to plugins like xdist, this hook may be called in interlaced
+ order with reports from other nodes. for example:
+
+ usual call order:
+ -> setup node1
+ -> call node1
+ -> teardown node1
+ -> setup node2
+ -> call node2
+ -> teardown node2
+
+ possible call order in xdist:
+ -> setup node1
+ -> call node1
+ -> setup node2
+ -> call node2
+ -> teardown node2
+ -> teardown node1
+ """
+ if report.passed:
+ if report.when == "call": # ignore setup/teardown
+ reporter = self._opentestcase(report)
+ reporter.append_pass(report)
+ elif report.failed:
+ reporter = self._opentestcase(report)
+ if report.when == "call":
+ reporter.append_failure(report)
+ else:
+ reporter.append_error(report)
+ elif report.skipped:
+ reporter = self._opentestcase(report)
+ reporter.append_skipped(report)
+ self.update_testcase_duration(report)
+ if report.when == "teardown":
+ self.finalize(report)
+
+ def update_testcase_duration(self, report):
+ """accumulates total duration for nodeid from given report and updates
+ the Junit.testcase with the new total if already created.
+ """
+ reporter = self.node_reporter(report)
+ reporter.duration += getattr(report, 'duration', 0.0)
+
+ def pytest_collectreport(self, report):
+ if not report.passed:
+ reporter = self._opentestcase(report)
+ if report.failed:
+ reporter.append_collect_error(report)
+ else:
+ reporter.append_collect_skipped(report)
+
+ def pytest_internalerror(self, excrepr):
+ reporter = self.node_reporter('internal')
+ reporter.attrs.update(classname="pytest", name='internal')
+ reporter._add_simple(Junit.error, 'internal error', excrepr)
+
+ def pytest_sessionstart(self):
+ self.suite_start_time = time.time()
+
+ def pytest_sessionfinish(self):
+ dirname = os.path.dirname(os.path.abspath(self.logfile))
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+ logfile = open(self.logfile, 'w', encoding='utf-8')
+ suite_stop_time = time.time()
+ suite_time_delta = suite_stop_time - self.suite_start_time
+
+ numtests = self.stats['passed'] + self.stats['failure']
+
+ logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+ logfile.write(Junit.testsuite(
+ [x.to_xml() for x in self.node_reporters_ordered],
+ name="pytest",
+ errors=self.stats['error'],
+ failures=self.stats['failure'],
+ skips=self.stats['skipped'],
+ tests=numtests,
+ time="%.3f" % suite_time_delta, ).unicode(indent=0))
+ logfile.close()
+
+ def pytest_terminal_summary(self, terminalreporter):
+ terminalreporter.write_sep("-",
+ "generated xml file: %s" % (self.logfile))
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/main.py b/testing/web-platform/tests/tools/pytest/_pytest/main.py
new file mode 100644
index 000000000..8654d7af6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/main.py
@@ -0,0 +1,744 @@
+""" core implementation of testing process: init, session, runtest loop. """
+import imp
+import os
+import re
+import sys
+
+import _pytest
+import _pytest._code
+import py
+import pytest
+try:
+ from collections import MutableMapping as MappingMixin
+except ImportError:
+ from UserDict import DictMixin as MappingMixin
+
+from _pytest.runner import collect_one_node
+
+tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
+
+# exitcodes for the command line
+EXIT_OK = 0
+EXIT_TESTSFAILED = 1
+EXIT_INTERRUPTED = 2
+EXIT_INTERNALERROR = 3
+EXIT_USAGEERROR = 4
+EXIT_NOTESTSCOLLECTED = 5
+
+name_re = re.compile("^[a-zA-Z_]\w*$")
+
+def pytest_addoption(parser):
+ parser.addini("norecursedirs", "directory patterns to avoid for recursion",
+ type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
+ parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
+ type="args", default=[])
+ #parser.addini("dirpatterns",
+ # "patterns specifying possible locations of test files",
+ # type="linelist", default=["**/test_*.txt",
+ # "**/test_*.py", "**/*_test.py"]
+ #)
+ group = parser.getgroup("general", "running and selection options")
+ group._addoption('-x', '--exitfirst', action="store_true", default=False,
+ dest="exitfirst",
+ help="exit instantly on first error or failed test."),
+ group._addoption('--maxfail', metavar="num",
+ action="store", type=int, dest="maxfail", default=0,
+ help="exit after first num failures or errors.")
+ group._addoption('--strict', action="store_true",
+ help="run pytest in strict mode, warnings become errors.")
+ group._addoption("-c", metavar="file", type=str, dest="inifilename",
+ help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
+
+ group = parser.getgroup("collect", "collection")
+ group.addoption('--collectonly', '--collect-only', action="store_true",
+ help="only collect tests, don't execute them."),
+ group.addoption('--pyargs', action="store_true",
+ help="try to interpret all arguments as python packages.")
+ group.addoption("--ignore", action="append", metavar="path",
+ help="ignore path during collection (multi-allowed).")
+ # when changing this to --conf-cut-dir, config.py Conftest.setinitial
+ # needs upgrading as well
+ group.addoption('--confcutdir', dest="confcutdir", default=None,
+ metavar="dir",
+ help="only load conftest.py's relative to specified dir.")
+ group.addoption('--noconftest', action="store_true",
+ dest="noconftest", default=False,
+ help="Don't load any conftest.py files.")
+
+ group = parser.getgroup("debugconfig",
+ "test session debugging and configuration")
+ group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
+ help="base temporary directory for this test run.")
+
+
+def pytest_namespace():
+ collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
+ return dict(collect=collect)
+
+def pytest_configure(config):
+    pytest.config = config # compatibility
+ if config.option.exitfirst:
+ config.option.maxfail = 1
+
+def wrap_session(config, doit):
+ """Skeleton command line program"""
+ session = Session(config)
+ session.exitstatus = EXIT_OK
+ initstate = 0
+ try:
+ try:
+ config._do_configure()
+ initstate = 1
+ config.hook.pytest_sessionstart(session=session)
+ initstate = 2
+ session.exitstatus = doit(config, session) or 0
+ except pytest.UsageError:
+ raise
+ except KeyboardInterrupt:
+ excinfo = _pytest._code.ExceptionInfo()
+ config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
+ session.exitstatus = EXIT_INTERRUPTED
+ except:
+ excinfo = _pytest._code.ExceptionInfo()
+ config.notify_exception(excinfo, config.option)
+ session.exitstatus = EXIT_INTERNALERROR
+ if excinfo.errisinstance(SystemExit):
+ sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
+
+ finally:
+ excinfo = None # Explicitly break reference cycle.
+ session.startdir.chdir()
+ if initstate >= 2:
+ config.hook.pytest_sessionfinish(
+ session=session,
+ exitstatus=session.exitstatus)
+ config._ensure_unconfigure()
+ return session.exitstatus
+
+def pytest_cmdline_main(config):
+ return wrap_session(config, _main)
+
+def _main(config, session):
+ """ default command line protocol for initialization, session,
+ running tests and reporting. """
+ config.hook.pytest_collection(session=session)
+ config.hook.pytest_runtestloop(session=session)
+
+ if session.testsfailed:
+ return EXIT_TESTSFAILED
+ elif session.testscollected == 0:
+ return EXIT_NOTESTSCOLLECTED
+
+def pytest_collection(session):
+ return session.perform_collect()
+
+def pytest_runtestloop(session):
+ if session.config.option.collectonly:
+ return True
+
+ def getnextitem(i):
+ # this is a function to avoid python2
+ # keeping sys.exc_info set when calling into a test
+ # python2 keeps sys.exc_info till the frame is left
+ try:
+ return session.items[i+1]
+ except IndexError:
+ return None
+
+ for i, item in enumerate(session.items):
+ nextitem = getnextitem(i)
+ item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
+ if session.shouldstop:
+ raise session.Interrupted(session.shouldstop)
+ return True
+
+def pytest_ignore_collect(path, config):
+ p = path.dirpath()
+ ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
+ ignore_paths = ignore_paths or []
+ excludeopt = config.getoption("ignore")
+ if excludeopt:
+ ignore_paths.extend([py.path.local(x) for x in excludeopt])
+ return path in ignore_paths
+
+class FSHookProxy:
+ def __init__(self, fspath, pm, remove_mods):
+ self.fspath = fspath
+ self.pm = pm
+ self.remove_mods = remove_mods
+
+ def __getattr__(self, name):
+ x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
+ self.__dict__[name] = x
+ return x
+
+def compatproperty(name):
+ def fget(self):
+ # deprecated - use pytest.name
+ return getattr(pytest, name)
+
+ return property(fget)
+
+class NodeKeywords(MappingMixin):
+ def __init__(self, node):
+ self.node = node
+ self.parent = node.parent
+ self._markers = {node.name: True}
+
+ def __getitem__(self, key):
+ try:
+ return self._markers[key]
+ except KeyError:
+ if self.parent is None:
+ raise
+ return self.parent.keywords[key]
+
+ def __setitem__(self, key, value):
+ self._markers[key] = value
+
+ def __delitem__(self, key):
+ raise ValueError("cannot delete key in keywords dict")
+
+ def __iter__(self):
+ seen = set(self._markers)
+ if self.parent is not None:
+ seen.update(self.parent.keywords)
+ return iter(seen)
+
+ def __len__(self):
+ return len(self.__iter__())
+
+ def keys(self):
+ return list(self)
+
+ def __repr__(self):
+ return "<NodeKeywords for node %s>" % (self.node, )
+
+
+class Node(object):
+ """ base class for Collector and Item the test collection tree.
+ Collector subclasses have children, Items are terminal nodes."""
+
+ def __init__(self, name, parent=None, config=None, session=None):
+ #: a unique name within the scope of the parent node
+ self.name = name
+
+ #: the parent collector node.
+ self.parent = parent
+
+ #: the pytest config object
+ self.config = config or parent.config
+
+ #: the session this node is part of
+ self.session = session or parent.session
+
+ #: filesystem path where this node was collected from (can be None)
+ self.fspath = getattr(parent, 'fspath', None)
+
+ #: keywords/markers collected from all scopes
+ self.keywords = NodeKeywords(self)
+
+ #: allow adding of extra keywords to use for matching
+ self.extra_keyword_matches = set()
+
+ # used for storing artificial fixturedefs for direct parametrization
+ self._name2pseudofixturedef = {}
+
+ @property
+ def ihook(self):
+ """ fspath sensitive hook proxy used to call pytest hooks"""
+ return self.session.gethookproxy(self.fspath)
+
+ Module = compatproperty("Module")
+ Class = compatproperty("Class")
+ Instance = compatproperty("Instance")
+ Function = compatproperty("Function")
+ File = compatproperty("File")
+ Item = compatproperty("Item")
+
+ def _getcustomclass(self, name):
+ cls = getattr(self, name)
+ if cls != getattr(pytest, name):
+ py.log._apiwarn("2.0", "use of node.%s is deprecated, "
+ "use pytest_pycollect_makeitem(...) to create custom "
+ "collection nodes" % name)
+ return cls
+
+ def __repr__(self):
+ return "<%s %r>" %(self.__class__.__name__,
+ getattr(self, 'name', None))
+
+ def warn(self, code, message):
+ """ generate a warning with the given code and message for this
+ item. """
+ assert isinstance(code, str)
+ fslocation = getattr(self, "location", None)
+ if fslocation is None:
+ fslocation = getattr(self, "fspath", None)
+ else:
+ fslocation = "%s:%s" % fslocation[:2]
+
+ self.ihook.pytest_logwarning.call_historic(kwargs=dict(
+ code=code, message=message,
+ nodeid=self.nodeid, fslocation=fslocation))
+
+ # methods for ordering nodes
+ @property
+ def nodeid(self):
+ """ a ::-separated string denoting its collection tree address. """
+ try:
+ return self._nodeid
+ except AttributeError:
+ self._nodeid = x = self._makeid()
+ return x
+
+ def _makeid(self):
+ return self.parent.nodeid + "::" + self.name
+
+ def __hash__(self):
+ return hash(self.nodeid)
+
+ def setup(self):
+ pass
+
+ def teardown(self):
+ pass
+
+ def _memoizedcall(self, attrname, function):
+ exattrname = "_ex_" + attrname
+ failure = getattr(self, exattrname, None)
+ if failure is not None:
+ py.builtin._reraise(failure[0], failure[1], failure[2])
+ if hasattr(self, attrname):
+ return getattr(self, attrname)
+ try:
+ res = function()
+ except py.builtin._sysex:
+ raise
+ except:
+ failure = sys.exc_info()
+ setattr(self, exattrname, failure)
+ raise
+ setattr(self, attrname, res)
+ return res
+
+ def listchain(self):
+ """ return list of all parent collectors up to self,
+ starting from root of collection tree. """
+ chain = []
+ item = self
+ while item is not None:
+ chain.append(item)
+ item = item.parent
+ chain.reverse()
+ return chain
+
+ def add_marker(self, marker):
+ """ dynamically add a marker object to the node.
+
+ ``marker`` can be a string or pytest.mark.* instance.
+ """
+ from _pytest.mark import MarkDecorator
+ if isinstance(marker, py.builtin._basestring):
+ marker = MarkDecorator(marker)
+ elif not isinstance(marker, MarkDecorator):
+ raise ValueError("is not a string or pytest.mark.* Marker")
+ self.keywords[marker.name] = marker
+
+ def get_marker(self, name):
+ """ get a marker object from this node or None if
+ the node doesn't have a marker with that name. """
+ val = self.keywords.get(name, None)
+ if val is not None:
+ from _pytest.mark import MarkInfo, MarkDecorator
+ if isinstance(val, (MarkDecorator, MarkInfo)):
+ return val
+
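+    # Usage sketch (illustrative, hypothetical example, not exercised here):
+    # add_marker()/get_marker() are typically driven from a hook such as
+    # pytest_collection_modifyitems in a conftest.py:
+    #
+    #     def pytest_collection_modifyitems(items):
+    #         for item in items:
+    #             if "slow" in item.nodeid:
+    #                 item.add_marker("slow")
+    #
+    # afterwards item.get_marker("slow") returns the stored MarkDecorator.
+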
+ def listextrakeywords(self):
+ """ Return a set of all extra keywords in self and any parents."""
+ extra_keywords = set()
+ item = self
+ for item in self.listchain():
+ extra_keywords.update(item.extra_keyword_matches)
+ return extra_keywords
+
+ def listnames(self):
+ return [x.name for x in self.listchain()]
+
+ def addfinalizer(self, fin):
+ """ register a function to be called when this node is finalized.
+
+ This method can only be called when this node is active
+ in a setup chain, for example during self.setup().
+ """
+ self.session._setupstate.addfinalizer(fin, self)
+
+ def getparent(self, cls):
+ """ get the next parent node (including ourself)
+ which is an instance of the given class"""
+ current = self
+ while current and not isinstance(current, cls):
+ current = current.parent
+ return current
+
+ def _prunetraceback(self, excinfo):
+ pass
+
+ def _repr_failure_py(self, excinfo, style=None):
+ fm = self.session._fixturemanager
+ if excinfo.errisinstance(fm.FixtureLookupError):
+ return excinfo.value.formatrepr()
+ tbfilter = True
+ if self.config.option.fulltrace:
+ style="long"
+ else:
+ self._prunetraceback(excinfo)
+ tbfilter = False # prunetraceback already does it
+ if style == "auto":
+ style = "long"
+ # XXX should excinfo.getrepr record all data and toterminal() process it?
+ if style is None:
+ if self.config.option.tbstyle == "short":
+ style = "short"
+ else:
+ style = "long"
+
+ return excinfo.getrepr(funcargs=True,
+ showlocals=self.config.option.showlocals,
+ style=style, tbfilter=tbfilter)
+
+ repr_failure = _repr_failure_py
+
+class Collector(Node):
+ """ Collector instances create children through collect()
+ and thus iteratively build a tree.
+ """
+
+ class CollectError(Exception):
+ """ an error during collection, contains a custom message. """
+
+ def collect(self):
+ """ returns a list of children (items and collectors)
+ for this collection node.
+ """
+ raise NotImplementedError("abstract")
+
+ def repr_failure(self, excinfo):
+ """ represent a collection failure. """
+ if excinfo.errisinstance(self.CollectError):
+ exc = excinfo.value
+ return str(exc.args[0])
+ return self._repr_failure_py(excinfo, style="short")
+
+ def _memocollect(self):
+ """ internal helper method to cache results of calling collect(). """
+ return self._memoizedcall('_collected', lambda: list(self.collect()))
+
+ def _prunetraceback(self, excinfo):
+ if hasattr(self, 'fspath'):
+ traceback = excinfo.traceback
+ ntraceback = traceback.cut(path=self.fspath)
+ if ntraceback == traceback:
+ ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
+ excinfo.traceback = ntraceback.filter()
+
+class FSCollector(Collector):
+ def __init__(self, fspath, parent=None, config=None, session=None):
+ fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
+ name = fspath.basename
+ if parent is not None:
+ rel = fspath.relto(parent.fspath)
+ if rel:
+ name = rel
+ name = name.replace(os.sep, "/")
+ super(FSCollector, self).__init__(name, parent, config, session)
+ self.fspath = fspath
+
+ def _makeid(self):
+ relpath = self.fspath.relto(self.config.rootdir)
+ if os.sep != "/":
+ relpath = relpath.replace(os.sep, "/")
+ return relpath
+
+class File(FSCollector):
+ """ base class for collecting tests from a file. """
+
+class Item(Node):
+ """ a basic test invocation item. Note that for a single function
+ there might be multiple test invocation items.
+ """
+ nextitem = None
+
+ def __init__(self, name, parent=None, config=None, session=None):
+ super(Item, self).__init__(name, parent, config, session)
+ self._report_sections = []
+
+ def add_report_section(self, when, key, content):
+ if content:
+ self._report_sections.append((when, key, content))
+
+ def reportinfo(self):
+ return self.fspath, None, ""
+
+ @property
+ def location(self):
+ try:
+ return self._location
+ except AttributeError:
+ location = self.reportinfo()
+ # bestrelpath is a quite slow function
+ cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
+ try:
+ fspath = cache[location[0]]
+ except KeyError:
+ fspath = self.session.fspath.bestrelpath(location[0])
+ cache[location[0]] = fspath
+ location = (fspath, location[1], str(location[2]))
+ self._location = location
+ return location
+
+class NoMatch(Exception):
+ """ raised if matching cannot locate a matching names. """
+
+class Interrupted(KeyboardInterrupt):
+ """ signals an interrupted test run. """
+ __module__ = 'builtins' # for py3
+
+class Session(FSCollector):
+ Interrupted = Interrupted
+
+ def __init__(self, config):
+ FSCollector.__init__(self, config.rootdir, parent=None,
+ config=config, session=self)
+ self._fs2hookproxy = {}
+ self.testsfailed = 0
+ self.testscollected = 0
+ self.shouldstop = False
+ self.trace = config.trace.root.get("collection")
+ self._norecursepatterns = config.getini("norecursedirs")
+ self.startdir = py.path.local()
+ self.config.pluginmanager.register(self, name="session")
+
+ def _makeid(self):
+ return ""
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_collectstart(self):
+ if self.shouldstop:
+ raise self.Interrupted(self.shouldstop)
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_runtest_logreport(self, report):
+ if report.failed and not hasattr(report, 'wasxfail'):
+ self.testsfailed += 1
+ maxfail = self.config.getvalue("maxfail")
+ if maxfail and self.testsfailed >= maxfail:
+ self.shouldstop = "stopping after %d failures" % (
+ self.testsfailed)
+ pytest_collectreport = pytest_runtest_logreport
+
+ def isinitpath(self, path):
+ return path in self._initialpaths
+
+ def gethookproxy(self, fspath):
+ try:
+ return self._fs2hookproxy[fspath]
+ except KeyError:
+            # check if we have the common case of running
+            # hooks with all conftest.py files
+ pm = self.config.pluginmanager
+ my_conftestmodules = pm._getconftestmodules(fspath)
+ remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
+ if remove_mods:
+ # one or more conftests are not in use at this fspath
+ proxy = FSHookProxy(fspath, pm, remove_mods)
+ else:
+                # all plugins are active for this fspath
+ proxy = self.config.hook
+
+ self._fs2hookproxy[fspath] = proxy
+ return proxy
+
+ def perform_collect(self, args=None, genitems=True):
+ hook = self.config.hook
+ try:
+ items = self._perform_collect(args, genitems)
+ hook.pytest_collection_modifyitems(session=self,
+ config=self.config, items=items)
+ finally:
+ hook.pytest_collection_finish(session=self)
+ self.testscollected = len(items)
+ return items
+
+ def _perform_collect(self, args, genitems):
+ if args is None:
+ args = self.config.args
+ self.trace("perform_collect", self, args)
+ self.trace.root.indent += 1
+ self._notfound = []
+ self._initialpaths = set()
+ self._initialparts = []
+ self.items = items = []
+ for arg in args:
+ parts = self._parsearg(arg)
+ self._initialparts.append(parts)
+ self._initialpaths.add(parts[0])
+ rep = collect_one_node(self)
+ self.ihook.pytest_collectreport(report=rep)
+ self.trace.root.indent -= 1
+ if self._notfound:
+ errors = []
+ for arg, exc in self._notfound:
+ line = "(no name %r in any of %r)" % (arg, exc.args[0])
+ errors.append("not found: %s\n%s" % (arg, line))
+ #XXX: test this
+ raise pytest.UsageError(*errors)
+ if not genitems:
+ return rep.result
+ else:
+ if rep.passed:
+ for node in rep.result:
+ self.items.extend(self.genitems(node))
+ return items
+
+ def collect(self):
+ for parts in self._initialparts:
+ arg = "::".join(map(str, parts))
+ self.trace("processing argument", arg)
+ self.trace.root.indent += 1
+ try:
+ for x in self._collect(arg):
+ yield x
+ except NoMatch:
+ # we are inside a make_report hook so
+ # we cannot directly pass through the exception
+ self._notfound.append((arg, sys.exc_info()[1]))
+
+ self.trace.root.indent -= 1
+
+ def _collect(self, arg):
+ names = self._parsearg(arg)
+ path = names.pop(0)
+ if path.check(dir=1):
+ assert not names, "invalid arg %r" %(arg,)
+ for path in path.visit(fil=lambda x: x.check(file=1),
+ rec=self._recurse, bf=True, sort=True):
+ for x in self._collectfile(path):
+ yield x
+ else:
+ assert path.check(file=1)
+ for x in self.matchnodes(self._collectfile(path), names):
+ yield x
+
+ def _collectfile(self, path):
+ ihook = self.gethookproxy(path)
+ if not self.isinitpath(path):
+ if ihook.pytest_ignore_collect(path=path, config=self.config):
+ return ()
+ return ihook.pytest_collect_file(path=path, parent=self)
+
+ def _recurse(self, path):
+ ihook = self.gethookproxy(path.dirpath())
+ if ihook.pytest_ignore_collect(path=path, config=self.config):
+ return
+ for pat in self._norecursepatterns:
+ if path.check(fnmatch=pat):
+ return False
+ ihook = self.gethookproxy(path)
+ ihook.pytest_collect_directory(path=path, parent=self)
+ return True
+
+ def _tryconvertpyarg(self, x):
+ mod = None
+ path = [os.path.abspath('.')] + sys.path
+ for name in x.split('.'):
+ # ignore anything that's not a proper name here
+ # else something like --pyargs will mess up '.'
+ # since imp.find_module will actually sometimes work for it
+ # but it's supposed to be considered a filesystem path
+ # not a package
+ if name_re.match(name) is None:
+ return x
+ try:
+ fd, mod, type_ = imp.find_module(name, path)
+ except ImportError:
+ return x
+ else:
+ if fd is not None:
+ fd.close()
+
+ if type_[2] != imp.PKG_DIRECTORY:
+ path = [os.path.dirname(mod)]
+ else:
+ path = [mod]
+ return mod
+
+ def _parsearg(self, arg):
+ """ return (fspath, names) tuple after checking the file exists. """
+ arg = str(arg)
+ if self.config.option.pyargs:
+ arg = self._tryconvertpyarg(arg)
+ parts = str(arg).split("::")
+ relpath = parts[0].replace("/", os.sep)
+ path = self.config.invocation_dir.join(relpath, abs=True)
+ if not path.check():
+ if self.config.option.pyargs:
+ msg = "file or package not found: "
+ else:
+ msg = "file not found: "
+ raise pytest.UsageError(msg + arg)
+ parts[0] = path
+ return parts
+
+ def matchnodes(self, matching, names):
+ self.trace("matchnodes", matching, names)
+ self.trace.root.indent += 1
+ nodes = self._matchnodes(matching, names)
+ num = len(nodes)
+ self.trace("matchnodes finished -> ", num, "nodes")
+ self.trace.root.indent -= 1
+ if num == 0:
+ raise NoMatch(matching, names[:1])
+ return nodes
+
+ def _matchnodes(self, matching, names):
+ if not matching or not names:
+ return matching
+ name = names[0]
+ assert name
+ nextnames = names[1:]
+ resultnodes = []
+ for node in matching:
+ if isinstance(node, pytest.Item):
+ if not names:
+ resultnodes.append(node)
+ continue
+ assert isinstance(node, pytest.Collector)
+ rep = collect_one_node(node)
+ if rep.passed:
+ has_matched = False
+ for x in rep.result:
+ # TODO: remove parametrized workaround once collection structure contains parametrization
+ if x.name == name or x.name.split("[")[0] == name:
+ resultnodes.extend(self.matchnodes([x], nextnames))
+ has_matched = True
+ # XXX accept IDs that don't have "()" for class instances
+ if not has_matched and len(rep.result) == 1 and x.name == "()":
+ nextnames.insert(0, name)
+ resultnodes.extend(self.matchnodes([x], nextnames))
+ node.ihook.pytest_collectreport(report=rep)
+ return resultnodes
+
+ def genitems(self, node):
+ self.trace("genitems", node)
+ if isinstance(node, pytest.Item):
+ node.ihook.pytest_itemcollected(item=node)
+ yield node
+ else:
+ assert isinstance(node, pytest.Collector)
+ rep = collect_one_node(node)
+ if rep.passed:
+ for subnode in rep.result:
+ for x in self.genitems(subnode):
+ yield x
+ node.ihook.pytest_collectreport(report=rep)
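
perform_collect() above ends by handing the collected list to the
pytest_collection_modifyitems hook, which is the usual extension point for
reordering or deselecting tests. A minimal conftest.py sketch of a user-side
implementation (the "slow" keyword is illustrative, not something this patch
defines):

    # conftest.py -- illustrative sketch only
    def pytest_collection_modifyitems(session, config, items):
        # push anything whose keywords include "slow" to the end of the run
        slow = [item for item in items if "slow" in item.keywords]
        fast = [item for item in items if "slow" not in item.keywords]
        items[:] = fast + slow
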
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/mark.py b/testing/web-platform/tests/tools/pytest/_pytest/mark.py
new file mode 100644
index 000000000..1a7635402
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/mark.py
@@ -0,0 +1,311 @@
+""" generic mechanism for marking and selecting python functions. """
+import inspect
+
+
+class MarkerError(Exception):
+
+ """Error in use of a pytest marker/attribute."""
+
+
+def pytest_namespace():
+ return {'mark': MarkGenerator()}
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption(
+ '-k',
+ action="store", dest="keyword", default='', metavar="EXPRESSION",
+ help="only run tests which match the given substring expression. "
+ "An expression is a python evaluatable expression "
+ "where all names are substring-matched against test names "
+ "and their parent classes. Example: -k 'test_method or test "
+ "other' matches all test functions and classes whose name "
+ "contains 'test_method' or 'test_other'. "
+ "Additionally keywords are matched to classes and functions "
+ "containing extra names in their 'extra_keyword_matches' set, "
+ "as well as functions which have names assigned directly to them."
+ )
+
+ group._addoption(
+ "-m",
+ action="store", dest="markexpr", default="", metavar="MARKEXPR",
+ help="only run tests matching given mark expression. "
+ "example: -m 'mark1 and not mark2'."
+ )
+
+ group.addoption(
+ "--markers", action="store_true",
+ help="show markers (builtin, plugin and per-project ones)."
+ )
+
+ parser.addini("markers", "markers for test functions", 'linelist')
+
+
+def pytest_cmdline_main(config):
+ import _pytest.config
+ if config.option.markers:
+ config._do_configure()
+ tw = _pytest.config.create_terminal_writer(config)
+ for line in config.getini("markers"):
+ name, rest = line.split(":", 1)
+ tw.write("@pytest.mark.%s:" % name, bold=True)
+ tw.line(rest)
+ tw.line()
+ config._ensure_unconfigure()
+ return 0
+pytest_cmdline_main.tryfirst = True
+
+
+def pytest_collection_modifyitems(items, config):
+ keywordexpr = config.option.keyword
+ matchexpr = config.option.markexpr
+ if not keywordexpr and not matchexpr:
+ return
+ # pytest used to allow "-" for negating
+ # but today we just allow "-" at the beginning, use "not" instead
+    # we will probably remove "-" altogether soon
+ if keywordexpr.startswith("-"):
+ keywordexpr = "not " + keywordexpr[1:]
+ selectuntil = False
+ if keywordexpr[-1:] == ":":
+ selectuntil = True
+ keywordexpr = keywordexpr[:-1]
+
+ remaining = []
+ deselected = []
+ for colitem in items:
+ if keywordexpr and not matchkeyword(colitem, keywordexpr):
+ deselected.append(colitem)
+ else:
+ if selectuntil:
+ keywordexpr = None
+ if matchexpr:
+ if not matchmark(colitem, matchexpr):
+ deselected.append(colitem)
+ continue
+ remaining.append(colitem)
+
+ if deselected:
+ config.hook.pytest_deselected(items=deselected)
+ items[:] = remaining
+
+
+class MarkMapping:
+ """Provides a local mapping for markers where item access
+ resolves to True if the marker is present. """
+ def __init__(self, keywords):
+ mymarks = set()
+ for key, value in keywords.items():
+ if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator):
+ mymarks.add(key)
+ self._mymarks = mymarks
+
+ def __getitem__(self, name):
+ return name in self._mymarks
+
+
+class KeywordMapping:
+ """Provides a local mapping for keywords.
+ Given a list of names, map any substring of one of these names to True.
+ """
+ def __init__(self, names):
+ self._names = names
+
+ def __getitem__(self, subname):
+ for name in self._names:
+ if subname in name:
+ return True
+ return False
+
+
+def matchmark(colitem, markexpr):
+ """Tries to match on any marker names, attached to the given colitem."""
+ return eval(markexpr, {}, MarkMapping(colitem.keywords))
+
+
+def matchkeyword(colitem, keywordexpr):
+ """Tries to match given keyword expression to given collector item.
+
+ Will match on the name of colitem, including the names of its parents.
+ Only matches names of items which are either a :class:`Class` or a
+ :class:`Function`.
+ Additionally, matches on names in the 'extra_keyword_matches' set of
+ any item, as well as names directly assigned to test functions.
+ """
+ mapped_names = set()
+
+ # Add the names of the current item and any parent items
+ import pytest
+ for item in colitem.listchain():
+ if not isinstance(item, pytest.Instance):
+ mapped_names.add(item.name)
+
+ # Add the names added as extra keywords to current or parent items
+ for name in colitem.listextrakeywords():
+ mapped_names.add(name)
+
+ # Add the names attached to the current function through direct assignment
+ if hasattr(colitem, 'function'):
+ for name in colitem.function.__dict__:
+ mapped_names.add(name)
+
+ mapping = KeywordMapping(mapped_names)
+ if " " not in keywordexpr:
+ # special case to allow for simple "-k pass" and "-k 1.3"
+ return mapping[keywordexpr]
+ elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
+ return not mapping[keywordexpr[4:]]
+ return eval(keywordexpr, {}, mapping)
+
+
+def pytest_configure(config):
+ import pytest
+ if config.option.strict:
+ pytest.mark._config = config
+
+
+class MarkGenerator:
+ """ Factory for :class:`MarkDecorator` objects - exposed as
+ a ``pytest.mark`` singleton instance. Example::
+
+ import pytest
+ @pytest.mark.slowtest
+ def test_function():
+ pass
+
+ will set a 'slowtest' :class:`MarkInfo` object
+ on the ``test_function`` object. """
+
+ def __getattr__(self, name):
+ if name[0] == "_":
+ raise AttributeError("Marker name must NOT start with underscore")
+ if hasattr(self, '_config'):
+ self._check(name)
+ return MarkDecorator(name)
+
+ def _check(self, name):
+ try:
+ if name in self._markers:
+ return
+ except AttributeError:
+ pass
+ self._markers = l = set()
+ for line in self._config.getini("markers"):
+ beginning = line.split(":", 1)
+ x = beginning[0].split("(", 1)[0]
+ l.add(x)
+ if name not in self._markers:
+ raise AttributeError("%r not a registered marker" % (name,))
+
+def istestfunc(func):
+ return hasattr(func, "__call__") and \
+ getattr(func, "__name__", "<lambda>") != "<lambda>"
+
+class MarkDecorator:
+ """ A decorator for test functions and test classes. When applied
+ it will create :class:`MarkInfo` objects which may be
+ :ref:`retrieved by hooks as item keywords <excontrolskip>`.
+ MarkDecorator instances are often created like this::
+
+ mark1 = pytest.mark.NAME # simple MarkDecorator
+ mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
+
+ and can then be applied as decorators to test functions::
+
+ @mark2
+ def test_function():
+ pass
+
+ When a MarkDecorator instance is called it does the following:
+ 1. If called with a single class as its only positional argument and no
+ additional keyword arguments, it attaches itself to the class so it
+ gets applied automatically to all test cases found in that class.
+ 2. If called with a single function as its only positional argument and
+ no additional keyword arguments, it attaches a MarkInfo object to the
+ function, containing all the arguments already stored internally in
+ the MarkDecorator.
+ 3. When called in any other case, it performs a 'fake construction' call,
+ i.e. it returns a new MarkDecorator instance with the original
+ MarkDecorator's content updated with the arguments passed to this
+ call.
+
+ Note: The rules above prevent MarkDecorator objects from storing only a
+ single function or class reference as their positional argument with no
+ additional keyword or positional arguments.
+
+ """
+ def __init__(self, name, args=None, kwargs=None):
+ self.name = name
+ self.args = args or ()
+ self.kwargs = kwargs or {}
+
+ @property
+ def markname(self):
+ return self.name # for backward-compat (2.4.1 had this attr)
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ name = d.pop('name')
+ return "<MarkDecorator %r %r>" % (name, d)
+
+ def __call__(self, *args, **kwargs):
+ """ if passed a single callable argument: decorate it with mark info.
+ otherwise add *args/**kwargs in-place to mark information. """
+ if args and not kwargs:
+ func = args[0]
+ is_class = inspect.isclass(func)
+ if len(args) == 1 and (istestfunc(func) or is_class):
+ if is_class:
+ if hasattr(func, 'pytestmark'):
+ mark_list = func.pytestmark
+ if not isinstance(mark_list, list):
+ mark_list = [mark_list]
+ # always work on a copy to avoid updating pytestmark
+ # from a superclass by accident
+ mark_list = mark_list + [self]
+ func.pytestmark = mark_list
+ else:
+ func.pytestmark = [self]
+ else:
+ holder = getattr(func, self.name, None)
+ if holder is None:
+ holder = MarkInfo(
+ self.name, self.args, self.kwargs
+ )
+ setattr(func, self.name, holder)
+ else:
+ holder.add(self.args, self.kwargs)
+ return func
+ kw = self.kwargs.copy()
+ kw.update(kwargs)
+ args = self.args + args
+ return self.__class__(self.name, args=args, kwargs=kw)
+
+
+class MarkInfo:
+ """ Marking object created by :class:`MarkDecorator` instances. """
+ def __init__(self, name, args, kwargs):
+ #: name of attribute
+ self.name = name
+ #: positional argument list, empty if none specified
+ self.args = args
+ #: keyword argument dictionary, empty if nothing specified
+ self.kwargs = kwargs.copy()
+ self._arglist = [(args, kwargs.copy())]
+
+ def __repr__(self):
+ return "<MarkInfo %r args=%r kwargs=%r>" % (
+ self.name, self.args, self.kwargs
+ )
+
+ def add(self, args, kwargs):
+ """ add a MarkInfo with the given args and kwargs. """
+ self._arglist.append((args, kwargs))
+ self.args += args
+ self.kwargs.update(kwargs)
+
+ def __iter__(self):
+ """ yield MarkInfo objects each relating to a marking-call. """
+ for args, kwargs in self._arglist:
+ yield MarkInfo(self.name, args, kwargs)
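
MarkGenerator, MarkDecorator and MarkInfo above back the ``pytest.mark``
singleton; a minimal sketch of how they are typically exercised from a test
module (the marker names are illustrative):

    # test_marks.py -- illustrative only
    import pytest

    slow = pytest.mark.slow                    # bare MarkDecorator

    @slow
    def test_heavy():
        assert sum(range(10)) == 45

    @pytest.mark.webtest(browser="firefox")    # parametrized marker -> MarkInfo
    def test_browser():
        pass

    # selected via the options registered in pytest_addoption above, e.g.
    #   py.test -m "slow and not webtest"
    #   py.test -k "heavy or browser"
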
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/monkeypatch.py b/testing/web-platform/tests/tools/pytest/_pytest/monkeypatch.py
new file mode 100644
index 000000000..d4c169d37
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/monkeypatch.py
@@ -0,0 +1,254 @@
+""" monkeypatching and mocking functionality. """
+
+import os, sys
+import re
+
+from py.builtin import _basestring
+
+RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
+
+
+def pytest_funcarg__monkeypatch(request):
+ """The returned ``monkeypatch`` funcarg provides these
+ helper methods to modify objects, dictionaries or os.environ::
+
+ monkeypatch.setattr(obj, name, value, raising=True)
+ monkeypatch.delattr(obj, name, raising=True)
+ monkeypatch.setitem(mapping, name, value)
+ monkeypatch.delitem(obj, name, raising=True)
+ monkeypatch.setenv(name, value, prepend=False)
+        monkeypatch.delenv(name, raising=True)
+ monkeypatch.syspath_prepend(path)
+ monkeypatch.chdir(path)
+
+ All modifications will be undone after the requesting
+ test function has finished. The ``raising``
+ parameter determines if a KeyError or AttributeError
+ will be raised if the set/deletion operation has no target.
+ """
+ mpatch = monkeypatch()
+ request.addfinalizer(mpatch.undo)
+ return mpatch
+
+
+def resolve(name):
+ # simplified from zope.dottedname
+ parts = name.split('.')
+
+ used = parts.pop(0)
+ found = __import__(used)
+ for part in parts:
+ used += '.' + part
+ try:
+ found = getattr(found, part)
+ except AttributeError:
+ pass
+ else:
+ continue
+ # we use explicit un-nesting of the handling block in order
+ # to avoid nested exceptions on python 3
+ try:
+ __import__(used)
+ except ImportError as ex:
+ # str is used for py2 vs py3
+ expected = str(ex).split()[-1]
+ if expected == used:
+ raise
+ else:
+ raise ImportError(
+ 'import error in %s: %s' % (used, ex)
+ )
+ found = annotated_getattr(found, part, used)
+ return found
+
+
+def annotated_getattr(obj, name, ann):
+ try:
+ obj = getattr(obj, name)
+ except AttributeError:
+ raise AttributeError(
+ '%r object at %s has no attribute %r' % (
+ type(obj).__name__, ann, name
+ )
+ )
+ return obj
+
+
+def derive_importpath(import_path, raising):
+ if not isinstance(import_path, _basestring) or "." not in import_path:
+ raise TypeError("must be absolute import path string, not %r" %
+ (import_path,))
+ module, attr = import_path.rsplit('.', 1)
+ target = resolve(module)
+ if raising:
+ annotated_getattr(target, attr, ann=module)
+ return attr, target
+
+
+class Notset:
+ def __repr__(self):
+ return "<notset>"
+
+
+notset = Notset()
+
+
+class monkeypatch:
+ """ Object keeping a record of setattr/item/env/syspath changes. """
+
+ def __init__(self):
+ self._setattr = []
+ self._setitem = []
+ self._cwd = None
+ self._savesyspath = None
+
+ def setattr(self, target, name, value=notset, raising=True):
+ """ Set attribute value on target, memorizing the old value.
+ By default raise AttributeError if the attribute did not exist.
+
+ For convenience you can specify a string as ``target`` which
+ will be interpreted as a dotted import path, with the last part
+ being the attribute name. Example:
+ ``monkeypatch.setattr("os.getcwd", lambda x: "/")``
+ would set the ``getcwd`` function of the ``os`` module.
+
+ The ``raising`` value determines if the setattr should fail
+ if the attribute is not already present (defaults to True
+ which means it will raise).
+ """
+ __tracebackhide__ = True
+ import inspect
+
+ if value is notset:
+ if not isinstance(target, _basestring):
+ raise TypeError("use setattr(target, name, value) or "
+ "setattr(target, value) with target being a dotted "
+ "import string")
+ value = name
+ name, target = derive_importpath(target, raising)
+
+ oldval = getattr(target, name, notset)
+ if raising and oldval is notset:
+ raise AttributeError("%r has no attribute %r" % (target, name))
+
+ # avoid class descriptors like staticmethod/classmethod
+ if inspect.isclass(target):
+ oldval = target.__dict__.get(name, notset)
+ self._setattr.append((target, name, oldval))
+ setattr(target, name, value)
+
+ def delattr(self, target, name=notset, raising=True):
+ """ Delete attribute ``name`` from ``target``, by default raise
+        AttributeError if the attribute did not previously exist.
+
+ If no ``name`` is specified and ``target`` is a string
+ it will be interpreted as a dotted import path with the
+ last part being the attribute name.
+
+ If ``raising`` is set to False, no exception will be raised if the
+ attribute is missing.
+ """
+ __tracebackhide__ = True
+ if name is notset:
+ if not isinstance(target, _basestring):
+ raise TypeError("use delattr(target, name) or "
+ "delattr(target) with target being a dotted "
+ "import string")
+ name, target = derive_importpath(target, raising)
+
+ if not hasattr(target, name):
+ if raising:
+ raise AttributeError(name)
+ else:
+ self._setattr.append((target, name, getattr(target, name, notset)))
+ delattr(target, name)
+
+ def setitem(self, dic, name, value):
+ """ Set dictionary entry ``name`` to value. """
+ self._setitem.append((dic, name, dic.get(name, notset)))
+ dic[name] = value
+
+ def delitem(self, dic, name, raising=True):
+ """ Delete ``name`` from dict. Raise KeyError if it doesn't exist.
+
+ If ``raising`` is set to False, no exception will be raised if the
+ key is missing.
+ """
+ if name not in dic:
+ if raising:
+ raise KeyError(name)
+ else:
+ self._setitem.append((dic, name, dic.get(name, notset)))
+ del dic[name]
+
+ def setenv(self, name, value, prepend=None):
+ """ Set environment variable ``name`` to ``value``. If ``prepend``
+ is a character, read the current environment variable value
+ and prepend the ``value`` adjoined with the ``prepend`` character."""
+ value = str(value)
+ if prepend and name in os.environ:
+ value = value + prepend + os.environ[name]
+ self.setitem(os.environ, name, value)
+
+ def delenv(self, name, raising=True):
+ """ Delete ``name`` from the environment. Raise KeyError it does not
+ exist.
+
+ If ``raising`` is set to False, no exception will be raised if the
+ environment variable is missing.
+ """
+ self.delitem(os.environ, name, raising=raising)
+
+ def syspath_prepend(self, path):
+ """ Prepend ``path`` to ``sys.path`` list of import locations. """
+ if self._savesyspath is None:
+ self._savesyspath = sys.path[:]
+ sys.path.insert(0, str(path))
+
+ def chdir(self, path):
+ """ Change the current working directory to the specified path.
+ Path can be a string or a py.path.local object.
+ """
+ if self._cwd is None:
+ self._cwd = os.getcwd()
+ if hasattr(path, "chdir"):
+ path.chdir()
+ else:
+ os.chdir(path)
+
+ def undo(self):
+ """ Undo previous changes. This call consumes the
+ undo stack. Calling it a second time has no effect unless
+ you do more monkeypatching after the undo call.
+
+ There is generally no need to call `undo()`, since it is
+ called automatically during tear-down.
+
+ Note that the same `monkeypatch` fixture is used across a
+ single test function invocation. If `monkeypatch` is used both by
+ the test function itself and one of the test fixtures,
+ calling `undo()` will undo all of the changes made in
+ both functions.
+ """
+ for obj, name, value in reversed(self._setattr):
+ if value is not notset:
+ setattr(obj, name, value)
+ else:
+ delattr(obj, name)
+ self._setattr[:] = []
+ for dictionary, name, value in reversed(self._setitem):
+ if value is notset:
+ try:
+ del dictionary[name]
+ except KeyError:
+ pass # was already deleted, so we have the desired state
+ else:
+ dictionary[name] = value
+ self._setitem[:] = []
+ if self._savesyspath is not None:
+ sys.path[:] = self._savesyspath
+ self._savesyspath = None
+
+ if self._cwd is not None:
+ os.chdir(self._cwd)
+ self._cwd = None
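
A minimal sketch of a test consuming the ``monkeypatch`` funcarg defined
above (the patched targets are illustrative):

    # test_monkeypatch_usage.py -- illustrative only
    import os

    def test_fake_cwd_and_env(monkeypatch):
        # dotted import path form of setattr, as documented above
        monkeypatch.setattr("os.getcwd", lambda: "/fake")
        monkeypatch.setenv("EDITOR", "ed")
        monkeypatch.setitem(os.environ, "PAGER", "cat")
        assert os.getcwd() == "/fake"
        assert os.environ["EDITOR"] == "ed"
        # all changes are rolled back by mpatch.undo() after the test
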
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/nose.py b/testing/web-platform/tests/tools/pytest/_pytest/nose.py
new file mode 100644
index 000000000..038746868
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/nose.py
@@ -0,0 +1,71 @@
+""" run test suites written for nose. """
+
+import sys
+
+import py
+import pytest
+from _pytest import unittest
+
+
+def get_skip_exceptions():
+ skip_classes = set()
+ for module_name in ('unittest', 'unittest2', 'nose'):
+ mod = sys.modules.get(module_name)
+ if hasattr(mod, 'SkipTest'):
+ skip_classes.add(mod.SkipTest)
+ return tuple(skip_classes)
+
+
+def pytest_runtest_makereport(item, call):
+ if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
+ # let's substitute the excinfo with a pytest.skip one
+ call2 = call.__class__(lambda:
+ pytest.skip(str(call.excinfo.value)), call.when)
+ call.excinfo = call2.excinfo
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_runtest_setup(item):
+ if is_potential_nosetest(item):
+ if isinstance(item.parent, pytest.Generator):
+ gen = item.parent
+ if not hasattr(gen, '_nosegensetup'):
+ call_optional(gen.obj, 'setup')
+ if isinstance(gen.parent, pytest.Instance):
+ call_optional(gen.parent.obj, 'setup')
+ gen._nosegensetup = True
+ if not call_optional(item.obj, 'setup'):
+ # call module level setup if there is no object level one
+ call_optional(item.parent.obj, 'setup')
+ #XXX this implies we only call teardown when setup worked
+ item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
+
+def teardown_nose(item):
+ if is_potential_nosetest(item):
+ if not call_optional(item.obj, 'teardown'):
+ call_optional(item.parent.obj, 'teardown')
+ #if hasattr(item.parent, '_nosegensetup'):
+ # #call_optional(item._nosegensetup, 'teardown')
+ # del item.parent._nosegensetup
+
+
+def pytest_make_collect_report(collector):
+ if isinstance(collector, pytest.Generator):
+ call_optional(collector.obj, 'setup')
+
+
+def is_potential_nosetest(item):
+ # extra check needed since we do not do nose style setup/teardown
+ # on direct unittest style classes
+ return isinstance(item, pytest.Function) and \
+ not isinstance(item, unittest.TestCaseFunction)
+
+
+def call_optional(obj, name):
+ method = getattr(obj, name, None)
+ isfixture = hasattr(method, "_pytestfixturefunction")
+ if method is not None and not isfixture and py.builtin.callable(method):
+        # If there are any problems, allow the exception to raise rather than
+        # silently ignoring them
+ method()
+ return True
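
For reference, a minimal sketch of the nose-style module this plugin targets:
with no object-level ``setup``, call_optional falls back to the module-level
one, and teardown_nose mirrors it afterwards (the function names are the nose
conventions, the module itself is illustrative):

    # test_nose_style.py -- illustrative only
    values = []

    def setup():
        # module-level setup, reached through pytest_runtest_setup above
        values.append(1)

    def teardown():
        # module-level teardown, reached through teardown_nose above
        values.remove(1)

    def test_setup_ran():
        assert values == [1]
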
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/pastebin.py b/testing/web-platform/tests/tools/pytest/_pytest/pastebin.py
new file mode 100644
index 000000000..4ec62d022
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/pastebin.py
@@ -0,0 +1,92 @@
+""" submit failure or test session information to a pastebin service. """
+import pytest
+import sys
+import tempfile
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting")
+ group._addoption('--pastebin', metavar="mode",
+ action='store', dest="pastebin", default=None,
+ choices=['failed', 'all'],
+ help="send failed|all info to bpaste.net pastebin service.")
+
+@pytest.hookimpl(trylast=True)
+def pytest_configure(config):
+ import py
+ if config.option.pastebin == "all":
+ tr = config.pluginmanager.getplugin('terminalreporter')
+ # if no terminal reporter plugin is present, nothing we can do here;
+ # this can happen when this function executes in a slave node
+ # when using pytest-xdist, for example
+ if tr is not None:
+ # pastebin file will be utf-8 encoded binary file
+ config._pastebinfile = tempfile.TemporaryFile('w+b')
+ oldwrite = tr._tw.write
+ def tee_write(s, **kwargs):
+ oldwrite(s, **kwargs)
+ if py.builtin._istext(s):
+ s = s.encode('utf-8')
+ config._pastebinfile.write(s)
+ tr._tw.write = tee_write
+
+def pytest_unconfigure(config):
+ if hasattr(config, '_pastebinfile'):
+ # get terminal contents and delete file
+ config._pastebinfile.seek(0)
+ sessionlog = config._pastebinfile.read()
+ config._pastebinfile.close()
+ del config._pastebinfile
+ # undo our patching in the terminal reporter
+ tr = config.pluginmanager.getplugin('terminalreporter')
+ del tr._tw.__dict__['write']
+ # write summary
+ tr.write_sep("=", "Sending information to Paste Service")
+ pastebinurl = create_new_paste(sessionlog)
+ tr.write_line("pastebin session-log: %s\n" % pastebinurl)
+
+def create_new_paste(contents):
+ """
+ Creates a new paste using bpaste.net service.
+
+ :contents: paste contents as utf-8 encoded bytes
+ :returns: url to the pasted contents
+ """
+ import re
+ if sys.version_info < (3, 0):
+ from urllib import urlopen, urlencode
+ else:
+ from urllib.request import urlopen
+ from urllib.parse import urlencode
+
+ params = {
+ 'code': contents,
+ 'lexer': 'python3' if sys.version_info[0] == 3 else 'python',
+ 'expiry': '1week',
+ }
+ url = 'https://bpaste.net'
+ response = urlopen(url, data=urlencode(params).encode('ascii')).read()
+ m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8'))
+ if m:
+ return '%s/show/%s' % (url, m.group(1))
+ else:
+ return 'bad response: ' + response
+
+def pytest_terminal_summary(terminalreporter):
+ import _pytest.config
+ if terminalreporter.config.option.pastebin != "failed":
+ return
+ tr = terminalreporter
+ if 'failed' in tr.stats:
+ terminalreporter.write_sep("=", "Sending information to Paste Service")
+ for rep in terminalreporter.stats.get('failed'):
+ try:
+ msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
+ except AttributeError:
+ msg = tr._getfailureheadline(rep)
+ tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
+ rep.toterminal(tw)
+ s = tw.stringio.getvalue()
+ assert len(s)
+ pastebinurl = create_new_paste(s)
+ tr.write_line("%s --> %s" %(msg, pastebinurl))
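
The ``--pastebin=all`` branch works by tee-ing the terminal writer's
``write`` into a temporary binary file whose contents are pasted at the end
of the session. A stripped-down, pytest-independent sketch of that tee
pattern (the Writer class and names are illustrative):

    import tempfile

    class Writer(object):
        def write(self, s, **kwargs):
            pass  # stand-in for the real terminal writer

    def install_tee(writer, sink):
        # wrap writer.write so every chunk also lands in 'sink',
        # mirroring tee_write in pytest_configure above
        oldwrite = writer.write
        def tee_write(s, **kwargs):
            oldwrite(s, **kwargs)
            if isinstance(s, str):
                s = s.encode("utf-8")
            sink.write(s)
        writer.write = tee_write

    sink = tempfile.TemporaryFile("w+b")
    writer = Writer()
    install_tee(writer, sink)
    writer.write("session log line\n")
    sink.seek(0)
    assert sink.read() == b"session log line\n"
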
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/pdb.py b/testing/web-platform/tests/tools/pytest/_pytest/pdb.py
new file mode 100644
index 000000000..84c920d17
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/pdb.py
@@ -0,0 +1,109 @@
+""" interactive debugging with PDB, the Python Debugger. """
+from __future__ import absolute_import
+import pdb
+import sys
+
+import pytest
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption('--pdb',
+ action="store_true", dest="usepdb", default=False,
+ help="start the interactive Python debugger on errors.")
+
+def pytest_namespace():
+ return {'set_trace': pytestPDB().set_trace}
+
+def pytest_configure(config):
+ if config.getvalue("usepdb"):
+ config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
+
+ old = (pdb.set_trace, pytestPDB._pluginmanager)
+ def fin():
+ pdb.set_trace, pytestPDB._pluginmanager = old
+ pytestPDB._config = None
+ pdb.set_trace = pytest.set_trace
+ pytestPDB._pluginmanager = config.pluginmanager
+ pytestPDB._config = config
+ config._cleanup.append(fin)
+
+class pytestPDB:
+ """ Pseudo PDB that defers to the real pdb. """
+ _pluginmanager = None
+ _config = None
+
+ def set_trace(self):
+ """ invoke PDB set_trace debugging, dropping any IO capturing. """
+ import _pytest.config
+ frame = sys._getframe().f_back
+ if self._pluginmanager is not None:
+ capman = self._pluginmanager.getplugin("capturemanager")
+ if capman:
+ capman.suspendcapture(in_=True)
+ tw = _pytest.config.create_terminal_writer(self._config)
+ tw.line()
+ tw.sep(">", "PDB set_trace (IO-capturing turned off)")
+ self._pluginmanager.hook.pytest_enter_pdb(config=self._config)
+ pdb.Pdb().set_trace(frame)
+
+
+class PdbInvoke:
+ def pytest_exception_interact(self, node, call, report):
+ capman = node.config.pluginmanager.getplugin("capturemanager")
+ if capman:
+ out, err = capman.suspendcapture(in_=True)
+ sys.stdout.write(out)
+ sys.stdout.write(err)
+ _enter_pdb(node, call.excinfo, report)
+
+ def pytest_internalerror(self, excrepr, excinfo):
+ for line in str(excrepr).split("\n"):
+ sys.stderr.write("INTERNALERROR> %s\n" %line)
+ sys.stderr.flush()
+ tb = _postmortem_traceback(excinfo)
+ post_mortem(tb)
+
+
+def _enter_pdb(node, excinfo, rep):
+ # XXX we re-use the TerminalReporter's terminalwriter
+ # because this seems to avoid some encoding related troubles
+ # for not completely clear reasons.
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
+ tw.line()
+ tw.sep(">", "traceback")
+ rep.toterminal(tw)
+ tw.sep(">", "entering PDB")
+ tb = _postmortem_traceback(excinfo)
+ post_mortem(tb)
+ rep._pdbshown = True
+ return rep
+
+
+def _postmortem_traceback(excinfo):
+ # A doctest.UnexpectedException is not useful for post_mortem.
+ # Use the underlying exception instead:
+ from doctest import UnexpectedException
+ if isinstance(excinfo.value, UnexpectedException):
+ return excinfo.value.exc_info[2]
+ else:
+ return excinfo._excinfo[2]
+
+
+def _find_last_non_hidden_frame(stack):
+ i = max(0, len(stack) - 1)
+ while i and stack[i][0].f_locals.get("__tracebackhide__", False):
+ i -= 1
+ return i
+
+
+def post_mortem(t):
+ class Pdb(pdb.Pdb):
+ def get_stack(self, f, t):
+ stack, i = pdb.Pdb.get_stack(self, f, t)
+ if f is None:
+ i = _find_last_non_hidden_frame(stack)
+ return stack, i
+ p = Pdb()
+ p.reset()
+ p.interaction(None, t)
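
A minimal sketch of the two ways the plugin above is normally reached: an
explicit ``pytest.set_trace()`` call inside a test (the pseudo-PDB suspends
IO capturing first), or ``py.test --pdb``, which enters post-mortem on the
first failure via PdbInvoke (the test contents are illustrative):

    # test_debugging.py -- illustrative only
    import pytest

    def test_inspect_interactively():
        data = {"answer": 42}
        pytest.set_trace()   # from pytest_namespace() above; capturing is suspended
        assert data["answer"] == 42

    # post-mortem on failures instead:
    #   py.test --pdb test_debugging.py
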
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/pytester.py b/testing/web-platform/tests/tools/pytest/_pytest/pytester.py
new file mode 100644
index 000000000..faed7f581
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/pytester.py
@@ -0,0 +1,1110 @@
+""" (disabled by default) support for testing pytest and pytest plugins. """
+import codecs
+import gc
+import os
+import platform
+import re
+import subprocess
+import sys
+import time
+import traceback
+from fnmatch import fnmatch
+
+from py.builtin import print_
+
+from _pytest._code import Source
+import py
+import pytest
+from _pytest.main import Session, EXIT_OK
+
+
+def pytest_addoption(parser):
+ # group = parser.getgroup("pytester", "pytester (self-tests) options")
+ parser.addoption('--lsof',
+ action="store_true", dest="lsof", default=False,
+ help=("run FD checks if lsof is available"))
+
+ parser.addoption('--runpytest', default="inprocess", dest="runpytest",
+ choices=("inprocess", "subprocess", ),
+ help=("run pytest sub runs in tests using an 'inprocess' "
+ "or 'subprocess' (python -m main) method"))
+
+
+def pytest_configure(config):
+ # This might be called multiple times. Only take the first.
+ global _pytest_fullpath
+ try:
+ _pytest_fullpath
+ except NameError:
+ _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+ _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
+
+ if config.getvalue("lsof"):
+ checker = LsofFdLeakChecker()
+ if checker.matching_platform():
+ config.pluginmanager.register(checker)
+
+
+class LsofFdLeakChecker(object):
+ def get_open_files(self):
+ out = self._exec_lsof()
+ open_files = self._parse_lsof_output(out)
+ return open_files
+
+ def _exec_lsof(self):
+ pid = os.getpid()
+ return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
+
+ def _parse_lsof_output(self, out):
+ def isopen(line):
+ return line.startswith('f') and ("deleted" not in line and
+ 'mem' not in line and "txt" not in line and 'cwd' not in line)
+
+ open_files = []
+
+ for line in out.split("\n"):
+ if isopen(line):
+ fields = line.split('\0')
+ fd = fields[0][1:]
+ filename = fields[1][1:]
+ if filename.startswith('/'):
+ open_files.append((fd, filename))
+
+ return open_files
+
+ def matching_platform(self):
+ try:
+ py.process.cmdexec("lsof -v")
+ except (py.process.cmdexec.Error, UnicodeDecodeError):
+ # cmdexec may raise UnicodeDecodeError on Windows systems
+ # with locale other than english:
+ # https://bitbucket.org/pytest-dev/py/issues/66
+ return False
+ else:
+ return True
+
+ @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+ def pytest_runtest_item(self, item):
+ lines1 = self.get_open_files()
+ yield
+ if hasattr(sys, "pypy_version_info"):
+ gc.collect()
+ lines2 = self.get_open_files()
+
+ new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+ leaked_files = [t for t in lines2 if t[0] in new_fds]
+ if leaked_files:
+ error = []
+ error.append("***** %s FD leakage detected" % len(leaked_files))
+ error.extend([str(f) for f in leaked_files])
+ error.append("*** Before:")
+ error.extend([str(f) for f in lines1])
+ error.append("*** After:")
+ error.extend([str(f) for f in lines2])
+ error.append(error[0])
+ error.append("*** function %s:%s: %s " % item.location)
+ pytest.fail("\n".join(error), pytrace=False)
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+ 'python2.7': r'C:\Python27\python.exe',
+ 'python2.6': r'C:\Python26\python.exe',
+ 'python3.1': r'C:\Python31\python.exe',
+ 'python3.2': r'C:\Python32\python.exe',
+ 'python3.3': r'C:\Python33\python.exe',
+ 'python3.4': r'C:\Python34\python.exe',
+ 'python3.5': r'C:\Python35\python.exe',
+}
+
+def getexecutable(name, cache={}):
+ try:
+ return cache[name]
+ except KeyError:
+ executable = py.path.local.sysfind(name)
+ if executable:
+ if name == "jython":
+ import subprocess
+ popen = subprocess.Popen([str(executable), "--version"],
+ universal_newlines=True, stderr=subprocess.PIPE)
+ out, err = popen.communicate()
+ if not err or "2.5" not in err:
+ executable = None
+ if "2.5.2" in err:
+ executable = None # http://bugs.jython.org/issue1790
+ cache[name] = executable
+ return executable
+
+@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
+ 'pypy', 'pypy3'])
+def anypython(request):
+ name = request.param
+ executable = getexecutable(name)
+ if executable is None:
+ if sys.platform == "win32":
+ executable = winpymap.get(name, None)
+ if executable:
+ executable = py.path.local(executable)
+ if executable.check():
+ return executable
+ pytest.skip("no suitable %s found" % (name,))
+ return executable
+
+# used at least by pytest-xdist plugin
+@pytest.fixture
+def _pytest(request):
+ """ Return a helper which offers a gethookrecorder(hook)
+ method which returns a HookRecorder instance which helps
+ to make assertions about called hooks.
+ """
+ return PytestArg(request)
+
+class PytestArg:
+ def __init__(self, request):
+ self.request = request
+
+ def gethookrecorder(self, hook):
+ hookrecorder = HookRecorder(hook._pm)
+ self.request.addfinalizer(hookrecorder.finish_recording)
+ return hookrecorder
+
+
+def get_public_names(l):
+ """Only return names from iterator l without a leading underscore."""
+ return [x for x in l if x[0] != "_"]
+
+
+class ParsedCall:
+ def __init__(self, name, kwargs):
+ self.__dict__.update(kwargs)
+ self._name = name
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ del d['_name']
+ return "<ParsedCall %r(**%r)>" %(self._name, d)
+
+
+class HookRecorder:
+ """Record all hooks called in a plugin manager.
+
+ This wraps all the hook calls in the plugin manager, recording
+ each call before propagating the normal calls.
+
+ """
+
+ def __init__(self, pluginmanager):
+ self._pluginmanager = pluginmanager
+ self.calls = []
+
+ def before(hook_name, hook_impls, kwargs):
+ self.calls.append(ParsedCall(hook_name, kwargs))
+
+ def after(outcome, hook_name, hook_impls, kwargs):
+ pass
+
+ self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
+
+ def finish_recording(self):
+ self._undo_wrapping()
+
+ def getcalls(self, names):
+ if isinstance(names, str):
+ names = names.split()
+ return [call for call in self.calls if call._name in names]
+
+ def assert_contains(self, entries):
+ __tracebackhide__ = True
+ i = 0
+ entries = list(entries)
+ backlocals = sys._getframe(1).f_locals
+ while entries:
+ name, check = entries.pop(0)
+ for ind, call in enumerate(self.calls[i:]):
+ if call._name == name:
+ print_("NAMEMATCH", name, call)
+ if eval(check, backlocals, call.__dict__):
+ print_("CHECKERMATCH", repr(check), "->", call)
+ else:
+ print_("NOCHECKERMATCH", repr(check), "-", call)
+ continue
+ i += ind + 1
+ break
+ print_("NONAMEMATCH", name, "with", call)
+ else:
+ pytest.fail("could not find %r check %r" % (name, check))
+
+ def popcall(self, name):
+ __tracebackhide__ = True
+ for i, call in enumerate(self.calls):
+ if call._name == name:
+ del self.calls[i]
+ return call
+ lines = ["could not find call %r, in:" % (name,)]
+ lines.extend([" %s" % str(x) for x in self.calls])
+ pytest.fail("\n".join(lines))
+
+ def getcall(self, name):
+ l = self.getcalls(name)
+ assert len(l) == 1, (name, l)
+ return l[0]
+
+ # functionality for test reports
+
+ def getreports(self,
+ names="pytest_runtest_logreport pytest_collectreport"):
+ return [x.report for x in self.getcalls(names)]
+
+ def matchreport(self, inamepart="",
+ names="pytest_runtest_logreport pytest_collectreport", when=None):
+ """ return a testreport whose dotted import path matches """
+ l = []
+ for rep in self.getreports(names=names):
+ try:
+ if not when and rep.when != "call" and rep.passed:
+ # setup/teardown passing reports - let's ignore those
+ continue
+ except AttributeError:
+ pass
+ if when and getattr(rep, 'when', None) != when:
+ continue
+ if not inamepart or inamepart in rep.nodeid.split("::"):
+ l.append(rep)
+ if not l:
+ raise ValueError("could not find test report matching %r: "
+ "no test reports at all!" % (inamepart,))
+ if len(l) > 1:
+ raise ValueError(
+ "found 2 or more testreports matching %r: %s" %(inamepart, l))
+ return l[0]
+
+ def getfailures(self,
+ names='pytest_runtest_logreport pytest_collectreport'):
+ return [rep for rep in self.getreports(names) if rep.failed]
+
+ def getfailedcollections(self):
+ return self.getfailures('pytest_collectreport')
+
+ def listoutcomes(self):
+ passed = []
+ skipped = []
+ failed = []
+ for rep in self.getreports(
+ "pytest_collectreport pytest_runtest_logreport"):
+ if rep.passed:
+ if getattr(rep, "when", None) == "call":
+ passed.append(rep)
+ elif rep.skipped:
+ skipped.append(rep)
+ elif rep.failed:
+ failed.append(rep)
+ return passed, skipped, failed
+
+ def countoutcomes(self):
+ return [len(x) for x in self.listoutcomes()]
+
+ def assertoutcome(self, passed=0, skipped=0, failed=0):
+ realpassed, realskipped, realfailed = self.listoutcomes()
+ assert passed == len(realpassed)
+ assert skipped == len(realskipped)
+ assert failed == len(realfailed)
+
+ def clear(self):
+ self.calls[:] = []
+
+
+@pytest.fixture
+def linecomp(request):
+ return LineComp()
+
+
+def pytest_funcarg__LineMatcher(request):
+ return LineMatcher
+
+
+@pytest.fixture
+def testdir(request, tmpdir_factory):
+ return Testdir(request, tmpdir_factory)
+
+
+rex_outcome = re.compile("(\d+) ([\w-]+)")
+class RunResult:
+ """The result of running a command.
+
+ Attributes:
+
+ :ret: The return value.
+ :outlines: List of lines captured from stdout.
+    :errlines: List of lines captured from stderr.
+ :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
+ reconstruct stdout or the commonly used
+ ``stdout.fnmatch_lines()`` method.
+    :stderr: :py:class:`LineMatcher` of stderr.
+ :duration: Duration in seconds.
+
+ """
+ def __init__(self, ret, outlines, errlines, duration):
+ self.ret = ret
+ self.outlines = outlines
+ self.errlines = errlines
+ self.stdout = LineMatcher(outlines)
+ self.stderr = LineMatcher(errlines)
+ self.duration = duration
+
+ def parseoutcomes(self):
+ """ Return a dictionary of outcomestring->num from parsing
+ the terminal output that the test process produced."""
+ for line in reversed(self.outlines):
+ if 'seconds' in line:
+ outcomes = rex_outcome.findall(line)
+ if outcomes:
+ d = {}
+ for num, cat in outcomes:
+ d[cat] = int(num)
+ return d
+
+ def assert_outcomes(self, passed=0, skipped=0, failed=0):
+ """ assert that the specified outcomes appear with the respective
+ numbers (0 means it didn't occur) in the text output from a test run."""
+ d = self.parseoutcomes()
+ assert passed == d.get("passed", 0)
+ assert skipped == d.get("skipped", 0)
+ assert failed == d.get("failed", 0)
+
+
+
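
A minimal sketch of how RunResult and the ``testdir`` fixture defined below
are consumed by a plugin's own test suite; since this plugin is disabled by
default it has to be enabled explicitly (the file contents are illustrative):

    # conftest.py of the plugin under test
    pytest_plugins = "pytester"

    # test_selftest.py
    def test_reports_one_pass(testdir):
        testdir.makepyfile("""
            def test_ok():
                assert 1 + 1 == 2
        """)
        result = testdir.runpytest("-v")
        result.assert_outcomes(passed=1)
        result.stdout.fnmatch_lines(["*1 passed*"])
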
+class Testdir:
+ """Temporary test directory with tools to test/run py.test itself.
+
+ This is based on the ``tmpdir`` fixture but provides a number of
+ methods which aid with testing py.test itself. Unless
+ :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
+ current working directory.
+
+ Attributes:
+
+ :tmpdir: The :py:class:`py.path.local` instance of the temporary
+ directory.
+
+ :plugins: A list of plugins to use with :py:meth:`parseconfig` and
+ :py:meth:`runpytest`. Initially this is an empty list but
+ plugins can be added to the list. The type of items to add to
+ the list depend on the method which uses them so refer to them
+ for details.
+
+ """
+
+ def __init__(self, request, tmpdir_factory):
+ self.request = request
+ # XXX remove duplication with tmpdir plugin
+ basetmp = tmpdir_factory.ensuretemp("testdir")
+ name = request.function.__name__
+ for i in range(100):
+ try:
+ tmpdir = basetmp.mkdir(name + str(i))
+ except py.error.EEXIST:
+ continue
+ break
+ self.tmpdir = tmpdir
+ self.plugins = []
+ self._savesyspath = (list(sys.path), list(sys.meta_path))
+ self._savemodulekeys = set(sys.modules)
+ self.chdir() # always chdir
+ self.request.addfinalizer(self.finalize)
+ method = self.request.config.getoption("--runpytest")
+ if method == "inprocess":
+ self._runpytest_method = self.runpytest_inprocess
+ elif method == "subprocess":
+ self._runpytest_method = self.runpytest_subprocess
+
+ def __repr__(self):
+ return "<Testdir %r>" % (self.tmpdir,)
+
+ def finalize(self):
+ """Clean up global state artifacts.
+
+ Some methods modify the global interpreter state and this
+ tries to clean this up. It does not remove the temporary
+ directory however so it can be looked at after the test run
+ has finished.
+
+ """
+ sys.path[:], sys.meta_path[:] = self._savesyspath
+ if hasattr(self, '_olddir'):
+ self._olddir.chdir()
+ self.delete_loaded_modules()
+
+ def delete_loaded_modules(self):
+ """Delete modules that have been loaded during a test.
+
+ This allows the interpreter to catch module changes in case
+ the module is re-imported.
+ """
+ for name in set(sys.modules).difference(self._savemodulekeys):
+ # it seems zope.interfaces is keeping some state
+ # (used by twisted related tests)
+ if name != "zope.interface":
+ del sys.modules[name]
+
+ def make_hook_recorder(self, pluginmanager):
+ """Create a new :py:class:`HookRecorder` for a PluginManager."""
+ assert not hasattr(pluginmanager, "reprec")
+ pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
+ self.request.addfinalizer(reprec.finish_recording)
+ return reprec
+
+ def chdir(self):
+ """Cd into the temporary directory.
+
+ This is done automatically upon instantiation.
+
+ """
+ old = self.tmpdir.chdir()
+ if not hasattr(self, '_olddir'):
+ self._olddir = old
+
+ def _makefile(self, ext, args, kwargs):
+ items = list(kwargs.items())
+ if args:
+ source = py.builtin._totext("\n").join(
+ map(py.builtin._totext, args)) + py.builtin._totext("\n")
+ basename = self.request.function.__name__
+ items.insert(0, (basename, source))
+ ret = None
+ for name, value in items:
+ p = self.tmpdir.join(name).new(ext=ext)
+ source = Source(value)
+ def my_totext(s, encoding="utf-8"):
+ if py.builtin._isbytes(s):
+ s = py.builtin._totext(s, encoding=encoding)
+ return s
+ source_unicode = "\n".join([my_totext(line) for line in source.lines])
+ source = py.builtin._totext(source_unicode)
+ content = source.strip().encode("utf-8") # + "\n"
+ #content = content.rstrip() + "\n"
+ p.write(content, "wb")
+ if ret is None:
+ ret = p
+ return ret
+
+ def makefile(self, ext, *args, **kwargs):
+ """Create a new file in the testdir.
+
+ ext: The extension the file should use, including the dot.
+ E.g. ".py".
+
+ args: All args will be treated as strings and joined using
+ newlines. The result will be written as contents to the
+ file. The name of the file will be based on the test
+ function requesting this fixture.
+ E.g. "testdir.makefile('.txt', 'line1', 'line2')"
+
+ kwargs: Each keyword is the name of a file, while the value of
+ it will be written as contents of the file.
+ E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
+
+ """
+ return self._makefile(ext, args, kwargs)
+
+ def makeconftest(self, source):
+ """Write a contest.py file with 'source' as contents."""
+ return self.makepyfile(conftest=source)
+
+ def makeini(self, source):
+ """Write a tox.ini file with 'source' as contents."""
+ return self.makefile('.ini', tox=source)
+
+ def getinicfg(self, source):
+ """Return the pytest section from the tox.ini config file."""
+ p = self.makeini(source)
+ return py.iniconfig.IniConfig(p)['pytest']
+
+ def makepyfile(self, *args, **kwargs):
+ """Shortcut for .makefile() with a .py extension."""
+ return self._makefile('.py', args, kwargs)
+
+ def maketxtfile(self, *args, **kwargs):
+ """Shortcut for .makefile() with a .txt extension."""
+ return self._makefile('.txt', args, kwargs)
+
+ def syspathinsert(self, path=None):
+ """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
+
+ This is undone automatically after the test.
+ """
+ if path is None:
+ path = self.tmpdir
+ sys.path.insert(0, str(path))
+ # a call to syspathinsert() usually means that the caller
+ # wants to import some dynamically created files.
+ # with python3 we thus invalidate import caches.
+ self._possibly_invalidate_import_caches()
+
+ def _possibly_invalidate_import_caches(self):
+ # invalidate caches if we can (py33 and above)
+ try:
+ import importlib
+ except ImportError:
+ pass
+ else:
+ if hasattr(importlib, "invalidate_caches"):
+ importlib.invalidate_caches()
+
+ def mkdir(self, name):
+ """Create a new (sub)directory."""
+ return self.tmpdir.mkdir(name)
+
+ def mkpydir(self, name):
+ """Create a new python package.
+
+        This creates a (sub)directory with an empty ``__init__.py``
+        file so that it is recognised as a python package.
+
+ """
+ p = self.mkdir(name)
+ p.ensure("__init__.py")
+ return p
+
+ Session = Session
+ def getnode(self, config, arg):
+ """Return the collection node of a file.
+
+ :param config: :py:class:`_pytest.config.Config` instance, see
+ :py:meth:`parseconfig` and :py:meth:`parseconfigure` to
+ create the configuration.
+
+ :param arg: A :py:class:`py.path.local` instance of the file.
+
+ """
+ session = Session(config)
+ assert '::' not in str(arg)
+ p = py.path.local(arg)
+ config.hook.pytest_sessionstart(session=session)
+ res = session.perform_collect([str(p)], genitems=False)[0]
+ config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+ return res
+
+ def getpathnode(self, path):
+ """Return the collection node of a file.
+
+ This is like :py:meth:`getnode` but uses
+ :py:meth:`parseconfigure` to create the (configured) py.test
+ Config instance.
+
+ :param path: A :py:class:`py.path.local` instance of the file.
+
+ """
+ config = self.parseconfigure(path)
+ session = Session(config)
+ x = session.fspath.bestrelpath(path)
+ config.hook.pytest_sessionstart(session=session)
+ res = session.perform_collect([x], genitems=False)[0]
+ config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+ return res
+
+ def genitems(self, colitems):
+ """Generate all test items from a collection node.
+
+ This recurses into the collection node and returns a list of
+ all the test items contained within.
+
+ """
+ session = colitems[0].session
+ result = []
+ for colitem in colitems:
+ result.extend(session.genitems(colitem))
+ return result
+
+ def runitem(self, source):
+ """Run the "test_func" Item.
+
+ The calling test instance (the class which contains the test
+ method) must provide a ``.getrunner()`` method which should
+ return a runner which can run the test protocol for a single
+ item, like e.g. :py:func:`_pytest.runner.runtestprotocol`.
+
+ """
+ # used from runner functional tests
+ item = self.getitem(source)
+ # the test class where we are called from wants to provide the runner
+ testclassinstance = self.request.instance
+ runner = testclassinstance.getrunner()
+ return runner(item)
+
+ def inline_runsource(self, source, *cmdlineargs):
+ """Run a test module in process using ``pytest.main()``.
+
+ This run writes "source" into a temporary file and runs
+ ``pytest.main()`` on it, returning a :py:class:`HookRecorder`
+ instance for the result.
+
+ :param source: The source code of the test module.
+
+ :param cmdlineargs: Any extra command line arguments to use.
+
+ :return: :py:class:`HookRecorder` instance of the result.
+
+ """
+ p = self.makepyfile(source)
+ l = list(cmdlineargs) + [p]
+ return self.inline_run(*l)
+
+ def inline_genitems(self, *args):
+ """Run ``pytest.main(['--collectonly'])`` in-process.
+
+        Returns a tuple of the collected items and a
+ :py:class:`HookRecorder` instance.
+
+ This runs the :py:func:`pytest.main` function to run all of
+ py.test inside the test process itself like
+ :py:meth:`inline_run`. However the return value is a tuple of
+ the collection items and a :py:class:`HookRecorder` instance.
+
+ """
+ rec = self.inline_run("--collect-only", *args)
+ items = [x.item for x in rec.getcalls("pytest_itemcollected")]
+ return items, rec
+
+ def inline_run(self, *args, **kwargs):
+ """Run ``pytest.main()`` in-process, returning a HookRecorder.
+
+ This runs the :py:func:`pytest.main` function to run all of
+ py.test inside the test process itself. This means it can
+ return a :py:class:`HookRecorder` instance which gives more
+        detailed results from the run than can be done by matching
+ stdout/stderr from :py:meth:`runpytest`.
+
+ :param args: Any command line arguments to pass to
+ :py:func:`pytest.main`.
+
+ :param plugin: (keyword-only) Extra plugin instances the
+ ``pytest.main()`` instance should use.
+
+ :return: A :py:class:`HookRecorder` instance.
+
+ """
+ rec = []
+ class Collect:
+ def pytest_configure(x, config):
+ rec.append(self.make_hook_recorder(config.pluginmanager))
+
+ plugins = kwargs.get("plugins") or []
+ plugins.append(Collect())
+ ret = pytest.main(list(args), plugins=plugins)
+ self.delete_loaded_modules()
+ if len(rec) == 1:
+ reprec = rec.pop()
+ else:
+ class reprec:
+ pass
+ reprec.ret = ret
+
+ # typically we reraise keyboard interrupts from the child run
+ # because it's our user requesting interruption of the testing
+ if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
+ calls = reprec.getcalls("pytest_keyboard_interrupt")
+ if calls and calls[-1].excinfo.type == KeyboardInterrupt:
+ raise KeyboardInterrupt()
+ return reprec
+
+ def runpytest_inprocess(self, *args, **kwargs):
+ """ Return result of running pytest in-process, providing a similar
+ interface to what self.runpytest() provides. """
+ if kwargs.get("syspathinsert"):
+ self.syspathinsert()
+ now = time.time()
+ capture = py.io.StdCapture()
+ try:
+ try:
+ reprec = self.inline_run(*args, **kwargs)
+ except SystemExit as e:
+ class reprec:
+ ret = e.args[0]
+ except Exception:
+ traceback.print_exc()
+ class reprec:
+ ret = 3
+ finally:
+ out, err = capture.reset()
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+
+ res = RunResult(reprec.ret,
+ out.split("\n"), err.split("\n"),
+ time.time()-now)
+ res.reprec = reprec
+ return res
+
+ def runpytest(self, *args, **kwargs):
+ """ Run pytest inline or in a subprocess, depending on the command line
+ option "--runpytest" and return a :py:class:`RunResult`.
+
+ """
+ args = self._ensure_basetemp(args)
+ return self._runpytest_method(*args, **kwargs)
+
+ def _ensure_basetemp(self, args):
+ args = [str(x) for x in args]
+ for x in args:
+ if str(x).startswith('--basetemp'):
+ #print ("basedtemp exists: %s" %(args,))
+ break
+ else:
+ args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
+ #print ("added basetemp: %s" %(args,))
+ return args
+
+ def parseconfig(self, *args):
+ """Return a new py.test Config instance from given commandline args.
+
+ This invokes the py.test bootstrapping code in _pytest.config
+ to create a new :py:class:`_pytest.core.PluginManager` and
+ call the pytest_cmdline_parse hook to create new
+ :py:class:`_pytest.config.Config` instance.
+
+ If :py:attr:`plugins` has been populated they should be plugin
+ modules which will be registered with the PluginManager.
+
+ """
+ args = self._ensure_basetemp(args)
+
+ import _pytest.config
+ config = _pytest.config._prepareconfig(args, self.plugins)
+ # we don't know what the test will do with this half-setup config
+ # object and thus we make sure it gets unconfigured properly in any
+ # case (otherwise capturing could still be active, for example)
+ self.request.addfinalizer(config._ensure_unconfigure)
+ return config
+
+ def parseconfigure(self, *args):
+ """Return a new py.test configured Config instance.
+
+ This returns a new :py:class:`_pytest.config.Config` instance
+ like :py:meth:`parseconfig`, but also calls the
+ pytest_configure hook.
+
+ """
+ config = self.parseconfig(*args)
+ config._do_configure()
+ self.request.addfinalizer(config._ensure_unconfigure)
+ return config
+
+ def getitem(self, source, funcname="test_func"):
+ """Return the test item for a test function.
+
+ This writes the source to a python file and runs py.test's
+ collection on the resulting module, returning the test item
+ for the requested function name.
+
+ :param source: The module source.
+
+ :param funcname: The name of the test function for which the
+ Item must be returned.
+
+ """
+ items = self.getitems(source)
+ for item in items:
+ if item.name == funcname:
+ return item
+ assert 0, "%r item not found in module:\n%s\nitems: %s" %(
+ funcname, source, items)
+
+ def getitems(self, source):
+ """Return all test items collected from the module.
+
+ This writes the source to a python file and runs py.test's
+ collection on the resulting module, returning all test items
+ contained within.
+
+ """
+ modcol = self.getmodulecol(source)
+ return self.genitems([modcol])
+
+ def getmodulecol(self, source, configargs=(), withinit=False):
+ """Return the module collection node for ``source``.
+
+ This writes ``source`` to a file using :py:meth:`makepyfile`
+ and then runs the py.test collection on it, returning the
+ collection node for the test module.
+
+ :param source: The source code of the module to collect.
+
+ :param configargs: Any extra arguments to pass to
+ :py:meth:`parseconfigure`.
+
+ :param withinit: Whether to also write a ``__init__.py`` file
+ to the temporary directory to ensure it is a package.
+
+ """
+ kw = {self.request.function.__name__: Source(source).strip()}
+ path = self.makepyfile(**kw)
+ if withinit:
+ self.makepyfile(__init__ = "#")
+ self.config = config = self.parseconfigure(path, *configargs)
+ node = self.getnode(config, path)
+ return node
+
+ def collect_by_name(self, modcol, name):
+ """Return the collection node for name from the module collection.
+
+ This will search a module collection node for a collection
+ node matching the given name.
+
+ :param modcol: A module collection node, see
+ :py:meth:`getmodulecol`.
+
+ :param name: The name of the node to return.
+
+ """
+ for colitem in modcol._memocollect():
+ if colitem.name == name:
+ return colitem
+
+ def popen(self, cmdargs, stdout, stderr, **kw):
+ """Invoke subprocess.Popen.
+
+ This calls subprocess.Popen, making sure the current working
+ directory is on the PYTHONPATH.
+
+ You probably want to use :py:meth:`run` instead.
+
+ """
+ env = os.environ.copy()
+ env['PYTHONPATH'] = os.pathsep.join(filter(None, [
+ str(os.getcwd()), env.get('PYTHONPATH', '')]))
+ kw['env'] = env
+ return subprocess.Popen(cmdargs,
+ stdout=stdout, stderr=stderr, **kw)
+
+ def run(self, *cmdargs):
+ """Run a command with arguments.
+
+ Run a process using subprocess.Popen saving the stdout and
+ stderr.
+
+ Returns a :py:class:`RunResult`.
+
+ """
+ return self._run(*cmdargs)
+
+ def _run(self, *cmdargs):
+ cmdargs = [str(x) for x in cmdargs]
+ p1 = self.tmpdir.join("stdout")
+ p2 = self.tmpdir.join("stderr")
+ print_("running:", ' '.join(cmdargs))
+ print_(" in:", str(py.path.local()))
+ f1 = codecs.open(str(p1), "w", encoding="utf8")
+ f2 = codecs.open(str(p2), "w", encoding="utf8")
+ try:
+ now = time.time()
+ popen = self.popen(cmdargs, stdout=f1, stderr=f2,
+ close_fds=(sys.platform != "win32"))
+ ret = popen.wait()
+ finally:
+ f1.close()
+ f2.close()
+ f1 = codecs.open(str(p1), "r", encoding="utf8")
+ f2 = codecs.open(str(p2), "r", encoding="utf8")
+ try:
+ out = f1.read().splitlines()
+ err = f2.read().splitlines()
+ finally:
+ f1.close()
+ f2.close()
+ self._dump_lines(out, sys.stdout)
+ self._dump_lines(err, sys.stderr)
+ return RunResult(ret, out, err, time.time()-now)
+
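+ # Editor's note: an illustrative sketch, not part of the original
+ # source, of running an external command through ``run`` (assumes a
+ # POSIX ``echo`` binary is available; the test name is invented):
+ #
+ #     def test_run_echo(testdir):
+ #         result = testdir.run("echo", "hello")
+ #         assert result.ret == 0
+ #         assert "hello" in result.stdout.str()
+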
+ def _dump_lines(self, lines, fp):
+ try:
+ for line in lines:
+ py.builtin.print_(line, file=fp)
+ except UnicodeEncodeError:
+ print("couldn't print to %s because of encoding" % (fp,))
+
+ def _getpytestargs(self):
+ # we cannot use "(sys.executable,script)"
+ # because on windows the script is e.g. a py.test.exe
+ return (sys.executable, _pytest_fullpath,) # noqa
+
+ def runpython(self, script):
+ """Run a python script using sys.executable as interpreter.
+
+ Returns a :py:class:`RunResult`.
+ """
+ return self.run(sys.executable, script)
+
+ def runpython_c(self, command):
+ """Run python -c "command", return a :py:class:`RunResult`."""
+ return self.run(sys.executable, "-c", command)
+
+ def runpytest_subprocess(self, *args, **kwargs):
+ """Run py.test as a subprocess with given arguments.
+
+ Any plugins added to the :py:attr:`plugins` list will be added
+ using the ``-p`` command line option. Additionally
+ ``--basetemp`` is used to put any temporary files and directories
+ in a numbered directory prefixed with "runpytest-" so they do
+ not conflict with the normal numbered pytest location for
+ temporary files and directories.
+
+ Returns a :py:class:`RunResult`.
+
+ """
+ p = py.path.local.make_numbered_dir(prefix="runpytest-",
+ keep=None, rootdir=self.tmpdir)
+ args = ('--basetemp=%s' % p, ) + args
+ #for x in args:
+ # if '--confcutdir' in str(x):
+ # break
+ #else:
+ # pass
+ # args = ('--confcutdir=.',) + args
+ plugins = [x for x in self.plugins if isinstance(x, str)]
+ if plugins:
+ args = ('-p', plugins[0]) + args
+ args = self._getpytestargs() + args
+ return self.run(*args)
+
+ def spawn_pytest(self, string, expect_timeout=10.0):
+ """Run py.test using pexpect.
+
+ This makes sure to use the right py.test and sets up the
+ temporary directory locations.
+
+ The pexpect child is returned.
+
+ """
+ basetemp = self.tmpdir.mkdir("pexpect")
+ invoke = " ".join(map(str, self._getpytestargs()))
+ cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
+ return self.spawn(cmd, expect_timeout=expect_timeout)
+
+ def spawn(self, cmd, expect_timeout=10.0):
+ """Run a command using pexpect.
+
+ The pexpect child is returned.
+ """
+ pexpect = pytest.importorskip("pexpect", "3.0")
+ if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
+ pytest.skip("pypy-64 bit not supported")
+ if sys.platform == "darwin":
+ pytest.xfail("pexpect does not work reliably on darwin?!")
+ if sys.platform.startswith("freebsd"):
+ pytest.xfail("pexpect does not work reliably on freebsd")
+ logfile = self.tmpdir.join("spawn.out").open("wb")
+ child = pexpect.spawn(cmd, logfile=logfile)
+ self.request.addfinalizer(logfile.close)
+ child.timeout = expect_timeout
+ return child
+
+def getdecoded(out):
+ try:
+ return out.decode("utf-8")
+ except UnicodeDecodeError:
+ return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
+ py.io.saferepr(out),)
+
+
+class LineComp:
+ def __init__(self):
+ self.stringio = py.io.TextIO()
+
+ def assert_contains_lines(self, lines2):
+ """ assert that lines2 are contained (linearly) in lines1.
+ return a list of extralines found.
+ """
+ __tracebackhide__ = True
+ val = self.stringio.getvalue()
+ self.stringio.truncate(0)
+ self.stringio.seek(0)
+ lines1 = val.split("\n")
+ return LineMatcher(lines1).fnmatch_lines(lines2)
+
+
+class LineMatcher:
+ """Flexible matching of text.
+
+ This is a convenience class to test large texts like the output of
+ commands.
+
+ The constructor takes a list of lines without their trailing
+ newlines, i.e. ``text.splitlines()``.
+
+ """
+
+ def __init__(self, lines):
+ self.lines = lines
+
+ def str(self):
+ """Return the entire original text."""
+ return "\n".join(self.lines)
+
+ def _getlines(self, lines2):
+ if isinstance(lines2, str):
+ lines2 = Source(lines2)
+ if isinstance(lines2, Source):
+ lines2 = lines2.strip().lines
+ return lines2
+
+ def fnmatch_lines_random(self, lines2):
+ """Check lines exist in the output.
+
+ The argument is a list of lines which have to occur in the
+ output, in any order. Each line can contain glob wildcards.
+
+ """
+ lines2 = self._getlines(lines2)
+ for line in lines2:
+ for x in self.lines:
+ if line == x or fnmatch(x, line):
+ print_("matched: ", repr(line))
+ break
+ else:
+ raise ValueError("line %r not found in output" % line)
+
+ def get_lines_after(self, fnline):
+ """Return all lines following the given line in the text.
+
+ The given line can contain glob wildcards.
+ """
+ for i, line in enumerate(self.lines):
+ if fnline == line or fnmatch(line, fnline):
+ return self.lines[i+1:]
+ raise ValueError("line %r not found in output" % fnline)
+
+ def fnmatch_lines(self, lines2):
+ """Search the text for matching lines.
+
+ The argument is a list of lines which have to match and can
+ use glob wildcards. If they do not match, pytest.fail() is
+ called. The matches and non-matches are also printed on
+ stdout.
+
+ """
+ def show(arg1, arg2):
+ py.builtin.print_(arg1, arg2, file=sys.stderr)
+ lines2 = self._getlines(lines2)
+ lines1 = self.lines[:]
+ nextline = None
+ extralines = []
+ __tracebackhide__ = True
+ for line in lines2:
+ nomatchprinted = False
+ while lines1:
+ nextline = lines1.pop(0)
+ if line == nextline:
+ show("exact match:", repr(line))
+ break
+ elif fnmatch(nextline, line):
+ show("fnmatch:", repr(line))
+ show(" with:", repr(nextline))
+ break
+ else:
+ if not nomatchprinted:
+ show("nomatch:", repr(line))
+ nomatchprinted = True
+ show(" and:", repr(nextline))
+ extralines.append(nextline)
+ else:
+ pytest.fail("remains unmatched: %r, see stderr" % (line,))
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/python.py b/testing/web-platform/tests/tools/pytest/_pytest/python.py
new file mode 100644
index 000000000..3580eae07
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/python.py
@@ -0,0 +1,2302 @@
+""" Python test discovery, setup and run of test functions. """
+import fnmatch
+import functools
+import inspect
+import re
+import types
+import sys
+
+import py
+import pytest
+from _pytest._code.code import TerminalRepr
+from _pytest.mark import MarkDecorator, MarkerError
+
+try:
+ import enum
+except ImportError: # pragma: no cover
+ # Only available in Python 3.4+ or as a backport
+ enum = None
+
+import _pytest
+import _pytest._pluggy as pluggy
+
+cutdir2 = py.path.local(_pytest.__file__).dirpath()
+cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
+
+
+NoneType = type(None)
+NOTSET = object()
+isfunction = inspect.isfunction
+isclass = inspect.isclass
+callable = py.builtin.callable
+# used to work around a python2 exception info leak
+exc_clear = getattr(sys, 'exc_clear', lambda: None)
+# The type of re.compile objects is not exposed in Python.
+REGEX_TYPE = type(re.compile(''))
+
+_PY3 = sys.version_info > (3, 0)
+_PY2 = not _PY3
+
+
+if hasattr(inspect, 'signature'):
+ def _format_args(func):
+ return str(inspect.signature(func))
+else:
+ def _format_args(func):
+ return inspect.formatargspec(*inspect.getargspec(func))
+
+if sys.version_info[:2] == (2, 6):
+ def isclass(object):
+ """ Return true if the object is a class. Overrides inspect.isclass for
+ python 2.6 because it will return True for objects which always return
+ something on __getattr__ calls (see #1035).
+ Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
+ """
+ return isinstance(object, (type, types.ClassType))
+
+def _has_positional_arg(func):
+ return func.__code__.co_argcount
+
+
+def filter_traceback(entry):
+ # entry.path might sometimes return a str object when the entry
+ # points to dynamically generated code
+ # see https://bitbucket.org/pytest-dev/py/issues/71
+ raw_filename = entry.frame.code.raw.co_filename
+ is_generated = '<' in raw_filename and '>' in raw_filename
+ if is_generated:
+ return False
+ # entry.path might point to a non-existing file, in which case it will
+ # also return a str object. see #1133
+ p = py.path.local(entry.path)
+ return p != cutdir1 and not p.relto(cutdir2)
+
+
+def get_real_func(obj):
+ """ gets the real function object of the (possibly) wrapped object by
+ functools.wraps or functools.partial.
+ """
+ while hasattr(obj, "__wrapped__"):
+ obj = obj.__wrapped__
+ if isinstance(obj, functools.partial):
+ obj = obj.func
+ return obj
+
+def getfslineno(obj):
+ # xxx let decorators etc specify a sane ordering
+ obj = get_real_func(obj)
+ if hasattr(obj, 'place_as'):
+ obj = obj.place_as
+ fslineno = _pytest._code.getfslineno(obj)
+ assert isinstance(fslineno[1], int), obj
+ return fslineno
+
+def getimfunc(func):
+ try:
+ return func.__func__
+ except AttributeError:
+ try:
+ return func.im_func
+ except AttributeError:
+ return func
+
+def safe_getattr(object, name, default):
+ """ Like getattr but return default upon any Exception.
+
+ Attribute access can potentially fail for 'evil' Python objects.
+ See issue214
+ """
+ try:
+ return getattr(object, name, default)
+ except Exception:
+ return default
+
+
+class FixtureFunctionMarker:
+ def __init__(self, scope, params,
+ autouse=False, yieldctx=False, ids=None):
+ self.scope = scope
+ self.params = params
+ self.autouse = autouse
+ self.yieldctx = yieldctx
+ self.ids = ids
+
+ def __call__(self, function):
+ if isclass(function):
+ raise ValueError(
+ "class fixtures not supported (may be in the future)")
+ function._pytestfixturefunction = self
+ return function
+
+
+def fixture(scope="function", params=None, autouse=False, ids=None):
+ """ (return a) decorator to mark a fixture factory function.
+
+ This decorator can be used (with or without parameters) to define
+ a fixture function. The name of the fixture function can later be
+ referenced to cause its invocation ahead of running tests: test
+ modules or classes can use the pytest.mark.usefixtures(fixturename)
+ marker. Test functions can directly use fixture names as input
+ arguments in which case the fixture instance returned from the fixture
+ function will be injected.
+
+ :arg scope: the scope for which this fixture is shared, one of
+ "function" (default), "class", "module", "session".
+
+ :arg params: an optional list of parameters which will cause multiple
+ invocations of the fixture function and all of the tests
+ using it.
+
+ :arg autouse: if True, the fixture func is activated for all tests that
+ can see it. If False (the default) then an explicit
+ reference is needed to activate the fixture.
+
+ :arg ids: list of string ids each corresponding to the params
+ so that they are part of the test id. If no ids are provided
+ they will be generated automatically from the params.
+
+ """
+ if callable(scope) and params is None and autouse == False:
+ # direct decoration
+ return FixtureFunctionMarker(
+ "function", params, autouse)(scope)
+ if params is not None and not isinstance(params, (list, tuple)):
+ params = list(params)
+ return FixtureFunctionMarker(scope, params, autouse, ids=ids)
+
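+# Editor's note: a minimal illustrative sketch, not part of the original
+# source, of defining and consuming a fixture as described above; the
+# names ``smtp_params`` and ``test_uses_fixture`` are invented:
+#
+#     @fixture(scope="module", params=[1, 2])
+#     def smtp_params(request):
+#         return request.param
+#
+#     def test_uses_fixture(smtp_params):
+#         assert smtp_params in (1, 2)
+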
+def yield_fixture(scope="function", params=None, autouse=False, ids=None):
+ """ (return a) decorator to mark a yield-fixture factory function
+ (EXPERIMENTAL).
+
+ This takes the same arguments as :py:func:`pytest.fixture` but
+ expects a fixture function to use a ``yield`` instead of a ``return``
+ statement to provide a fixture. See
+ http://pytest.org/en/latest/yieldfixture.html for more info.
+ """
+ if callable(scope) and params is None and autouse == False:
+ # direct decoration
+ return FixtureFunctionMarker(
+ "function", params, autouse, yieldctx=True)(scope)
+ else:
+ return FixtureFunctionMarker(scope, params, autouse,
+ yieldctx=True, ids=ids)
+
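+# Editor's note: an illustrative sketch, not part of the original source,
+# of a yield-fixture doing setup and teardown around the yielded value;
+# ``resource`` is an invented name:
+#
+#     @yield_fixture
+#     def resource():
+#         handle = object()   # setup
+#         yield handle        # value injected into the test
+#         # teardown code after the yield runs once the test finishes
+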
+defaultfuncargprefixmarker = fixture()
+
+def pyobj_property(name):
+ def get(self):
+ node = self.getparent(getattr(pytest, name))
+ if node is not None:
+ return node.obj
+ doc = "python %s object this node was collected from (can be None)." % (
+ name.lower(),)
+ return property(get, None, None, doc)
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption('--fixtures', '--funcargs',
+ action="store_true", dest="showfixtures", default=False,
+ help="show available fixtures, sorted by plugin appearance")
+ parser.addini("usefixtures", type="args", default=[],
+ help="list of default fixtures to be used with this project")
+ parser.addini("python_files", type="args",
+ default=['test_*.py', '*_test.py'],
+ help="glob-style file patterns for Python test module discovery")
+ parser.addini("python_classes", type="args", default=["Test",],
+ help="prefixes or glob names for Python test class discovery")
+ parser.addini("python_functions", type="args", default=["test",],
+ help="prefixes or glob names for Python test function and "
+ "method discovery")
+
+ group.addoption("--import-mode", default="prepend",
+ choices=["prepend", "append"], dest="importmode",
+ help="prepend/append to sys.path when importing test modules, "
+ "default is to prepend.")
+
+
+def pytest_cmdline_main(config):
+ if config.option.showfixtures:
+ showfixtures(config)
+ return 0
+
+
+def pytest_generate_tests(metafunc):
+ # those alternative spellings are common - raise a specific error to alert
+ # the user
+ alt_spellings = ['parameterize', 'parametrise', 'parameterise']
+ for attr in alt_spellings:
+ if hasattr(metafunc.function, attr):
+ msg = "{0} has '{1}', spelling should be 'parametrize'"
+ raise MarkerError(msg.format(metafunc.function.__name__, attr))
+ try:
+ markers = metafunc.function.parametrize
+ except AttributeError:
+ return
+ for marker in markers:
+ metafunc.parametrize(*marker.args, **marker.kwargs)
+
+def pytest_configure(config):
+ config.addinivalue_line("markers",
+ "parametrize(argnames, argvalues): call a test function multiple "
+ "times passing in different arguments in turn. argvalues generally "
+ "needs to be a list of values if argnames specifies only one name "
+ "or a list of tuples of values if argnames specifies multiple names. "
+ "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
+ "decorated test function, one with arg1=1 and another with arg1=2."
+ "see http://pytest.org/latest/parametrize.html for more info and "
+ "examples."
+ )
+ config.addinivalue_line("markers",
+ "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
+ "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
+ )
+
+def pytest_sessionstart(session):
+ session._fixturemanager = FixtureManager(session)
+
+@pytest.hookimpl(trylast=True)
+def pytest_namespace():
+ raises.Exception = pytest.fail.Exception
+ return {
+ 'fixture': fixture,
+ 'yield_fixture': yield_fixture,
+ 'raises' : raises,
+ 'collect': {
+ 'Module': Module, 'Class': Class, 'Instance': Instance,
+ 'Function': Function, 'Generator': Generator,
+ '_fillfuncargs': fillfixtures}
+ }
+
+@fixture(scope="session")
+def pytestconfig(request):
+ """ the pytest config object with access to command line opts."""
+ return request.config
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_pyfunc_call(pyfuncitem):
+ testfunction = pyfuncitem.obj
+ if pyfuncitem._isyieldedfunction():
+ testfunction(*pyfuncitem._args)
+ else:
+ funcargs = pyfuncitem.funcargs
+ testargs = {}
+ for arg in pyfuncitem._fixtureinfo.argnames:
+ testargs[arg] = funcargs[arg]
+ testfunction(**testargs)
+ return True
+
+def pytest_collect_file(path, parent):
+ ext = path.ext
+ if ext == ".py":
+ if not parent.session.isinitpath(path):
+ for pat in parent.config.getini('python_files'):
+ if path.fnmatch(pat):
+ break
+ else:
+ return
+ ihook = parent.session.gethookproxy(path)
+ return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
+
+def pytest_pycollect_makemodule(path, parent):
+ return Module(path, parent)
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_pycollect_makeitem(collector, name, obj):
+ outcome = yield
+ res = outcome.get_result()
+ if res is not None:
+ raise StopIteration
+ # nothing was collected elsewhere, let's do it here
+ if isclass(obj):
+ if collector.istestclass(obj, name):
+ Class = collector._getcustomclass("Class")
+ outcome.force_result(Class(name, parent=collector))
+ elif collector.istestfunction(obj, name):
+ # mock seems to store unbound methods (issue473), normalize it
+ obj = getattr(obj, "__func__", obj)
+ # We need to try and unwrap the function if it's a functools.partial
+ # or a functools.wraps-wrapped function.
+ # We mustn't if it's been wrapped with mock.patch (python 2 only)
+ if not (isfunction(obj) or isfunction(get_real_func(obj))):
+ collector.warn(code="C2", message=
+ "cannot collect %r because it is not a function."
+ % name, )
+ elif getattr(obj, "__test__", True):
+ if is_generator(obj):
+ res = Generator(name, parent=collector)
+ else:
+ res = list(collector._genfunctions(name, obj))
+ outcome.force_result(res)
+
+def is_generator(func):
+ try:
+ return _pytest._code.getrawcode(func).co_flags & 32 # generator function
+ except AttributeError: # builtin functions have no bytecode
+ # assume them to not be generators
+ return False
+
+class PyobjContext(object):
+ module = pyobj_property("Module")
+ cls = pyobj_property("Class")
+ instance = pyobj_property("Instance")
+
+class PyobjMixin(PyobjContext):
+ def obj():
+ def fget(self):
+ try:
+ return self._obj
+ except AttributeError:
+ self._obj = obj = self._getobj()
+ return obj
+ def fset(self, value):
+ self._obj = value
+ return property(fget, fset, None, "underlying python object")
+ obj = obj()
+
+ def _getobj(self):
+ return getattr(self.parent.obj, self.name)
+
+ def getmodpath(self, stopatmodule=True, includemodule=False):
+ """ return python path relative to the containing module. """
+ chain = self.listchain()
+ chain.reverse()
+ parts = []
+ for node in chain:
+ if isinstance(node, Instance):
+ continue
+ name = node.name
+ if isinstance(node, Module):
+ assert name.endswith(".py")
+ name = name[:-3]
+ if stopatmodule:
+ if includemodule:
+ parts.append(name)
+ break
+ parts.append(name)
+ parts.reverse()
+ s = ".".join(parts)
+ return s.replace(".[", "[")
+
+ def _getfslineno(self):
+ return getfslineno(self.obj)
+
+ def reportinfo(self):
+ # XXX caching?
+ obj = self.obj
+ compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
+ if isinstance(compat_co_firstlineno, int):
+ # nose compatibility
+ fspath = sys.modules[obj.__module__].__file__
+ if fspath.endswith(".pyc"):
+ fspath = fspath[:-1]
+ lineno = compat_co_firstlineno
+ else:
+ fspath, lineno = getfslineno(obj)
+ modpath = self.getmodpath()
+ assert isinstance(lineno, int)
+ return fspath, lineno, modpath
+
+class PyCollector(PyobjMixin, pytest.Collector):
+
+ def funcnamefilter(self, name):
+ return self._matches_prefix_or_glob_option('python_functions', name)
+
+ def isnosetest(self, obj):
+ """ Look for the __test__ attribute, which is applied by the
+ @nose.tools.istest decorator
+ """
+ # We explicitly check for "is True" here to not mistakenly treat
+ # classes with a custom __getattr__ returning something truthy (like a
+ # function) as test classes.
+ return safe_getattr(obj, '__test__', False) is True
+
+ def classnamefilter(self, name):
+ return self._matches_prefix_or_glob_option('python_classes', name)
+
+ def istestfunction(self, obj, name):
+ return (
+ (self.funcnamefilter(name) or self.isnosetest(obj)) and
+ safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
+ )
+
+ def istestclass(self, obj, name):
+ return self.classnamefilter(name) or self.isnosetest(obj)
+
+ def _matches_prefix_or_glob_option(self, option_name, name):
+ """
+ checks whether the given name matches a prefix or glob pattern defined
+ in the ini configuration.
+ """
+ for option in self.config.getini(option_name):
+ if name.startswith(option):
+ return True
+ # check that name looks like a glob-string before calling fnmatch
+ # because this is called for every name in each collected module,
+ # and fnmatch is somewhat expensive to call
+ elif ('*' in option or '?' in option or '[' in option) and \
+ fnmatch.fnmatch(name, option):
+ return True
+ return False
+
+ def collect(self):
+ if not getattr(self.obj, "__test__", True):
+ return []
+
+ # NB. we avoid random getattrs and peek in the __dict__ instead
+ # (XXX originally introduced from a PyPy need, still true?)
+ dicts = [getattr(self.obj, '__dict__', {})]
+ for basecls in inspect.getmro(self.obj.__class__):
+ dicts.append(basecls.__dict__)
+ seen = {}
+ l = []
+ for dic in dicts:
+ for name, obj in list(dic.items()):
+ if name in seen:
+ continue
+ seen[name] = True
+ res = self.makeitem(name, obj)
+ if res is None:
+ continue
+ if not isinstance(res, list):
+ res = [res]
+ l.extend(res)
+ l.sort(key=lambda item: item.reportinfo()[:2])
+ return l
+
+ def makeitem(self, name, obj):
+ #assert self.ihook.fspath == self.fspath, self
+ return self.ihook.pytest_pycollect_makeitem(
+ collector=self, name=name, obj=obj)
+
+ def _genfunctions(self, name, funcobj):
+ module = self.getparent(Module).obj
+ clscol = self.getparent(Class)
+ cls = clscol and clscol.obj or None
+ transfer_markers(funcobj, cls, module)
+ fm = self.session._fixturemanager
+ fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
+ metafunc = Metafunc(funcobj, fixtureinfo, self.config,
+ cls=cls, module=module)
+ methods = []
+ if hasattr(module, "pytest_generate_tests"):
+ methods.append(module.pytest_generate_tests)
+ if hasattr(cls, "pytest_generate_tests"):
+ methods.append(cls().pytest_generate_tests)
+ if methods:
+ self.ihook.pytest_generate_tests.call_extra(methods,
+ dict(metafunc=metafunc))
+ else:
+ self.ihook.pytest_generate_tests(metafunc=metafunc)
+
+ Function = self._getcustomclass("Function")
+ if not metafunc._calls:
+ yield Function(name, parent=self, fixtureinfo=fixtureinfo)
+ else:
+ # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
+ add_funcarg_pseudo_fixture_def(self, metafunc, fm)
+
+ for callspec in metafunc._calls:
+ subname = "%s[%s]" %(name, callspec.id)
+ yield Function(name=subname, parent=self,
+ callspec=callspec, callobj=funcobj,
+ fixtureinfo=fixtureinfo,
+ keywords={callspec.id:True})
+
+def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
+ # this function will transform all collected calls to functions
+ # if they use direct funcargs (i.e. direct parametrization)
+ # because we want later test execution to be able to rely on
+ # an existing FixtureDef structure for all arguments.
+ # XXX we can probably avoid this algorithm if we modify CallSpec2
+ # to directly care for creating the fixturedefs within its methods.
+ if not metafunc._calls[0].funcargs:
+ return # this function call does not have direct parametrization
+ # collect funcargs of all callspecs into a list of values
+ arg2params = {}
+ arg2scope = {}
+ for callspec in metafunc._calls:
+ for argname, argvalue in callspec.funcargs.items():
+ assert argname not in callspec.params
+ callspec.params[argname] = argvalue
+ arg2params_list = arg2params.setdefault(argname, [])
+ callspec.indices[argname] = len(arg2params_list)
+ arg2params_list.append(argvalue)
+ if argname not in arg2scope:
+ scopenum = callspec._arg2scopenum.get(argname,
+ scopenum_function)
+ arg2scope[argname] = scopes[scopenum]
+ callspec.funcargs.clear()
+
+ # register artificial FixtureDef's so that later at test execution
+ # time we can rely on a proper FixtureDef to exist for fixture setup.
+ arg2fixturedefs = metafunc._arg2fixturedefs
+ for argname, valuelist in arg2params.items():
+ # if we have a scope that is higher than function we need
+ # to make sure we only ever create an according fixturedef on
+ # a per-scope basis. We thus store and cache the fixturedef on the
+ # node related to the scope.
+ scope = arg2scope[argname]
+ node = None
+ if scope != "function":
+ node = get_scope_node(collector, scope)
+ if node is None:
+ assert scope == "class" and isinstance(collector, Module)
+ # use module-level collector for class-scope (for now)
+ node = collector
+ if node and argname in node._name2pseudofixturedef:
+ arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
+ else:
+ fixturedef = FixtureDef(fixturemanager, '', argname,
+ get_direct_param_fixture_func,
+ arg2scope[argname],
+ valuelist, False, False)
+ arg2fixturedefs[argname] = [fixturedef]
+ if node is not None:
+ node._name2pseudofixturedef[argname] = fixturedef
+
+
+def get_direct_param_fixture_func(request):
+ return request.param
+
+class FuncFixtureInfo:
+ def __init__(self, argnames, names_closure, name2fixturedefs):
+ self.argnames = argnames
+ self.names_closure = names_closure
+ self.name2fixturedefs = name2fixturedefs
+
+
+def _marked(func, mark):
+ """ Returns True if :func: is already marked with :mark:, False otherwise.
+ This can happen if the marker is applied to a class and the test file is
+ invoked more than once.
+ """
+ try:
+ func_mark = getattr(func, mark.name)
+ except AttributeError:
+ return False
+ return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
+
+
+def transfer_markers(funcobj, cls, mod):
+ # XXX this should rather be code in the mark plugin or the mark
+ # plugin should merge with the python plugin.
+ for holder in (cls, mod):
+ try:
+ pytestmark = holder.pytestmark
+ except AttributeError:
+ continue
+ if isinstance(pytestmark, list):
+ for mark in pytestmark:
+ if not _marked(funcobj, mark):
+ mark(funcobj)
+ else:
+ if not _marked(funcobj, pytestmark):
+ pytestmark(funcobj)
+
+class Module(pytest.File, PyCollector):
+ """ Collector for test classes and functions. """
+ def _getobj(self):
+ return self._memoizedcall('_obj', self._importtestmodule)
+
+ def collect(self):
+ self.session._fixturemanager.parsefactories(self)
+ return super(Module, self).collect()
+
+ def _importtestmodule(self):
+ # we assume we are only called once per module
+ importmode = self.config.getoption("--import-mode")
+ try:
+ mod = self.fspath.pyimport(ensuresyspath=importmode)
+ except SyntaxError:
+ raise self.CollectError(
+ _pytest._code.ExceptionInfo().getrepr(style="short"))
+ except self.fspath.ImportMismatchError:
+ e = sys.exc_info()[1]
+ raise self.CollectError(
+ "import file mismatch:\n"
+ "imported module %r has this __file__ attribute:\n"
+ " %s\n"
+ "which is not the same as the test file we want to collect:\n"
+ " %s\n"
+ "HINT: remove __pycache__ / .pyc files and/or use a "
+ "unique basename for your test file modules"
+ % e.args
+ )
+ #print "imported test module", mod
+ self.config.pluginmanager.consider_module(mod)
+ return mod
+
+ def setup(self):
+ setup_module = xunitsetup(self.obj, "setUpModule")
+ if setup_module is None:
+ setup_module = xunitsetup(self.obj, "setup_module")
+ if setup_module is not None:
+ #XXX: nose compat hack, move to nose plugin
+ # if it takes a positional arg, it's probably a pytest style one
+ # so we pass the current module object
+ if _has_positional_arg(setup_module):
+ setup_module(self.obj)
+ else:
+ setup_module()
+ fin = getattr(self.obj, 'tearDownModule', None)
+ if fin is None:
+ fin = getattr(self.obj, 'teardown_module', None)
+ if fin is not None:
+ #XXX: nose compat hack, move to nose plugin
+ # if it takes a positional arg, it's probably a pytest style one
+ # so we pass the current module object
+ if _has_positional_arg(fin):
+ finalizer = lambda: fin(self.obj)
+ else:
+ finalizer = fin
+ self.addfinalizer(finalizer)
+
+
+class Class(PyCollector):
+ """ Collector for test methods. """
+ def collect(self):
+ if hasinit(self.obj):
+ self.warn("C1", "cannot collect test class %r because it has a "
+ "__init__ constructor" % self.obj.__name__)
+ return []
+ return [self._getcustomclass("Instance")(name="()", parent=self)]
+
+ def setup(self):
+ setup_class = xunitsetup(self.obj, 'setup_class')
+ if setup_class is not None:
+ setup_class = getattr(setup_class, 'im_func', setup_class)
+ setup_class = getattr(setup_class, '__func__', setup_class)
+ setup_class(self.obj)
+
+ fin_class = getattr(self.obj, 'teardown_class', None)
+ if fin_class is not None:
+ fin_class = getattr(fin_class, 'im_func', fin_class)
+ fin_class = getattr(fin_class, '__func__', fin_class)
+ self.addfinalizer(lambda: fin_class(self.obj))
+
+class Instance(PyCollector):
+ def _getobj(self):
+ obj = self.parent.obj()
+ return obj
+
+ def collect(self):
+ self.session._fixturemanager.parsefactories(self)
+ return super(Instance, self).collect()
+
+ def newinstance(self):
+ self.obj = self._getobj()
+ return self.obj
+
+class FunctionMixin(PyobjMixin):
+ """ mixin for the code common to Function and Generator.
+ """
+
+ def setup(self):
+ """ perform setup for this test function. """
+ if hasattr(self, '_preservedparent'):
+ obj = self._preservedparent
+ elif isinstance(self.parent, Instance):
+ obj = self.parent.newinstance()
+ self.obj = self._getobj()
+ else:
+ obj = self.parent.obj
+ if inspect.ismethod(self.obj):
+ setup_name = 'setup_method'
+ teardown_name = 'teardown_method'
+ else:
+ setup_name = 'setup_function'
+ teardown_name = 'teardown_function'
+ setup_func_or_method = xunitsetup(obj, setup_name)
+ if setup_func_or_method is not None:
+ setup_func_or_method(self.obj)
+ fin = getattr(obj, teardown_name, None)
+ if fin is not None:
+ self.addfinalizer(lambda: fin(self.obj))
+
+ def _prunetraceback(self, excinfo):
+ if hasattr(self, '_obj') and not self.config.option.fulltrace:
+ code = _pytest._code.Code(get_real_func(self.obj))
+ path, firstlineno = code.path, code.firstlineno
+ traceback = excinfo.traceback
+ ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
+ if ntraceback == traceback:
+ ntraceback = ntraceback.cut(path=path)
+ if ntraceback == traceback:
+ #ntraceback = ntraceback.cut(excludepath=cutdir2)
+ ntraceback = ntraceback.filter(filter_traceback)
+ if not ntraceback:
+ ntraceback = traceback
+
+ excinfo.traceback = ntraceback.filter()
+ # issue364: mark all but first and last frames to
+ # only show a single-line message for each frame
+ if self.config.option.tbstyle == "auto":
+ if len(excinfo.traceback) > 2:
+ for entry in excinfo.traceback[1:-1]:
+ entry.set_repr_style('short')
+
+ def _repr_failure_py(self, excinfo, style="long"):
+ if excinfo.errisinstance(pytest.fail.Exception):
+ if not excinfo.value.pytrace:
+ return py._builtin._totext(excinfo.value)
+ return super(FunctionMixin, self)._repr_failure_py(excinfo,
+ style=style)
+
+ def repr_failure(self, excinfo, outerr=None):
+ assert outerr is None, "XXX outerr usage is deprecated"
+ style = self.config.option.tbstyle
+ if style == "auto":
+ style = "long"
+ return self._repr_failure_py(excinfo, style=style)
+
+
+class Generator(FunctionMixin, PyCollector):
+ def collect(self):
+ # test generators are seen as collectors but they also
+ # invoke setup/teardown on popular request
+ # (induced by the common "test_*" naming shared with normal tests)
+ self.session._setupstate.prepare(self)
+ # see FunctionMixin.setup and test_setupstate_is_preserved_134
+ self._preservedparent = self.parent.obj
+ l = []
+ seen = {}
+ for i, x in enumerate(self.obj()):
+ name, call, args = self.getcallargs(x)
+ if not callable(call):
+ raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
+ if name is None:
+ name = "[%d]" % i
+ else:
+ name = "['%s']" % name
+ if name in seen:
+ raise ValueError("%r generated tests with non-unique name %r" %(self, name))
+ seen[name] = True
+ l.append(self.Function(name, self, args=args, callobj=call))
+ return l
+
+ def getcallargs(self, obj):
+ if not isinstance(obj, (tuple, list)):
+ obj = (obj,)
+ # explicit naming
+ if isinstance(obj[0], py.builtin._basestring):
+ name = obj[0]
+ obj = obj[1:]
+ else:
+ name = None
+ call, args = obj[0], obj[1:]
+ return name, call, args
+
+
+def hasinit(obj):
+ init = getattr(obj, '__init__', None)
+ if init:
+ if init != object.__init__:
+ return True
+
+
+
+def fillfixtures(function):
+ """ fill missing funcargs for a test function. """
+ try:
+ request = function._request
+ except AttributeError:
+ # XXX this special code path is only expected to execute
+ # with the oejskit plugin. It uses classes with funcargs
+ # and we thus have to work a bit to allow this.
+ fm = function.session._fixturemanager
+ fi = fm.getfixtureinfo(function.parent, function.obj, None)
+ function._fixtureinfo = fi
+ request = function._request = FixtureRequest(function)
+ request._fillfixtures()
+ # prune out funcargs for jstests
+ newfuncargs = {}
+ for name in fi.argnames:
+ newfuncargs[name] = function.funcargs[name]
+ function.funcargs = newfuncargs
+ else:
+ request._fillfixtures()
+
+
+_notexists = object()
+
+class CallSpec2(object):
+ def __init__(self, metafunc):
+ self.metafunc = metafunc
+ self.funcargs = {}
+ self._idlist = []
+ self.params = {}
+ self._globalid = _notexists
+ self._globalid_args = set()
+ self._globalparam = _notexists
+ self._arg2scopenum = {} # used for sorting parametrized resources
+ self.keywords = {}
+ self.indices = {}
+
+ def copy(self, metafunc):
+ cs = CallSpec2(self.metafunc)
+ cs.funcargs.update(self.funcargs)
+ cs.params.update(self.params)
+ cs.keywords.update(self.keywords)
+ cs.indices.update(self.indices)
+ cs._arg2scopenum.update(self._arg2scopenum)
+ cs._idlist = list(self._idlist)
+ cs._globalid = self._globalid
+ cs._globalid_args = self._globalid_args
+ cs._globalparam = self._globalparam
+ return cs
+
+ def _checkargnotcontained(self, arg):
+ if arg in self.params or arg in self.funcargs:
+ raise ValueError("duplicate %r" %(arg,))
+
+ def getparam(self, name):
+ try:
+ return self.params[name]
+ except KeyError:
+ if self._globalparam is _notexists:
+ raise ValueError(name)
+ return self._globalparam
+
+ @property
+ def id(self):
+ return "-".join(map(str, filter(None, self._idlist)))
+
+ def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
+ param_index):
+ for arg,val in zip(argnames, valset):
+ self._checkargnotcontained(arg)
+ valtype_for_arg = valtypes[arg]
+ getattr(self, valtype_for_arg)[arg] = val
+ self.indices[arg] = param_index
+ self._arg2scopenum[arg] = scopenum
+ if val is _notexists:
+ self._emptyparamspecified = True
+ self._idlist.append(id)
+ self.keywords.update(keywords)
+
+ def setall(self, funcargs, id, param):
+ for x in funcargs:
+ self._checkargnotcontained(x)
+ self.funcargs.update(funcargs)
+ if id is not _notexists:
+ self._idlist.append(id)
+ if param is not _notexists:
+ assert self._globalparam is _notexists
+ self._globalparam = param
+ for arg in funcargs:
+ self._arg2scopenum[arg] = scopenum_function
+
+
+class FuncargnamesCompatAttr:
+ """ helper class so that Metafunc, Function and FixtureRequest
+ don't need to each define the "funcargnames" compatibility attribute.
+ """
+ @property
+ def funcargnames(self):
+ """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
+ return self.fixturenames
+
+class Metafunc(FuncargnamesCompatAttr):
+ """
+ Metafunc objects are passed to the ``pytest_generate_tests`` hook.
+ They help to inspect a test function and to generate tests according to
+ test configuration or values specified in the class or module where a
+ test function is defined.
+
+ :ivar fixturenames: set of fixture names required by the test function
+
+ :ivar function: underlying python test function
+
+ :ivar cls: class object in which the test function is defined, or ``None``.
+
+ :ivar module: the module object in which the test function is defined.
+
+ :ivar config: access to the :class:`_pytest.config.Config` object for the
+ test session.
+
+ :ivar funcargnames:
+ .. deprecated:: 2.3
+ Use ``fixturenames`` instead.
+ """
+ def __init__(self, function, fixtureinfo, config, cls=None, module=None):
+ self.config = config
+ self.module = module
+ self.function = function
+ self.fixturenames = fixtureinfo.names_closure
+ self._arg2fixturedefs = fixtureinfo.name2fixturedefs
+ self.cls = cls
+ self._calls = []
+ self._ids = py.builtin.set()
+
+ def parametrize(self, argnames, argvalues, indirect=False, ids=None,
+ scope=None):
+ """ Add new invocations to the underlying test function using the list
+ of argvalues for the given argnames. Parametrization is performed
+ during the collection phase. If you need to set up expensive resources,
+ see about setting indirect so that it is done at test setup time
+ rather than at collection time.
+
+ :arg argnames: a comma-separated string denoting one or more argument
+ names, or a list/tuple of argument strings.
+
+ :arg argvalues: The list of argvalues determines how often a
+ test is invoked with different argument values. If only one
+ argname was specified argvalues is a list of values. If N
+ argnames were specified, argvalues must be a list of N-tuples,
+ where each tuple-element specifies a value for its respective
+ argname.
+
+ :arg indirect: The list of argnames or boolean. A list of arguments'
+ names (subset of argnames). If True the list contains all names from
+ the argnames. Each argvalue corresponding to an argname in this list will
+ be passed as request.param to its respective argname fixture
+ function so that it can perform more expensive setups during the
+ setup phase of a test rather than at collection time.
+
+ :arg ids: list of string ids, or a callable.
+ If strings, each corresponds to an entry in argvalues so that they are
+ part of the test id.
+ If callable, it should take one argument (a single argvalue) and return
+ a string or return None. If None, the automatically generated id for that
+ argument will be used.
+ If no ids are provided they will be generated automatically from
+ the argvalues.
+
+ :arg scope: if specified it denotes the scope of the parameters.
+ The scope is used for grouping tests by parameter instances.
+ It will also override any fixture-function defined scope, allowing
+ a dynamic scope to be set using test context or configuration.
+ """
+
+ # individual parametrized argument sets can be wrapped in a series
+ # of markers in which case we unwrap the values and apply the mark
+ # at Function init
+ newkeywords = {}
+ unwrapped_argvalues = []
+ for i, argval in enumerate(argvalues):
+ while isinstance(argval, MarkDecorator):
+ newmark = MarkDecorator(argval.markname,
+ argval.args[:-1], argval.kwargs)
+ newmarks = newkeywords.setdefault(i, {})
+ newmarks[newmark.markname] = newmark
+ argval = argval.args[-1]
+ unwrapped_argvalues.append(argval)
+ argvalues = unwrapped_argvalues
+
+ if not isinstance(argnames, (tuple, list)):
+ argnames = [x.strip() for x in argnames.split(",") if x.strip()]
+ if len(argnames) == 1:
+ argvalues = [(val,) for val in argvalues]
+ if not argvalues:
+ argvalues = [(_notexists,) * len(argnames)]
+
+ if scope is None:
+ scope = "function"
+ scopenum = scopes.index(scope)
+ valtypes = {}
+ for arg in argnames:
+ if arg not in self.fixturenames:
+ raise ValueError("%r uses no fixture %r" %(self.function, arg))
+
+ if indirect is True:
+ valtypes = dict.fromkeys(argnames, "params")
+ elif indirect is False:
+ valtypes = dict.fromkeys(argnames, "funcargs")
+ elif isinstance(indirect, (tuple, list)):
+ valtypes = dict.fromkeys(argnames, "funcargs")
+ for arg in indirect:
+ if arg not in argnames:
+ raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
+ self.function, arg))
+ valtypes[arg] = "params"
+ idfn = None
+ if callable(ids):
+ idfn = ids
+ ids = None
+ if ids and len(ids) != len(argvalues):
+ raise ValueError('%d tests specified with %d ids' %(
+ len(argvalues), len(ids)))
+ if not ids:
+ ids = idmaker(argnames, argvalues, idfn)
+ newcalls = []
+ for callspec in self._calls or [CallSpec2(self)]:
+ for param_index, valset in enumerate(argvalues):
+ assert len(valset) == len(argnames)
+ newcallspec = callspec.copy(self)
+ newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
+ newkeywords.get(param_index, {}), scopenum,
+ param_index)
+ newcalls.append(newcallspec)
+ self._calls = newcalls
+
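+ # Editor's note: an illustrative sketch, not part of the original
+ # source, of calling parametrize from a ``pytest_generate_tests`` hook
+ # in a conftest.py; it assumes a fixture named ``smtp_host`` exists:
+ #
+ #     def pytest_generate_tests(metafunc):
+ #         if "smtp_host" in metafunc.fixturenames:
+ #             metafunc.parametrize(
+ #                 "smtp_host", ["mail1.example.com", "mail2.example.com"],
+ #                 indirect=True, scope="module")
+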
+ def addcall(self, funcargs=None, id=_notexists, param=_notexists):
+ """ (deprecated, use parametrize) Add a new call to the underlying
+ test function during the collection phase of a test run. Note that
+ request.addcall() is called during the test collection phase prior and
+ independently to actual test execution. You should only use addcall()
+ if you need to specify multiple arguments of a test function.
+
+ :arg funcargs: argument keyword dictionary used when invoking
+ the test function.
+
+ :arg id: used for reporting and identification purposes. If you
+ don't supply an `id` an automatic unique id will be generated.
+
+ :arg param: a parameter which will be exposed to a later fixture function
+ invocation through the ``request.param`` attribute.
+ """
+ assert funcargs is None or isinstance(funcargs, dict)
+ if funcargs is not None:
+ for name in funcargs:
+ if name not in self.fixturenames:
+ pytest.fail("funcarg %r not used in this function." % name)
+ else:
+ funcargs = {}
+ if id is None:
+ raise ValueError("id=None not allowed")
+ if id is _notexists:
+ id = len(self._calls)
+ id = str(id)
+ if id in self._ids:
+ raise ValueError("duplicate id %r" % id)
+ self._ids.add(id)
+
+ cs = CallSpec2(self)
+ cs.setall(funcargs, id, param)
+ self._calls.append(cs)
+
+
+if _PY3:
+ import codecs
+
+ def _escape_bytes(val):
+ """
+ If val is pure ascii, returns it as a str(), otherwise escapes
+ into a sequence of escaped bytes:
+ b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
+
+ note:
+ the obvious "v.decode('unicode-escape')" will return
+ valid utf-8 unicode if it finds them in the string, but we
+ want to return escaped bytes for any byte, even if they match
+ a utf-8 string.
+ """
+ if val:
+ # source: http://goo.gl/bGsnwC
+ encoded_bytes, _ = codecs.escape_encode(val)
+ return encoded_bytes.decode('ascii')
+ else:
+ # empty bytes crashes codecs.escape_encode (#1087)
+ return ''
+else:
+ def _escape_bytes(val):
+ """
+ In py2 bytes and str are the same type, so return it unchanged if it
+ is a full ascii string, otherwise escape it into its binary form.
+ """
+ try:
+ return val.decode('ascii')
+ except UnicodeDecodeError:
+ return val.encode('string-escape')
+
+
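+# Editor's note: an illustrative sketch, not part of the original source,
+# of the intended _escape_bytes behaviour on either Python version
+# (return types differ slightly between Python 2 and 3):
+#
+#     _escape_bytes(b"abc")        # -> "abc"
+#     _escape_bytes(b"\xc3\xb4")   # -> "\\xc3\\xb4" (escaped, not decoded)
+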
+def _idval(val, argname, idx, idfn):
+ if idfn:
+ try:
+ s = idfn(val)
+ if s:
+ return s
+ except Exception:
+ pass
+
+ if isinstance(val, bytes):
+ return _escape_bytes(val)
+ elif isinstance(val, (float, int, str, bool, NoneType)):
+ return str(val)
+ elif isinstance(val, REGEX_TYPE):
+ return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern
+ elif enum is not None and isinstance(val, enum.Enum):
+ return str(val)
+ elif isclass(val) and hasattr(val, '__name__'):
+ return val.__name__
+ elif _PY2 and isinstance(val, unicode):
+ # special case for python 2: if a unicode string is
+ # convertible to ascii, return it as an str() object instead
+ try:
+ return str(val)
+ except UnicodeError:
+ # fallthrough
+ pass
+ return str(argname)+str(idx)
+
+def _idvalset(idx, valset, argnames, idfn):
+ this_id = [_idval(val, argname, idx, idfn)
+ for val, argname in zip(valset, argnames)]
+ return "-".join(this_id)
+
+def idmaker(argnames, argvalues, idfn=None):
+ ids = [_idvalset(valindex, valset, argnames, idfn)
+ for valindex, valset in enumerate(argvalues)]
+ if len(set(ids)) < len(ids):
+ # user may have provided a bad idfn which means the ids are not unique
+ ids = [str(i) + testid for i, testid in enumerate(ids)]
+ return ids
+
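+# Editor's note: an illustrative sketch, not part of the original source,
+# of the ids idmaker generates for a simple parametrization (exact ids
+# may differ when a custom idfn is supplied):
+#
+#     idmaker(("a", "b"), [(1, "x"), (2, "y")])   # -> ["1-x", "2-y"]
+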
+def showfixtures(config):
+ from _pytest.main import wrap_session
+ return wrap_session(config, _showfixtures_main)
+
+def _showfixtures_main(config, session):
+ import _pytest.config
+ session.perform_collect()
+ curdir = py.path.local()
+ tw = _pytest.config.create_terminal_writer(config)
+ verbose = config.getvalue("verbose")
+
+ fm = session._fixturemanager
+
+ available = []
+ for argname, fixturedefs in fm._arg2fixturedefs.items():
+ assert fixturedefs is not None
+ if not fixturedefs:
+ continue
+ fixturedef = fixturedefs[-1]
+ loc = getlocation(fixturedef.func, curdir)
+ available.append((len(fixturedef.baseid),
+ fixturedef.func.__module__,
+ curdir.bestrelpath(loc),
+ fixturedef.argname, fixturedef))
+
+ available.sort()
+ currentmodule = None
+ for baseid, module, bestrel, argname, fixturedef in available:
+ if currentmodule != module:
+ if not module.startswith("_pytest."):
+ tw.line()
+ tw.sep("-", "fixtures defined from %s" %(module,))
+ currentmodule = module
+ if verbose <= 0 and argname[0] == "_":
+ continue
+ if verbose > 0:
+ funcargspec = "%s -- %s" %(argname, bestrel,)
+ else:
+ funcargspec = argname
+ tw.line(funcargspec, green=True)
+ loc = getlocation(fixturedef.func, curdir)
+ doc = fixturedef.func.__doc__ or ""
+ if doc:
+ for line in doc.strip().split("\n"):
+ tw.line(" " + line.strip())
+ else:
+ tw.line(" %s: no docstring available" %(loc,),
+ red=True)
+
+def getlocation(function, curdir):
+ import inspect
+ fn = py.path.local(inspect.getfile(function))
+ lineno = py.builtin._getcode(function).co_firstlineno
+ if fn.relto(curdir):
+ fn = fn.relto(curdir)
+ return "%s:%d" %(fn, lineno+1)
+
+# builtin pytest.raises helper
+
+def raises(expected_exception, *args, **kwargs):
+ """ assert that a code block/function call raises ``expected_exception``
+ and raise a failure exception otherwise.
+
+ This helper produces a ``ExceptionInfo()`` object (see below).
+
+ If using Python 2.5 or above, you may use this function as a
+ context manager::
+
+ >>> with raises(ZeroDivisionError):
+ ... 1/0
+
+ .. note::
+
+ When using ``pytest.raises`` as a context manager, it's worthwhile to
+ note that normal context manager rules apply and that the exception
+ raised *must* be the final line in the scope of the context manager.
+ Lines of code after that, within the scope of the context manager, will
+ not be executed. For example::
+
+ >>> with raises(OSError) as exc_info:
+ assert 1 == 1 # this will execute as expected
+ raise OSError(errno.EEXIST, 'directory exists')
+ assert exc_info.value.errno == errno.EEXIST # this will not execute
+
+ Instead, the following approach must be taken (note the difference in
+ scope)::
+
+ >>> with raises(OSError) as exc_info:
+ assert 1 == 1 # this will execute as expected
+ raise OSError(errno.EEXIST, 'directory exists')
+
+ assert exc_info.value.errno == errno.EEXIST # this will now execute
+
+ Or you can specify a callable by passing a to-be-called lambda::
+
+ >>> raises(ZeroDivisionError, lambda: 1/0)
+ <ExceptionInfo ...>
+
+ or you can specify an arbitrary callable with arguments::
+
+ >>> def f(x): return 1/x
+ ...
+ >>> raises(ZeroDivisionError, f, 0)
+ <ExceptionInfo ...>
+ >>> raises(ZeroDivisionError, f, x=0)
+ <ExceptionInfo ...>
+
+ A third possibility is to use a string to be executed::
+
+ >>> raises(ZeroDivisionError, "f(0)")
+ <ExceptionInfo ...>
+
+ .. autoclass:: _pytest._code.ExceptionInfo
+ :members:
+
+ .. note::
+ Similar to caught exception objects in Python, explicitly clearing
+ local references to returned ``ExceptionInfo`` objects can
+ help the Python interpreter speed up its garbage collection.
+
+ Clearing those references breaks a reference cycle
+ (``ExceptionInfo`` --> caught exception --> frame stack raising
+ the exception --> current frame stack --> local variables -->
+ ``ExceptionInfo``) which makes Python keep all objects referenced
+ from that cycle (including all local variables in the current
+ frame) alive until the next cyclic garbage collection run. See the
+ official Python ``try`` statement documentation for more detailed
+ information.
+
+ """
+ __tracebackhide__ = True
+ if expected_exception is AssertionError:
+ # we want to catch an AssertionError
+ # replace our subclass with the builtin one
+ # see https://github.com/pytest-dev/pytest/issues/176
+ from _pytest.assertion.util import BuiltinAssertionError \
+ as expected_exception
+ msg = ("exceptions must be old-style classes or"
+ " derived from BaseException, not %s")
+ if isinstance(expected_exception, tuple):
+ for exc in expected_exception:
+ if not isclass(exc):
+ raise TypeError(msg % type(exc))
+ elif not isclass(expected_exception):
+ raise TypeError(msg % type(expected_exception))
+
+ if not args:
+ return RaisesContext(expected_exception)
+ elif isinstance(args[0], str):
+ code, = args
+ assert isinstance(code, str)
+ frame = sys._getframe(1)
+ loc = frame.f_locals.copy()
+ loc.update(kwargs)
+ #print "raises frame scope: %r" % frame.f_locals
+ try:
+ code = _pytest._code.Source(code).compile()
+ py.builtin.exec_(code, frame.f_globals, loc)
+ # XXX didn't f_globals == f_locals mean something special?
+ # this is destroyed here ...
+ except expected_exception:
+ return _pytest._code.ExceptionInfo()
+ else:
+ func = args[0]
+ try:
+ func(*args[1:], **kwargs)
+ except expected_exception:
+ return _pytest._code.ExceptionInfo()
+ pytest.fail("DID NOT RAISE {0}".format(expected_exception))
+
+class RaisesContext(object):
+ def __init__(self, expected_exception):
+ self.expected_exception = expected_exception
+ self.excinfo = None
+
+ def __enter__(self):
+ self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
+ return self.excinfo
+
+ def __exit__(self, *tp):
+ __tracebackhide__ = True
+ if tp[0] is None:
+ pytest.fail("DID NOT RAISE")
+ if sys.version_info < (2, 7):
+ # py26: on __exit__() exc_value often does not contain the
+ # exception value.
+ # http://bugs.python.org/issue7853
+ if not isinstance(tp[1], BaseException):
+ exc_type, value, traceback = tp
+ tp = exc_type, exc_type(value), traceback
+ self.excinfo.__init__(tp)
+ return issubclass(self.excinfo.type, self.expected_exception)
+
+#
+# the basic pytest Function item
+#
+
+class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
+ """ a Function Item is responsible for setting up and executing a
+ Python test function.
+ """
+ _genid = None
+ def __init__(self, name, parent, args=None, config=None,
+ callspec=None, callobj=NOTSET, keywords=None, session=None,
+ fixtureinfo=None):
+ super(Function, self).__init__(name, parent, config=config,
+ session=session)
+ self._args = args
+ if callobj is not NOTSET:
+ self.obj = callobj
+
+ self.keywords.update(self.obj.__dict__)
+ if callspec:
+ self.callspec = callspec
+ self.keywords.update(callspec.keywords)
+ if keywords:
+ self.keywords.update(keywords)
+
+ if fixtureinfo is None:
+ fixtureinfo = self.session._fixturemanager.getfixtureinfo(
+ self.parent, self.obj, self.cls,
+ funcargs=not self._isyieldedfunction())
+ self._fixtureinfo = fixtureinfo
+ self.fixturenames = fixtureinfo.names_closure
+ self._initrequest()
+
+ def _initrequest(self):
+ self.funcargs = {}
+ if self._isyieldedfunction():
+ assert not hasattr(self, "callspec"), (
+ "yielded functions (deprecated) cannot have funcargs")
+ else:
+ if hasattr(self, "callspec"):
+ callspec = self.callspec
+ assert not callspec.funcargs
+ self._genid = callspec.id
+ if hasattr(callspec, "param"):
+ self.param = callspec.param
+ self._request = FixtureRequest(self)
+
+ @property
+ def function(self):
+ "underlying python 'function' object"
+ return getattr(self.obj, 'im_func', self.obj)
+
+ def _getobj(self):
+ name = self.name
+ i = name.find("[") # parametrization
+ if i != -1:
+ name = name[:i]
+ return getattr(self.parent.obj, name)
+
+ @property
+ def _pyfuncitem(self):
+ "(compatonly) for code expecting pytest-2.2 style request objects"
+ return self
+
+ def _isyieldedfunction(self):
+ return getattr(self, "_args", None) is not None
+
+ def runtest(self):
+ """ execute the underlying test function. """
+ self.ihook.pytest_pyfunc_call(pyfuncitem=self)
+
+ def setup(self):
+ # check if parametrization happened with an empty list
+ try:
+ self.callspec._emptyparamspecified
+ except AttributeError:
+ pass
+ else:
+ fs, lineno = self._getfslineno()
+ pytest.skip("got empty parameter set, function %s at %s:%d" %(
+ self.function.__name__, fs, lineno))
+ super(Function, self).setup()
+ fillfixtures(self)
+
+
+scope2props = dict(session=())
+scope2props["module"] = ("fspath", "module")
+scope2props["class"] = scope2props["module"] + ("cls",)
+scope2props["instance"] = scope2props["class"] + ("instance", )
+scope2props["function"] = scope2props["instance"] + ("function", "keywords")
+
+def scopeproperty(name=None, doc=None):
+ def decoratescope(func):
+ scopename = name or func.__name__
+ def provide(self):
+ if func.__name__ in scope2props[self.scope]:
+ return func(self)
+ raise AttributeError("%s not available in %s-scoped context" % (
+ scopename, self.scope))
+ return property(provide, None, None, func.__doc__)
+ return decoratescope
+
+
+class FixtureRequest(FuncargnamesCompatAttr):
+ """ A request for a fixture from a test or fixture function.
+
+ A request object gives access to the requesting test context
+ and has an optional ``param`` attribute in case
+ the fixture is parametrized indirectly.
+ """
+
+ def __init__(self, pyfuncitem):
+ self._pyfuncitem = pyfuncitem
+ #: fixture for which this request is being performed
+ self.fixturename = None
+ #: Scope string, one of "function", "class", "module", "session"
+ self.scope = "function"
+ self._funcargs = {}
+ self._fixturedefs = {}
+ fixtureinfo = pyfuncitem._fixtureinfo
+ self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
+ self._arg2index = {}
+ self.fixturenames = fixtureinfo.names_closure
+ self._fixturemanager = pyfuncitem.session._fixturemanager
+
+ @property
+ def node(self):
+ """ underlying collection node (depends on current request scope)"""
+ return self._getscopeitem(self.scope)
+
+
+ def _getnextfixturedef(self, argname):
+ fixturedefs = self._arg2fixturedefs.get(argname, None)
+ if fixturedefs is None:
+ # we arrive here because of a dynamic call to
+ # getfuncargvalue(argname) usage which was naturally
+ # not known at parsing/collection time
+ fixturedefs = self._fixturemanager.getfixturedefs(
+ argname, self._pyfuncitem.parent.nodeid)
+ self._arg2fixturedefs[argname] = fixturedefs
+ # fixturedefs list is immutable so we maintain a decreasing index
+ index = self._arg2index.get(argname, 0) - 1
+ if fixturedefs is None or (-index > len(fixturedefs)):
+ raise FixtureLookupError(argname, self)
+ self._arg2index[argname] = index
+ return fixturedefs[index]
+
+ @property
+ def config(self):
+ """ the pytest config object associated with this request. """
+ return self._pyfuncitem.config
+
+
+ @scopeproperty()
+ def function(self):
+ """ test function object if the request has a per-function scope. """
+ return self._pyfuncitem.obj
+
+ @scopeproperty("class")
+ def cls(self):
+ """ class (can be None) where the test function was collected. """
+ clscol = self._pyfuncitem.getparent(pytest.Class)
+ if clscol:
+ return clscol.obj
+
+ @property
+ def instance(self):
+ """ instance (can be None) on which test function was collected. """
+ # unittest support hack, see _pytest.unittest.TestCaseFunction
+ try:
+ return self._pyfuncitem._testcase
+ except AttributeError:
+ function = getattr(self, "function", None)
+ if function is not None:
+ return py.builtin._getimself(function)
+
+ @scopeproperty()
+ def module(self):
+ """ python module object where the test function was collected. """
+ return self._pyfuncitem.getparent(pytest.Module).obj
+
+ @scopeproperty()
+ def fspath(self):
+ """ the file system path of the test module which collected this test. """
+ return self._pyfuncitem.fspath
+
+ @property
+ def keywords(self):
+ """ keywords/markers dictionary for the underlying node. """
+ return self.node.keywords
+
+ @property
+ def session(self):
+ """ pytest session object. """
+ return self._pyfuncitem.session
+
+ def addfinalizer(self, finalizer):
+        """ add finalizer/teardown function to be called after the
+        last test within the requesting test context has finished
+        execution. """
+ # XXX usually this method is shadowed by fixturedef specific ones
+ self._addfinalizer(finalizer, scope=self.scope)
+
+ def _addfinalizer(self, finalizer, scope):
+ colitem = self._getscopeitem(scope)
+ self._pyfuncitem.session._setupstate.addfinalizer(
+ finalizer=finalizer, colitem=colitem)
+
+ def applymarker(self, marker):
+ """ Apply a marker to a single test function invocation.
+ This method is useful if you don't want to have a keyword/marker
+ on all function invocations.
+
+ :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
+ created by a call to ``pytest.mark.NAME(...)``.
+ """
+ try:
+ self.node.keywords[marker.markname] = marker
+ except AttributeError:
+ raise ValueError(marker)
+
+ def raiseerror(self, msg):
+ """ raise a FixtureLookupError with the given message. """
+ raise self._fixturemanager.FixtureLookupError(None, self, msg)
+
+ def _fillfixtures(self):
+ item = self._pyfuncitem
+ fixturenames = getattr(item, "fixturenames", self.fixturenames)
+ for argname in fixturenames:
+ if argname not in item.funcargs:
+ item.funcargs[argname] = self.getfuncargvalue(argname)
+
+ def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
+ """ (deprecated) Return a testing resource managed by ``setup`` &
+ ``teardown`` calls. ``scope`` and ``extrakey`` determine when the
+ ``teardown`` function will be called so that subsequent calls to
+ ``setup`` would recreate the resource. With pytest-2.3 you often
+ do not need ``cached_setup()`` as you can directly declare a scope
+ on a fixture function and register a finalizer through
+ ``request.addfinalizer()``.
+
+ :arg teardown: function receiving a previously setup resource.
+ :arg setup: a no-argument function creating a resource.
+ :arg scope: a string value out of ``function``, ``class``, ``module``
+ or ``session`` indicating the caching lifecycle of the resource.
+ :arg extrakey: added to internal caching key of (funcargname, scope).
+ """
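+        # A minimal usage sketch of this deprecated API (``create_db`` is a
+        # hypothetical setup callable, not part of this module):
+        #     def pytest_funcarg__db(request):
+        #         return request.cached_setup(setup=create_db,
+        #                                     teardown=lambda db: db.close(),
+        #                                     scope="session")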
+ if not hasattr(self.config, '_setupcache'):
+ self.config._setupcache = {} # XXX weakref?
+ cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
+ cache = self.config._setupcache
+ try:
+ val = cache[cachekey]
+ except KeyError:
+ self._check_scope(self.fixturename, self.scope, scope)
+ val = setup()
+ cache[cachekey] = val
+ if teardown is not None:
+ def finalizer():
+ del cache[cachekey]
+ teardown(val)
+ self._addfinalizer(finalizer, scope=scope)
+ return val
+
+ def getfuncargvalue(self, argname):
+ """ Dynamically retrieve a named fixture function argument.
+
+        As of pytest-2.3, it is easier and usually better to access other
+        fixture values by stating them as input arguments in the fixture
+        function. If you can only decide whether to use another fixture at
+        test setup time, you may use this function to retrieve it inside a
+        fixture function body.
+ """
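+        # Illustrative sketch: inside a fixture body one can lazily pull in
+        # another fixture, e.g. ``tmp = request.getfuncargvalue("tmpdir")``
+        # (``tmpdir`` being pytest's built-in temporary-directory fixture).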
+ return self._get_active_fixturedef(argname).cached_result[0]
+
+ def _get_active_fixturedef(self, argname):
+ try:
+ return self._fixturedefs[argname]
+ except KeyError:
+ try:
+ fixturedef = self._getnextfixturedef(argname)
+ except FixtureLookupError:
+ if argname == "request":
+ class PseudoFixtureDef:
+ cached_result = (self, [0], None)
+ scope = "function"
+ return PseudoFixtureDef
+ raise
+        # the call below is intentionally dedented out of the ``except``
+        # block above so that, on python3, the pending exception does not
+        # leak (as context) into exceptions raised during the fixture call
+ result = self._getfuncargvalue(fixturedef)
+ self._funcargs[argname] = result
+ self._fixturedefs[argname] = fixturedef
+ return fixturedef
+
+ def _get_fixturestack(self):
+ current = self
+ l = []
+ while 1:
+ fixturedef = getattr(current, "_fixturedef", None)
+ if fixturedef is None:
+ l.reverse()
+ return l
+ l.append(fixturedef)
+ current = current._parent_request
+
+ def _getfuncargvalue(self, fixturedef):
+ # prepare a subrequest object before calling fixture function
+ # (latter managed by fixturedef)
+ argname = fixturedef.argname
+ funcitem = self._pyfuncitem
+ scope = fixturedef.scope
+ try:
+ param = funcitem.callspec.getparam(argname)
+ except (AttributeError, ValueError):
+ param = NOTSET
+ param_index = 0
+ else:
+ # indices might not be set if old-style metafunc.addcall() was used
+ param_index = funcitem.callspec.indices.get(argname, 0)
+ # if a parametrize invocation set a scope it will override
+ # the static scope defined with the fixture function
+ paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
+ if paramscopenum is not None:
+ scope = scopes[paramscopenum]
+
+ subrequest = SubRequest(self, scope, param, param_index, fixturedef)
+
+ # check if a higher-level scoped fixture accesses a lower level one
+ subrequest._check_scope(argname, self.scope, scope)
+
+        # clear sys.exc_info before invoking the fixture (python bug?);
+        # if it's not explicitly cleared it will leak into the call
+ exc_clear()
+ try:
+ # call the fixture function
+ val = fixturedef.execute(request=subrequest)
+ finally:
+ # if fixture function failed it might have registered finalizers
+ self.session._setupstate.addfinalizer(fixturedef.finish,
+ subrequest.node)
+ return val
+
+ def _check_scope(self, argname, invoking_scope, requested_scope):
+ if argname == "request":
+ return
+ if scopemismatch(invoking_scope, requested_scope):
+ # try to report something helpful
+ lines = self._factorytraceback()
+ pytest.fail("ScopeMismatch: You tried to access the %r scoped "
+ "fixture %r with a %r scoped request object, "
+ "involved factories\n%s" %(
+ (requested_scope, argname, invoking_scope, "\n".join(lines))),
+ pytrace=False)
+
+ def _factorytraceback(self):
+ lines = []
+ for fixturedef in self._get_fixturestack():
+ factory = fixturedef.func
+ fs, lineno = getfslineno(factory)
+ p = self._pyfuncitem.session.fspath.bestrelpath(fs)
+ args = _format_args(factory)
+ lines.append("%s:%d: def %s%s" %(
+ p, lineno, factory.__name__, args))
+ return lines
+
+ def _getscopeitem(self, scope):
+ if scope == "function":
+ # this might also be a non-function Item despite its attribute name
+ return self._pyfuncitem
+ node = get_scope_node(self._pyfuncitem, scope)
+ if node is None and scope == "class":
+ # fallback to function item itself
+ node = self._pyfuncitem
+ assert node
+ return node
+
+ def __repr__(self):
+ return "<FixtureRequest for %r>" %(self.node)
+
+
+class SubRequest(FixtureRequest):
+ """ a sub request for handling getting a fixture from a
+ test function/fixture. """
+ def __init__(self, request, scope, param, param_index, fixturedef):
+ self._parent_request = request
+ self.fixturename = fixturedef.argname
+ if param is not NOTSET:
+ self.param = param
+ self.param_index = param_index
+ self.scope = scope
+ self._fixturedef = fixturedef
+ self.addfinalizer = fixturedef.addfinalizer
+ self._pyfuncitem = request._pyfuncitem
+ self._funcargs = request._funcargs
+ self._fixturedefs = request._fixturedefs
+ self._arg2fixturedefs = request._arg2fixturedefs
+ self._arg2index = request._arg2index
+ self.fixturenames = request.fixturenames
+ self._fixturemanager = request._fixturemanager
+
+ def __repr__(self):
+ return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
+
+
+class ScopeMismatchError(Exception):
+    """ A fixture function tries to use a different fixture function
+    which has a lower scope (e.g. a Session one calls a function one)
+ """
+
+scopes = "session module class function".split()
+scopenum_function = scopes.index("function")
+def scopemismatch(currentscope, newscope):
+ return scopes.index(newscope) > scopes.index(currentscope)
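+# e.g. scopemismatch("module", "function") is True: a module-scoped request
+# may not use a function-scoped fixture, while the reverse direction is fine.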
+
+
+class FixtureLookupError(LookupError):
+ """ could not return a requested Fixture (missing or invalid). """
+ def __init__(self, argname, request, msg=None):
+ self.argname = argname
+ self.request = request
+ self.fixturestack = request._get_fixturestack()
+ self.msg = msg
+
+ def formatrepr(self):
+ tblines = []
+ addline = tblines.append
+ stack = [self.request._pyfuncitem.obj]
+ stack.extend(map(lambda x: x.func, self.fixturestack))
+ msg = self.msg
+ if msg is not None:
+            # the last fixture raised an error; present
+            # it at the requesting side
+ stack = stack[:-1]
+ for function in stack:
+ fspath, lineno = getfslineno(function)
+ try:
+ lines, _ = inspect.getsourcelines(get_real_func(function))
+ except (IOError, IndexError):
+ error_msg = "file %s, line %s: source code not available"
+ addline(error_msg % (fspath, lineno+1))
+ else:
+ addline("file %s, line %s" % (fspath, lineno+1))
+ for i, line in enumerate(lines):
+ line = line.rstrip()
+ addline(" " + line)
+ if line.lstrip().startswith('def'):
+ break
+
+ if msg is None:
+ fm = self.request._fixturemanager
+ available = []
+ for name, fixturedef in fm._arg2fixturedefs.items():
+ parentid = self.request._pyfuncitem.parent.nodeid
+ faclist = list(fm._matchfactories(fixturedef, parentid))
+ if faclist:
+ available.append(name)
+ msg = "fixture %r not found" % (self.argname,)
+ msg += "\n available fixtures: %s" %(", ".join(available),)
+ msg += "\n use 'py.test --fixtures [testpath]' for help on them."
+
+ return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
+
+class FixtureLookupErrorRepr(TerminalRepr):
+ def __init__(self, filename, firstlineno, tblines, errorstring, argname):
+ self.tblines = tblines
+ self.errorstring = errorstring
+ self.filename = filename
+ self.firstlineno = firstlineno
+ self.argname = argname
+
+ def toterminal(self, tw):
+ #tw.line("FixtureLookupError: %s" %(self.argname), red=True)
+ for tbline in self.tblines:
+ tw.line(tbline.rstrip())
+ for line in self.errorstring.split("\n"):
+ tw.line(" " + line.strip(), red=True)
+ tw.line()
+ tw.line("%s:%d" % (self.filename, self.firstlineno+1))
+
+class FixtureManager:
+ """
+    pytest fixture definitions and related information are stored and
+    managed by this class.
+
+ During collection fm.parsefactories() is called multiple times to parse
+ fixture function definitions into FixtureDef objects and internal
+ data structures.
+
+ During collection of test functions, metafunc-mechanics instantiate
+ a FuncFixtureInfo object which is cached per node/func-name.
+ This FuncFixtureInfo object is later retrieved by Function nodes
+ which themselves offer a fixturenames attribute.
+
+ The FuncFixtureInfo object holds information about fixtures and FixtureDefs
+ relevant for a particular function. An initial list of fixtures is
+ assembled like this:
+
+ - ini-defined usefixtures
+ - autouse-marked fixtures along the collection chain up from the function
+ - usefixtures markers at module/class/function level
+ - test function funcargs
+
+ Subsequently the funcfixtureinfo.fixturenames attribute is computed
+ as the closure of the fixtures needed to setup the initial fixtures,
+    i.e. fixtures needed by fixture functions themselves are appended
+ to the fixturenames list.
+
+ Upon the test-setup phases all fixturenames are instantiated, retrieved
+ by a lookup of their FuncFixtureInfo.
+ """
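+    # Illustrative example (hypothetical fixture names): for a test
+    #     def test_x(db): ...
+    # where the ``db`` fixture itself requires ``tmpdir``, the computed
+    # names_closure is roughly ["db", "tmpdir"] plus any autouse fixtures,
+    # even though the test function never names "tmpdir" directly.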
+
+ _argprefix = "pytest_funcarg__"
+ FixtureLookupError = FixtureLookupError
+ FixtureLookupErrorRepr = FixtureLookupErrorRepr
+
+ def __init__(self, session):
+ self.session = session
+ self.config = session.config
+ self._arg2fixturedefs = {}
+ self._holderobjseen = set()
+ self._arg2finish = {}
+ self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
+ session.config.pluginmanager.register(self, "funcmanage")
+
+
+ def getfixtureinfo(self, node, func, cls, funcargs=True):
+ if funcargs and not hasattr(node, "nofuncargs"):
+ if cls is not None:
+ startindex = 1
+ else:
+ startindex = None
+ argnames = getfuncargnames(func, startindex)
+ else:
+ argnames = ()
+ usefixtures = getattr(func, "usefixtures", None)
+ initialnames = argnames
+ if usefixtures is not None:
+ initialnames = usefixtures.args + initialnames
+ fm = node.session._fixturemanager
+ names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
+ node)
+ return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
+
+ def pytest_plugin_registered(self, plugin):
+ nodeid = None
+ try:
+ p = py.path.local(plugin.__file__)
+ except AttributeError:
+ pass
+ else:
+ # construct the base nodeid which is later used to check
+ # what fixtures are visible for particular tests (as denoted
+ # by their test id)
+ if p.basename.startswith("conftest.py"):
+ nodeid = p.dirpath().relto(self.config.rootdir)
+ if p.sep != "/":
+ nodeid = nodeid.replace(p.sep, "/")
+ self.parsefactories(plugin, nodeid)
+
+ def _getautousenames(self, nodeid):
+        """ return a list of autouse fixture names for the given nodeid. """
+ autousenames = []
+ for baseid, basenames in self._nodeid_and_autousenames:
+ if nodeid.startswith(baseid):
+ if baseid:
+ i = len(baseid)
+ nextchar = nodeid[i:i+1]
+ if nextchar and nextchar not in ":/":
+ continue
+ autousenames.extend(basenames)
+ # make sure autousenames are sorted by scope, scopenum 0 is session
+ autousenames.sort(
+ key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
+ return autousenames
+
+ def getfixtureclosure(self, fixturenames, parentnode):
+        # collect the closure of all fixtures, starting with the given
+        # fixturenames as the initial set.  As we have to visit all
+        # factory definitions anyway, we also return an arg2fixturedefs
+ # mapping so that the caller can reuse it and does not have
+ # to re-discover fixturedefs again for each fixturename
+ # (discovering matching fixtures for a given name/node is expensive)
+
+ parentid = parentnode.nodeid
+ fixturenames_closure = self._getautousenames(parentid)
+ def merge(otherlist):
+ for arg in otherlist:
+ if arg not in fixturenames_closure:
+ fixturenames_closure.append(arg)
+ merge(fixturenames)
+ arg2fixturedefs = {}
+ lastlen = -1
+ while lastlen != len(fixturenames_closure):
+ lastlen = len(fixturenames_closure)
+ for argname in fixturenames_closure:
+ if argname in arg2fixturedefs:
+ continue
+ fixturedefs = self.getfixturedefs(argname, parentid)
+ if fixturedefs:
+ arg2fixturedefs[argname] = fixturedefs
+ merge(fixturedefs[-1].argnames)
+ return fixturenames_closure, arg2fixturedefs
+
+ def pytest_generate_tests(self, metafunc):
+ for argname in metafunc.fixturenames:
+ faclist = metafunc._arg2fixturedefs.get(argname)
+ if faclist:
+ fixturedef = faclist[-1]
+ if fixturedef.params is not None:
+ func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
+ # skip directly parametrized arguments
+ argnames = func_params[0]
+ if not isinstance(argnames, (tuple, list)):
+ argnames = [x.strip() for x in argnames.split(",") if x.strip()]
+ if argname not in func_params and argname not in argnames:
+ metafunc.parametrize(argname, fixturedef.params,
+ indirect=True, scope=fixturedef.scope,
+ ids=fixturedef.ids)
+ else:
+ continue # will raise FixtureLookupError at setup time
+
+ def pytest_collection_modifyitems(self, items):
+ # separate parametrized setups
+ items[:] = reorder_items(items)
+
+ def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
+ if nodeid is not NOTSET:
+ holderobj = node_or_obj
+ else:
+ holderobj = node_or_obj.obj
+ nodeid = node_or_obj.nodeid
+ if holderobj in self._holderobjseen:
+ return
+ self._holderobjseen.add(holderobj)
+ autousenames = []
+ for name in dir(holderobj):
+ obj = getattr(holderobj, name, None)
+ # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
+ # or are "@pytest.fixture" marked
+ marker = getfixturemarker(obj)
+ if marker is None:
+ if not name.startswith(self._argprefix):
+ continue
+ if not callable(obj):
+ continue
+ marker = defaultfuncargprefixmarker
+ name = name[len(self._argprefix):]
+ elif not isinstance(marker, FixtureFunctionMarker):
+ # magic globals with __getattr__ might have got us a wrong
+ # fixture attribute
+ continue
+ else:
+ assert not name.startswith(self._argprefix)
+ fixturedef = FixtureDef(self, nodeid, name, obj,
+ marker.scope, marker.params,
+ yieldctx=marker.yieldctx,
+ unittest=unittest, ids=marker.ids)
+ faclist = self._arg2fixturedefs.setdefault(name, [])
+ if fixturedef.has_location:
+ faclist.append(fixturedef)
+ else:
+ # fixturedefs with no location are at the front
+ # so this inserts the current fixturedef after the
+ # existing fixturedefs from external plugins but
+ # before the fixturedefs provided in conftests.
+ i = len([f for f in faclist if not f.has_location])
+ faclist.insert(i, fixturedef)
+ if marker.autouse:
+ autousenames.append(name)
+ if autousenames:
+ self._nodeid_and_autousenames.append((nodeid or '', autousenames))
+
+ def getfixturedefs(self, argname, nodeid):
+ try:
+ fixturedefs = self._arg2fixturedefs[argname]
+ except KeyError:
+ return None
+ else:
+ return tuple(self._matchfactories(fixturedefs, nodeid))
+
+ def _matchfactories(self, fixturedefs, nodeid):
+ for fixturedef in fixturedefs:
+ if nodeid.startswith(fixturedef.baseid):
+ yield fixturedef
+
+
+def fail_fixturefunc(fixturefunc, msg):
+ fs, lineno = getfslineno(fixturefunc)
+ location = "%s:%s" % (fs, lineno+1)
+ source = _pytest._code.Source(fixturefunc)
+ pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
+ pytrace=False)
+
+def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
+ if yieldctx:
+ if not is_generator(fixturefunc):
+ fail_fixturefunc(fixturefunc,
+ msg="yield_fixture requires yield statement in function")
+ iter = fixturefunc(**kwargs)
+ next = getattr(iter, "__next__", None)
+ if next is None:
+ next = getattr(iter, "next")
+ res = next()
+ def teardown():
+ try:
+ next()
+ except StopIteration:
+ pass
+ else:
+ fail_fixturefunc(fixturefunc,
+ "yield_fixture function has more than one 'yield'")
+ request.addfinalizer(teardown)
+ else:
+ if is_generator(fixturefunc):
+ fail_fixturefunc(fixturefunc,
+ msg="pytest.fixture functions cannot use ``yield``. "
+ "Instead write and return an inner function/generator "
+ "and let the consumer call and iterate over it.")
+ res = fixturefunc(**kwargs)
+ return res
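+# A minimal sketch of the yield-style fixture handled above
+# (``acquire``/``release`` are hypothetical helpers):
+#     @pytest.yield_fixture
+#     def resource():
+#         r = acquire()       # setup part, runs up to the yield
+#         yield r
+#         r.release()         # teardown part, run via the registered finalizer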
+
+class FixtureDef:
+ """ A container for a factory definition. """
+ def __init__(self, fixturemanager, baseid, argname, func, scope, params,
+ yieldctx, unittest=False, ids=None):
+ self._fixturemanager = fixturemanager
+ self.baseid = baseid or ''
+ self.has_location = baseid is not None
+ self.func = func
+ self.argname = argname
+ self.scope = scope
+ self.scopenum = scopes.index(scope or "function")
+ self.params = params
+ startindex = unittest and 1 or None
+ self.argnames = getfuncargnames(func, startindex=startindex)
+ self.yieldctx = yieldctx
+ self.unittest = unittest
+ self.ids = ids
+ self._finalizer = []
+
+ def addfinalizer(self, finalizer):
+ self._finalizer.append(finalizer)
+
+ def finish(self):
+ try:
+ while self._finalizer:
+ func = self._finalizer.pop()
+ func()
+ finally:
+ # even if finalization fails, we invalidate
+ # the cached fixture value
+ if hasattr(self, "cached_result"):
+ del self.cached_result
+
+ def execute(self, request):
+ # get required arguments and register our own finish()
+ # with their finalization
+ kwargs = {}
+ for argname in self.argnames:
+ fixturedef = request._get_active_fixturedef(argname)
+ result, arg_cache_key, exc = fixturedef.cached_result
+ request._check_scope(argname, request.scope, fixturedef.scope)
+ kwargs[argname] = result
+ if argname != "request":
+ fixturedef.addfinalizer(self.finish)
+
+ my_cache_key = request.param_index
+ cached_result = getattr(self, "cached_result", None)
+ if cached_result is not None:
+ result, cache_key, err = cached_result
+ if my_cache_key == cache_key:
+ if err is not None:
+ py.builtin._reraise(*err)
+ else:
+ return result
+ # we have a previous but differently parametrized fixture instance
+ # so we need to tear it down before creating a new one
+ self.finish()
+ assert not hasattr(self, "cached_result")
+
+ fixturefunc = self.func
+
+ if self.unittest:
+ if request.instance is not None:
+ # bind the unbound method to the TestCase instance
+ fixturefunc = self.func.__get__(request.instance)
+ else:
+ # the fixture function needs to be bound to the actual
+ # request.instance so that code working with "self" behaves
+ # as expected.
+ if request.instance is not None:
+ fixturefunc = getimfunc(self.func)
+ if fixturefunc != self.func:
+ fixturefunc = fixturefunc.__get__(request.instance)
+
+ try:
+ result = call_fixture_func(fixturefunc, request, kwargs,
+ self.yieldctx)
+ except Exception:
+ self.cached_result = (None, my_cache_key, sys.exc_info())
+ raise
+ self.cached_result = (result, my_cache_key, None)
+ return result
+
+ def __repr__(self):
+ return ("<FixtureDef name=%r scope=%r baseid=%r >" %
+ (self.argname, self.scope, self.baseid))
+
+def num_mock_patch_args(function):
+ """ return number of arguments used up by mock arguments (if any) """
+ patchings = getattr(function, "patchings", None)
+ if not patchings:
+ return 0
+ mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
+ if mock is not None:
+ return len([p for p in patchings
+ if not p.attribute_name and p.new is mock.DEFAULT])
+ return len(patchings)
+
+
+def getfuncargnames(function, startindex=None):
+ # XXX merge with main.py's varnames
+ #assert not isclass(function)
+ realfunction = function
+ while hasattr(realfunction, "__wrapped__"):
+ realfunction = realfunction.__wrapped__
+ if startindex is None:
+ startindex = inspect.ismethod(function) and 1 or 0
+ if realfunction != function:
+ startindex += num_mock_patch_args(function)
+ function = realfunction
+ if isinstance(function, functools.partial):
+ argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
+ partial = function
+ argnames = argnames[len(partial.args):]
+ if partial.keywords:
+ for kw in partial.keywords:
+ argnames.remove(kw)
+ else:
+ argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
+ defaults = getattr(function, 'func_defaults',
+ getattr(function, '__defaults__', None)) or ()
+ numdefaults = len(defaults)
+ if numdefaults:
+ return tuple(argnames[startindex:-numdefaults])
+ return tuple(argnames[startindex:])
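+# e.g. for ``def f(self, x, y=1)`` and startindex=1 this returns ("x",):
+# the bound/self argument is skipped and trailing defaulted arguments dropped.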
+
+# algorithm for sorting on a per-parametrized resource setup basis
+# it is called for scopenum==0 (session) first and performs sorting
+# down to the lower scopes so as to minimize the number of "high scope"
+# setups and teardowns
+
+def reorder_items(items):
+ argkeys_cache = {}
+ for scopenum in range(0, scopenum_function):
+ argkeys_cache[scopenum] = d = {}
+ for item in items:
+ keys = set(get_parametrized_fixture_keys(item, scopenum))
+ if keys:
+ d[item] = keys
+ return reorder_items_atscope(items, set(), argkeys_cache, 0)
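+# Illustrative effect (hypothetical tests): with a module-scoped fixture
+# parametrized over "a" and "b", items collected as
+#     [test1[a], test2[b], test3[a]]
+# are regrouped by parameter key, e.g. into [test1[a], test3[a], test2[b]],
+# so each "high scope" setup/teardown happens only once.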
+
+def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
+ if scopenum >= scopenum_function or len(items) < 3:
+ return items
+ items_done = []
+ while 1:
+ items_before, items_same, items_other, newignore = \
+ slice_items(items, ignore, argkeys_cache[scopenum])
+ items_before = reorder_items_atscope(
+ items_before, ignore, argkeys_cache,scopenum+1)
+ if items_same is None:
+ # nothing to reorder in this scope
+ assert items_other is None
+ return items_done + items_before
+ items_done.extend(items_before)
+ items = items_same + items_other
+ ignore = newignore
+
+
+def slice_items(items, ignore, scoped_argkeys_cache):
+ # we pick the first item which uses a fixture instance in the
+ # requested scope and which we haven't seen yet. We slice the input
+    # items list into a list of items_before, items_same and
+ # items_other
+ if scoped_argkeys_cache: # do we need to do work at all?
+ it = iter(items)
+ # first find a slicing key
+ for i, item in enumerate(it):
+ argkeys = scoped_argkeys_cache.get(item)
+ if argkeys is not None:
+ argkeys = argkeys.difference(ignore)
+ if argkeys: # found a slicing key
+ slicing_argkey = argkeys.pop()
+ items_before = items[:i]
+ items_same = [item]
+ items_other = []
+ # now slice the remainder of the list
+ for item in it:
+ argkeys = scoped_argkeys_cache.get(item)
+ if argkeys and slicing_argkey in argkeys and \
+ slicing_argkey not in ignore:
+ items_same.append(item)
+ else:
+ items_other.append(item)
+ newignore = ignore.copy()
+ newignore.add(slicing_argkey)
+ return (items_before, items_same, items_other, newignore)
+ return items, None, None, None
+
+def get_parametrized_fixture_keys(item, scopenum):
+ """ return list of keys for all parametrized arguments which match
+ the specified scope. """
+ assert scopenum < scopenum_function # function
+ try:
+ cs = item.callspec
+ except AttributeError:
+ pass
+ else:
+        # cs.indices.items() returns argnames in arbitrary order, but
+        # then again different functions (items) can change the order of
+        # arguments, so it probably doesn't matter much
+ for argname, param_index in cs.indices.items():
+ if cs._arg2scopenum[argname] != scopenum:
+ continue
+ if scopenum == 0: # session
+ key = (argname, param_index)
+ elif scopenum == 1: # module
+ key = (argname, param_index, item.fspath)
+ elif scopenum == 2: # class
+ key = (argname, param_index, item.fspath, item.cls)
+ yield key
+
+
+def xunitsetup(obj, name):
+ meth = getattr(obj, name, None)
+ if getfixturemarker(meth) is None:
+ return meth
+
+def getfixturemarker(obj):
+    """ return fixturemarker or None if it doesn't exist or raises
+    an exception."""
+ try:
+ return getattr(obj, "_pytestfixturefunction", None)
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ # some objects raise errors like request (from flask import request)
+ # we don't expect them to be fixture functions
+ return None
+
+scopename2class = {
+ 'class': Class,
+ 'module': Module,
+ 'function': pytest.Item,
+}
+def get_scope_node(node, scope):
+ cls = scopename2class.get(scope)
+ if cls is None:
+ if scope == "session":
+ return node.session
+ raise ValueError("unknown scope")
+ return node.getparent(cls)
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/recwarn.py b/testing/web-platform/tests/tools/pytest/_pytest/recwarn.py
new file mode 100644
index 000000000..a89474c03
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/recwarn.py
@@ -0,0 +1,221 @@
+""" recording warnings during test function execution. """
+
+import inspect
+
+import _pytest._code
+import py
+import sys
+import warnings
+import pytest
+
+
+@pytest.yield_fixture
+def recwarn(request):
+ """Return a WarningsRecorder instance that provides these methods:
+
+    * ``pop(category=None)``: return the first recorded warning matching the category.
+ * ``clear()``: clear list of warnings
+
+ See http://docs.python.org/library/warnings.html for information
+ on warning categories.
+ """
+ wrec = WarningsRecorder()
+ with wrec:
+ warnings.simplefilter('default')
+ yield wrec
+
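+# Illustrative usage sketch of the fixture above:
+#     def test_deprecation(recwarn):
+#         warnings.warn("deprecated", DeprecationWarning)
+#         w = recwarn.pop(DeprecationWarning)
+#         assert issubclass(w.category, DeprecationWarning)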
+
+def pytest_namespace():
+ return {'deprecated_call': deprecated_call,
+ 'warns': warns}
+
+
+def deprecated_call(func=None, *args, **kwargs):
+ """ assert that calling ``func(*args, **kwargs)`` triggers a
+ ``DeprecationWarning`` or ``PendingDeprecationWarning``.
+
+ This function can be used as a context manager::
+
+ >>> with deprecated_call():
+ ... myobject.deprecated_method()
+
+ Note: we cannot use WarningsRecorder here because it is still subject
+ to the mechanism that prevents warnings of the same type from being
+ triggered twice for the same module. See #1190.
+ """
+ if not func:
+ return WarningsChecker(expected_warning=DeprecationWarning)
+
+ categories = []
+
+ def warn_explicit(message, category, *args, **kwargs):
+ categories.append(category)
+ old_warn_explicit(message, category, *args, **kwargs)
+
+ def warn(message, category=None, *args, **kwargs):
+ if isinstance(message, Warning):
+ categories.append(message.__class__)
+ else:
+ categories.append(category)
+ old_warn(message, category, *args, **kwargs)
+
+ old_warn = warnings.warn
+ old_warn_explicit = warnings.warn_explicit
+ warnings.warn_explicit = warn_explicit
+ warnings.warn = warn
+ try:
+ ret = func(*args, **kwargs)
+ finally:
+ warnings.warn_explicit = old_warn_explicit
+ warnings.warn = old_warn
+ deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
+ if not any(issubclass(c, deprecation_categories) for c in categories):
+ __tracebackhide__ = True
+ raise AssertionError("%r did not produce DeprecationWarning" % (func,))
+ return ret
+
+
+def warns(expected_warning, *args, **kwargs):
+ """Assert that code raises a particular class of warning.
+
+ Specifically, the input @expected_warning can be a warning class or
+    tuple of warning classes, and the code must trigger that warning
+ (if a single class) or one of those warnings (if a tuple).
+
+ This helper produces a list of ``warnings.WarningMessage`` objects,
+ one for each warning raised.
+
+ This function can be used as a context manager, or any of the other ways
+ ``pytest.raises`` can be used::
+
+ >>> with warns(RuntimeWarning):
+ ... warnings.warn("my warning", RuntimeWarning)
+ """
+ wcheck = WarningsChecker(expected_warning)
+ if not args:
+ return wcheck
+ elif isinstance(args[0], str):
+ code, = args
+ assert isinstance(code, str)
+ frame = sys._getframe(1)
+ loc = frame.f_locals.copy()
+ loc.update(kwargs)
+
+ with wcheck:
+ code = _pytest._code.Source(code).compile()
+ py.builtin.exec_(code, frame.f_globals, loc)
+ else:
+ func = args[0]
+ with wcheck:
+ return func(*args[1:], **kwargs)
+
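+# Besides the context-manager form shown in the docstring, the code above also
+# accepts a callable (or a source string), e.g. a sketch:
+#     pytest.warns(UserWarning, lambda: warnings.warn("beep", UserWarning))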
+
+class RecordedWarning(object):
+ def __init__(self, message, category, filename, lineno, file, line):
+ self.message = message
+ self.category = category
+ self.filename = filename
+ self.lineno = lineno
+ self.file = file
+ self.line = line
+
+
+class WarningsRecorder(object):
+ """A context manager to record raised warnings.
+
+ Adapted from `warnings.catch_warnings`.
+ """
+
+ def __init__(self, module=None):
+ self._module = sys.modules['warnings'] if module is None else module
+ self._entered = False
+ self._list = []
+
+ @property
+ def list(self):
+ """The list of recorded warnings."""
+ return self._list
+
+ def __getitem__(self, i):
+ """Get a recorded warning by index."""
+ return self._list[i]
+
+ def __iter__(self):
+ """Iterate through the recorded warnings."""
+ return iter(self._list)
+
+ def __len__(self):
+ """The number of recorded warnings."""
+ return len(self._list)
+
+ def pop(self, cls=Warning):
+        """Pop the first recorded warning matching ``cls``; raise AssertionError if none matches."""
+ for i, w in enumerate(self._list):
+ if issubclass(w.category, cls):
+ return self._list.pop(i)
+ __tracebackhide__ = True
+ raise AssertionError("%r not found in warning list" % cls)
+
+ def clear(self):
+ """Clear the list of recorded warnings."""
+ self._list[:] = []
+
+ def __enter__(self):
+ if self._entered:
+ __tracebackhide__ = True
+ raise RuntimeError("Cannot enter %r twice" % self)
+ self._entered = True
+ self._filters = self._module.filters
+ self._module.filters = self._filters[:]
+ self._showwarning = self._module.showwarning
+
+ def showwarning(message, category, filename, lineno,
+ file=None, line=None):
+ self._list.append(RecordedWarning(
+ message, category, filename, lineno, file, line))
+
+ # still perform old showwarning functionality
+ self._showwarning(
+ message, category, filename, lineno, file=file, line=line)
+
+ self._module.showwarning = showwarning
+
+ # allow the same warning to be raised more than once
+
+ self._module.simplefilter('always')
+ return self
+
+ def __exit__(self, *exc_info):
+ if not self._entered:
+ __tracebackhide__ = True
+ raise RuntimeError("Cannot exit %r without entering first" % self)
+ self._module.filters = self._filters
+ self._module.showwarning = self._showwarning
+
+
+class WarningsChecker(WarningsRecorder):
+ def __init__(self, expected_warning=None, module=None):
+ super(WarningsChecker, self).__init__(module=module)
+
+ msg = ("exceptions must be old-style classes or "
+ "derived from Warning, not %s")
+ if isinstance(expected_warning, tuple):
+ for exc in expected_warning:
+ if not inspect.isclass(exc):
+ raise TypeError(msg % type(exc))
+ elif inspect.isclass(expected_warning):
+ expected_warning = (expected_warning,)
+ elif expected_warning is not None:
+ raise TypeError(msg % type(expected_warning))
+
+ self.expected_warning = expected_warning
+
+ def __exit__(self, *exc_info):
+ super(WarningsChecker, self).__exit__(*exc_info)
+
+ # only check if we're not currently handling an exception
+ if all(a is None for a in exc_info):
+ if self.expected_warning is not None:
+ if not any(r.category in self.expected_warning for r in self):
+ __tracebackhide__ = True
+ pytest.fail("DID NOT WARN")
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/resultlog.py b/testing/web-platform/tests/tools/pytest/_pytest/resultlog.py
new file mode 100644
index 000000000..3670f0214
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/resultlog.py
@@ -0,0 +1,104 @@
+""" log machine-parseable test session result information in a plain
+text file.
+"""
+
+import py
+import os
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting", "resultlog plugin options")
+ group.addoption('--resultlog', '--result-log', action="store",
+ metavar="path", default=None,
+ help="path for machine-readable result log.")
+
+def pytest_configure(config):
+ resultlog = config.option.resultlog
+ # prevent opening resultlog on slave nodes (xdist)
+ if resultlog and not hasattr(config, 'slaveinput'):
+ dirname = os.path.dirname(os.path.abspath(resultlog))
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+ logfile = open(resultlog, 'w', 1) # line buffered
+ config._resultlog = ResultLog(config, logfile)
+ config.pluginmanager.register(config._resultlog)
+
+def pytest_unconfigure(config):
+ resultlog = getattr(config, '_resultlog', None)
+ if resultlog:
+ resultlog.logfile.close()
+ del config._resultlog
+ config.pluginmanager.unregister(resultlog)
+
+def generic_path(item):
+ chain = item.listchain()
+ gpath = [chain[0].name]
+ fspath = chain[0].fspath
+ fspart = False
+ for node in chain[1:]:
+ newfspath = node.fspath
+ if newfspath == fspath:
+ if fspart:
+ gpath.append(':')
+ fspart = False
+ else:
+ gpath.append('.')
+ else:
+ gpath.append('/')
+ fspart = True
+ name = node.name
+ if name[0] in '([':
+ gpath.pop()
+ gpath.append(name)
+ fspath = newfspath
+ return ''.join(gpath)
+
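+# Illustrative result: for a chain session -> module "test_a.py" ->
+# class "TestX" -> function "test_one" this yields roughly
+# "<session-name>/test_a.py:TestX.test_one" ('/' marks a new file,
+# then ':' and '.' separate nodes within that file).
+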
+class ResultLog(object):
+ def __init__(self, config, logfile):
+ self.config = config
+ self.logfile = logfile # preferably line buffered
+
+ def write_log_entry(self, testpath, lettercode, longrepr):
+ py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
+ for line in longrepr.splitlines():
+ py.builtin.print_(" %s" % line, file=self.logfile)
+
+ def log_outcome(self, report, lettercode, longrepr):
+ testpath = getattr(report, 'nodeid', None)
+ if testpath is None:
+ testpath = report.fspath
+ self.write_log_entry(testpath, lettercode, longrepr)
+
+ def pytest_runtest_logreport(self, report):
+ if report.when != "call" and report.passed:
+ return
+ res = self.config.hook.pytest_report_teststatus(report=report)
+ code = res[1]
+ if code == 'x':
+ longrepr = str(report.longrepr)
+ elif code == 'X':
+ longrepr = ''
+ elif report.passed:
+ longrepr = ""
+ elif report.failed:
+ longrepr = str(report.longrepr)
+ elif report.skipped:
+ longrepr = str(report.longrepr[2])
+ self.log_outcome(report, code, longrepr)
+
+ def pytest_collectreport(self, report):
+ if not report.passed:
+ if report.failed:
+ code = "F"
+ longrepr = str(report.longrepr)
+ else:
+ assert report.skipped
+ code = "S"
+ longrepr = "%s:%d: %s" % report.longrepr
+ self.log_outcome(report, code, longrepr)
+
+ def pytest_internalerror(self, excrepr):
+ reprcrash = getattr(excrepr, 'reprcrash', None)
+ path = getattr(reprcrash, "path", None)
+ if path is None:
+ path = "cwd:%s" % py.path.local()
+ self.write_log_entry(path, '!', str(excrepr))
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/runner.py b/testing/web-platform/tests/tools/pytest/_pytest/runner.py
new file mode 100644
index 000000000..cde94c8c8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/runner.py
@@ -0,0 +1,515 @@
+""" basic collect and runtest protocol implementations """
+import bdb
+import sys
+from time import time
+
+import py
+import pytest
+from _pytest._code.code import TerminalRepr, ExceptionInfo
+
+
+def pytest_namespace():
+ return {
+ 'fail' : fail,
+ 'skip' : skip,
+ 'importorskip' : importorskip,
+ 'exit' : exit,
+ }
+
+#
+# pytest plugin hooks
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting", "reporting", after="general")
+ group.addoption('--durations',
+ action="store", type=int, default=None, metavar="N",
+        help="show N slowest setup/test durations (N=0 for all).")
+
+def pytest_terminal_summary(terminalreporter):
+ durations = terminalreporter.config.option.durations
+ if durations is None:
+ return
+ tr = terminalreporter
+ dlist = []
+ for replist in tr.stats.values():
+ for rep in replist:
+ if hasattr(rep, 'duration'):
+ dlist.append(rep)
+ if not dlist:
+ return
+ dlist.sort(key=lambda x: x.duration)
+ dlist.reverse()
+ if not durations:
+ tr.write_sep("=", "slowest test durations")
+ else:
+ tr.write_sep("=", "slowest %s test durations" % durations)
+ dlist = dlist[:durations]
+
+ for rep in dlist:
+ nodeid = rep.nodeid.replace("::()::", "::")
+ tr.write_line("%02.2fs %-8s %s" %
+ (rep.duration, rep.when, nodeid))
+
+def pytest_sessionstart(session):
+ session._setupstate = SetupState()
+def pytest_sessionfinish(session):
+ session._setupstate.teardown_all()
+
+class NodeInfo:
+ def __init__(self, location):
+ self.location = location
+
+def pytest_runtest_protocol(item, nextitem):
+ item.ihook.pytest_runtest_logstart(
+ nodeid=item.nodeid, location=item.location,
+ )
+ runtestprotocol(item, nextitem=nextitem)
+ return True
+
+def runtestprotocol(item, log=True, nextitem=None):
+ hasrequest = hasattr(item, "_request")
+ if hasrequest and not item._request:
+ item._initrequest()
+ rep = call_and_report(item, "setup", log)
+ reports = [rep]
+ if rep.passed:
+ reports.append(call_and_report(item, "call", log))
+ reports.append(call_and_report(item, "teardown", log,
+ nextitem=nextitem))
+ # after all teardown hooks have been called
+ # want funcargs and request info to go away
+ if hasrequest:
+ item._request = False
+ item.funcargs = None
+ return reports
+
+def pytest_runtest_setup(item):
+ item.session._setupstate.prepare(item)
+
+def pytest_runtest_call(item):
+ try:
+ item.runtest()
+ except Exception:
+ # Store trace info to allow postmortem debugging
+ type, value, tb = sys.exc_info()
+ tb = tb.tb_next # Skip *this* frame
+ sys.last_type = type
+ sys.last_value = value
+ sys.last_traceback = tb
+ del tb # Get rid of it in this namespace
+ raise
+
+def pytest_runtest_teardown(item, nextitem):
+ item.session._setupstate.teardown_exact(item, nextitem)
+
+def pytest_report_teststatus(report):
+ if report.when in ("setup", "teardown"):
+ if report.failed:
+ # category, shortletter, verbose-word
+ return "error", "E", "ERROR"
+ elif report.skipped:
+ return "skipped", "s", "SKIPPED"
+ else:
+ return "", "", ""
+
+
+#
+# Implementation
+
+def call_and_report(item, when, log=True, **kwds):
+ call = call_runtest_hook(item, when, **kwds)
+ hook = item.ihook
+ report = hook.pytest_runtest_makereport(item=item, call=call)
+ if log:
+ hook.pytest_runtest_logreport(report=report)
+ if check_interactive_exception(call, report):
+ hook.pytest_exception_interact(node=item, call=call, report=report)
+ return report
+
+def check_interactive_exception(call, report):
+ return call.excinfo and not (
+ hasattr(report, "wasxfail") or
+ call.excinfo.errisinstance(skip.Exception) or
+ call.excinfo.errisinstance(bdb.BdbQuit))
+
+def call_runtest_hook(item, when, **kwds):
+ hookname = "pytest_runtest_" + when
+ ihook = getattr(item.ihook, hookname)
+ return CallInfo(lambda: ihook(item=item, **kwds), when=when)
+
+class CallInfo:
+    """ Result/Exception info of a function invocation. """
+ #: None or ExceptionInfo object.
+ excinfo = None
+ def __init__(self, func, when):
+ #: context of invocation: one of "setup", "call",
+ #: "teardown", "memocollect"
+ self.when = when
+ self.start = time()
+ try:
+ self.result = func()
+ except KeyboardInterrupt:
+ self.stop = time()
+ raise
+ except:
+ self.excinfo = ExceptionInfo()
+ self.stop = time()
+
+ def __repr__(self):
+ if self.excinfo:
+ status = "exception: %s" % str(self.excinfo.value)
+ else:
+ status = "result: %r" % (self.result,)
+ return "<CallInfo when=%r %s>" % (self.when, status)
+
+def getslaveinfoline(node):
+ try:
+ return node._slaveinfocache
+ except AttributeError:
+ d = node.slaveinfo
+ ver = "%s.%s.%s" % d['version_info'][:3]
+ node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
+ d['id'], d['sysplatform'], ver, d['executable'])
+ return s
+
+class BaseReport(object):
+
+ def __init__(self, **kw):
+ self.__dict__.update(kw)
+
+ def toterminal(self, out):
+ if hasattr(self, 'node'):
+ out.line(getslaveinfoline(self.node))
+
+ longrepr = self.longrepr
+ if longrepr is None:
+ return
+
+ if hasattr(longrepr, 'toterminal'):
+ longrepr.toterminal(out)
+ else:
+ try:
+ out.line(longrepr)
+ except UnicodeEncodeError:
+ out.line("<unprintable longrepr>")
+
+ def get_sections(self, prefix):
+ for name, content in self.sections:
+ if name.startswith(prefix):
+ yield prefix, content
+
+ passed = property(lambda x: x.outcome == "passed")
+ failed = property(lambda x: x.outcome == "failed")
+ skipped = property(lambda x: x.outcome == "skipped")
+
+ @property
+ def fspath(self):
+ return self.nodeid.split("::")[0]
+
+def pytest_runtest_makereport(item, call):
+ when = call.when
+ duration = call.stop-call.start
+ keywords = dict([(x,1) for x in item.keywords])
+ excinfo = call.excinfo
+ sections = []
+ if not call.excinfo:
+ outcome = "passed"
+ longrepr = None
+ else:
+ if not isinstance(excinfo, ExceptionInfo):
+ outcome = "failed"
+ longrepr = excinfo
+ elif excinfo.errisinstance(pytest.skip.Exception):
+ outcome = "skipped"
+ r = excinfo._getreprcrash()
+ longrepr = (str(r.path), r.lineno, r.message)
+ else:
+ outcome = "failed"
+ if call.when == "call":
+ longrepr = item.repr_failure(excinfo)
+ else: # exception in setup or teardown
+ longrepr = item._repr_failure_py(excinfo,
+ style=item.config.option.tbstyle)
+ for rwhen, key, content in item._report_sections:
+ sections.append(("Captured %s %s" %(key, rwhen), content))
+ return TestReport(item.nodeid, item.location,
+ keywords, outcome, longrepr, when,
+ sections, duration)
+
+class TestReport(BaseReport):
+ """ Basic test report object (also used for setup and teardown calls if
+ they fail).
+ """
+ def __init__(self, nodeid, location, keywords, outcome,
+ longrepr, when, sections=(), duration=0, **extra):
+ #: normalized collection node id
+ self.nodeid = nodeid
+
+ #: a (filesystempath, lineno, domaininfo) tuple indicating the
+ #: actual location of a test item - it might be different from the
+ #: collected one e.g. if a method is inherited from a different module.
+ self.location = location
+
+ #: a name -> value dictionary containing all keywords and
+ #: markers associated with a test invocation.
+ self.keywords = keywords
+
+ #: test outcome, always one of "passed", "failed", "skipped".
+ self.outcome = outcome
+
+ #: None or a failure representation.
+ self.longrepr = longrepr
+
+ #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
+ self.when = when
+
+ #: list of (secname, data) extra information which needs to
+        #: be marshallable
+ self.sections = list(sections)
+
+ #: time it took to run just the test
+ self.duration = duration
+
+ self.__dict__.update(extra)
+
+ def __repr__(self):
+ return "<TestReport %r when=%r outcome=%r>" % (
+ self.nodeid, self.when, self.outcome)
+
+class TeardownErrorReport(BaseReport):
+ outcome = "failed"
+ when = "teardown"
+ def __init__(self, longrepr, **extra):
+ self.longrepr = longrepr
+ self.sections = []
+ self.__dict__.update(extra)
+
+def pytest_make_collect_report(collector):
+ call = CallInfo(collector._memocollect, "memocollect")
+ longrepr = None
+ if not call.excinfo:
+ outcome = "passed"
+ else:
+ from _pytest import nose
+ skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
+ if call.excinfo.errisinstance(skip_exceptions):
+ outcome = "skipped"
+ r = collector._repr_failure_py(call.excinfo, "line").reprcrash
+ longrepr = (str(r.path), r.lineno, r.message)
+ else:
+ outcome = "failed"
+ errorinfo = collector.repr_failure(call.excinfo)
+ if not hasattr(errorinfo, "toterminal"):
+ errorinfo = CollectErrorRepr(errorinfo)
+ longrepr = errorinfo
+ rep = CollectReport(collector.nodeid, outcome, longrepr,
+ getattr(call, 'result', None))
+ rep.call = call # see collect_one_node
+ return rep
+
+
+class CollectReport(BaseReport):
+ def __init__(self, nodeid, outcome, longrepr, result,
+ sections=(), **extra):
+ self.nodeid = nodeid
+ self.outcome = outcome
+ self.longrepr = longrepr
+ self.result = result or []
+ self.sections = list(sections)
+ self.__dict__.update(extra)
+
+ @property
+ def location(self):
+ return (self.fspath, None, self.fspath)
+
+ def __repr__(self):
+ return "<CollectReport %r lenresult=%s outcome=%r>" % (
+ self.nodeid, len(self.result), self.outcome)
+
+class CollectErrorRepr(TerminalRepr):
+ def __init__(self, msg):
+ self.longrepr = msg
+ def toterminal(self, out):
+ out.line(self.longrepr, red=True)
+
+class SetupState(object):
+ """ shared state for setting up/tearing down test items or collectors. """
+ def __init__(self):
+ self.stack = []
+ self._finalizers = {}
+
+ def addfinalizer(self, finalizer, colitem):
+ """ attach a finalizer to the given colitem.
+ if colitem is None, this will add a finalizer that
+ is called at the end of teardown_all().
+ """
+ assert colitem and not isinstance(colitem, tuple)
+ assert py.builtin.callable(finalizer)
+ #assert colitem in self.stack # some unit tests don't setup stack :/
+ self._finalizers.setdefault(colitem, []).append(finalizer)
+
+ def _pop_and_teardown(self):
+ colitem = self.stack.pop()
+ self._teardown_with_finalization(colitem)
+
+ def _callfinalizers(self, colitem):
+ finalizers = self._finalizers.pop(colitem, None)
+ exc = None
+ while finalizers:
+ fin = finalizers.pop()
+ try:
+ fin()
+ except Exception:
+ # XXX Only first exception will be seen by user,
+ # ideally all should be reported.
+ if exc is None:
+ exc = sys.exc_info()
+ if exc:
+ py.builtin._reraise(*exc)
+
+ def _teardown_with_finalization(self, colitem):
+ self._callfinalizers(colitem)
+ if hasattr(colitem, "teardown"):
+ colitem.teardown()
+ for colitem in self._finalizers:
+ assert colitem is None or colitem in self.stack \
+ or isinstance(colitem, tuple)
+
+ def teardown_all(self):
+ while self.stack:
+ self._pop_and_teardown()
+ for key in list(self._finalizers):
+ self._teardown_with_finalization(key)
+ assert not self._finalizers
+
+ def teardown_exact(self, item, nextitem):
+ needed_collectors = nextitem and nextitem.listchain() or []
+ self._teardown_towards(needed_collectors)
+
+ def _teardown_towards(self, needed_collectors):
+ while self.stack:
+ if self.stack == needed_collectors[:len(self.stack)]:
+ break
+ self._pop_and_teardown()
+
+ def prepare(self, colitem):
+ """ setup objects along the collector chain to the test-method
+ and teardown previously setup objects."""
+ needed_collectors = colitem.listchain()
+ self._teardown_towards(needed_collectors)
+
+ # check if the last collection node has raised an error
+ for col in self.stack:
+ if hasattr(col, '_prepare_exc'):
+ py.builtin._reraise(*col._prepare_exc)
+ for col in needed_collectors[len(self.stack):]:
+ self.stack.append(col)
+ try:
+ col.setup()
+ except Exception:
+ col._prepare_exc = sys.exc_info()
+ raise
+
+def collect_one_node(collector):
+ ihook = collector.ihook
+ ihook.pytest_collectstart(collector=collector)
+ rep = ihook.pytest_make_collect_report(collector=collector)
+ call = rep.__dict__.pop("call", None)
+ if call and check_interactive_exception(call, rep):
+ ihook.pytest_exception_interact(node=collector, call=call, report=rep)
+ return rep
+
+
+# =============================================================
+# Test OutcomeExceptions and helpers for creating them.
+
+
+class OutcomeException(Exception):
+ """ OutcomeException and its subclass instances indicate and
+ contain info about test and collection outcomes.
+ """
+ def __init__(self, msg=None, pytrace=True):
+ Exception.__init__(self, msg)
+ self.msg = msg
+ self.pytrace = pytrace
+
+ def __repr__(self):
+ if self.msg:
+ val = self.msg
+ if isinstance(val, bytes):
+ val = py._builtin._totext(val, errors='replace')
+ return val
+ return "<%s instance>" %(self.__class__.__name__,)
+ __str__ = __repr__
+
+class Skipped(OutcomeException):
+ # XXX hackish: on 3k we fake to live in the builtins
+ # in order to have Skipped exception printing shorter/nicer
+ __module__ = 'builtins'
+
+class Failed(OutcomeException):
+ """ raised from an explicit call to pytest.fail() """
+ __module__ = 'builtins'
+
+class Exit(KeyboardInterrupt):
+ """ raised for immediate program exits (no tracebacks/summaries)"""
+ def __init__(self, msg="unknown reason"):
+ self.msg = msg
+ KeyboardInterrupt.__init__(self, msg)
+
+# exposed helper methods
+
+def exit(msg):
+ """ exit testing process as if KeyboardInterrupt was triggered. """
+ __tracebackhide__ = True
+ raise Exit(msg)
+
+exit.Exception = Exit
+
+def skip(msg=""):
+ """ skip an executing test with the given message. Note: it's usually
+ better to use the pytest.mark.skipif marker to declare a test to be
+ skipped under certain conditions like mismatching platforms or
+ dependencies. See the pytest_skipping plugin for details.
+ """
+ __tracebackhide__ = True
+ raise Skipped(msg=msg)
+skip.Exception = Skipped
+
+def fail(msg="", pytrace=True):
+    """ explicitly fail a currently-executing test with the given message.
+
+ :arg pytrace: if false the msg represents the full failure information
+ and no python traceback will be reported.
+ """
+ __tracebackhide__ = True
+ raise Failed(msg=msg, pytrace=pytrace)
+fail.Exception = Failed
+
+
+def importorskip(modname, minversion=None):
+ """ return imported module if it has at least "minversion" as its
+    __version__ attribute. If no minversion is specified, a skip
+    is only triggered if the module cannot be imported.
+ """
+ __tracebackhide__ = True
+ compile(modname, '', 'eval') # to catch syntaxerrors
+ try:
+ __import__(modname)
+ except ImportError:
+ skip("could not import %r" %(modname,))
+ mod = sys.modules[modname]
+ if minversion is None:
+ return mod
+ verattr = getattr(mod, '__version__', None)
+ if minversion is not None:
+ try:
+ from pkg_resources import parse_version as pv
+ except ImportError:
+            skip("we have a required version for %r but cannot import "
+                 "pkg_resources to parse version strings." %(modname,))
+ if verattr is None or pv(verattr) < pv(minversion):
+ skip("module %r has __version__ %r, required is: %r" %(
+ modname, verattr, minversion))
+ return mod
+
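+# Illustrative usage sketch:
+#     docutils = pytest.importorskip("docutils", minversion="0.3")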
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/skipping.py b/testing/web-platform/tests/tools/pytest/_pytest/skipping.py
new file mode 100644
index 000000000..69157f485
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/skipping.py
@@ -0,0 +1,354 @@
+""" support for skip/xfail functions and markers. """
+import os
+import sys
+import traceback
+
+import py
+import pytest
+from _pytest.mark import MarkInfo, MarkDecorator
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption('--runxfail',
+ action="store_true", dest="runxfail", default=False,
+ help="run tests even if they are marked xfail")
+
+ parser.addini("xfail_strict", "default for the strict parameter of xfail "
+ "markers when not given explicitly (default: "
+ "False)",
+ default=False,
+ type="bool")
+
+
+def pytest_configure(config):
+ if config.option.runxfail:
+ old = pytest.xfail
+ config._cleanup.append(lambda: setattr(pytest, "xfail", old))
+ def nop(*args, **kwargs):
+ pass
+ nop.Exception = XFailed
+ setattr(pytest, "xfail", nop)
+
+ config.addinivalue_line("markers",
+ "skipif(condition): skip the given test function if eval(condition) "
+ "results in a True value. Evaluation happens within the "
+ "module global context. Example: skipif('sys.platform == \"win32\"') "
+ "skips the test if we are on the win32 platform. see "
+ "http://pytest.org/latest/skipping.html"
+ )
+ config.addinivalue_line("markers",
+        "xfail(condition, reason=None, run=True, raises=None): mark the test function "
+ "as an expected failure if eval(condition) has a True value. "
+ "Optionally specify a reason for better reporting and run=False if "
+ "you don't even want to execute the test function. If only specific "
+ "exception(s) are expected, you can list them in raises, and if the test fails "
+ "in other ways, it will be reported as a true failure. "
+ "See http://pytest.org/latest/skipping.html"
+ )
+
+
+def pytest_namespace():
+ return dict(xfail=xfail)
+
+
+class XFailed(pytest.fail.Exception):
+ """ raised from an explicit call to pytest.xfail() """
+
+
+def xfail(reason=""):
+ """ xfail an executing test or setup functions with the given reason."""
+ __tracebackhide__ = True
+ raise XFailed(reason)
+xfail.Exception = XFailed
+
+
+class MarkEvaluator:
+ def __init__(self, item, name):
+ self.item = item
+ self.name = name
+
+ @property
+ def holder(self):
+ return self.item.keywords.get(self.name)
+
+ def __bool__(self):
+ return bool(self.holder)
+ __nonzero__ = __bool__
+
+ def wasvalid(self):
+ return not hasattr(self, 'exc')
+
+ def invalidraise(self, exc):
+ raises = self.get('raises')
+ if not raises:
+ return
+ return not isinstance(exc, raises)
+
+ def istrue(self):
+ try:
+ return self._istrue()
+ except Exception:
+ self.exc = sys.exc_info()
+ if isinstance(self.exc[1], SyntaxError):
+ msg = [" " * (self.exc[1].offset + 4) + "^",]
+ msg.append("SyntaxError: invalid syntax")
+ else:
+ msg = traceback.format_exception_only(*self.exc[:2])
+ pytest.fail("Error evaluating %r expression\n"
+ " %s\n"
+ "%s"
+ %(self.name, self.expr, "\n".join(msg)),
+ pytrace=False)
+
+ def _getglobals(self):
+ d = {'os': os, 'sys': sys, 'config': self.item.config}
+ func = self.item.obj
+ try:
+ d.update(func.__globals__)
+ except AttributeError:
+ d.update(func.func_globals)
+ return d
+
+ def _istrue(self):
+ if hasattr(self, 'result'):
+ return self.result
+ if self.holder:
+ d = self._getglobals()
+ if self.holder.args:
+ self.result = False
+ # "holder" might be a MarkInfo or a MarkDecorator; only
+ # MarkInfo keeps track of all parameters it received in an
+ # _arglist attribute
+ if hasattr(self.holder, '_arglist'):
+ arglist = self.holder._arglist
+ else:
+ arglist = [(self.holder.args, self.holder.kwargs)]
+ for args, kwargs in arglist:
+ for expr in args:
+ self.expr = expr
+ if isinstance(expr, py.builtin._basestring):
+ result = cached_eval(self.item.config, expr, d)
+ else:
+ if "reason" not in kwargs:
+ # XXX better be checked at collection time
+ msg = "you need to specify reason=STRING " \
+ "when using booleans as conditions."
+ pytest.fail(msg)
+ result = bool(expr)
+ if result:
+ self.result = True
+ self.reason = kwargs.get('reason', None)
+ self.expr = expr
+ return self.result
+ else:
+ self.result = True
+ return getattr(self, 'result', False)
+
+ def get(self, attr, default=None):
+ return self.holder.kwargs.get(attr, default)
+
+ def getexplanation(self):
+ expl = getattr(self, 'reason', None) or self.get('reason', None)
+ if not expl:
+ if not hasattr(self, 'expr'):
+ return ""
+ else:
+ return "condition: " + str(self.expr)
+ return expl
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_setup(item):
+ # Check if skip or skipif are specified as pytest marks
+
+ skipif_info = item.keywords.get('skipif')
+ if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
+ eval_skipif = MarkEvaluator(item, 'skipif')
+ if eval_skipif.istrue():
+ item._evalskip = eval_skipif
+ pytest.skip(eval_skipif.getexplanation())
+
+ skip_info = item.keywords.get('skip')
+ if isinstance(skip_info, (MarkInfo, MarkDecorator)):
+ item._evalskip = True
+ if 'reason' in skip_info.kwargs:
+ pytest.skip(skip_info.kwargs['reason'])
+ elif skip_info.args:
+ pytest.skip(skip_info.args[0])
+ else:
+ pytest.skip("unconditional skip")
+
+ item._evalxfail = MarkEvaluator(item, 'xfail')
+ check_xfail_no_run(item)
+
+
+@pytest.mark.hookwrapper
+def pytest_pyfunc_call(pyfuncitem):
+ check_xfail_no_run(pyfuncitem)
+ outcome = yield
+ passed = outcome.excinfo is None
+ if passed:
+ check_strict_xfail(pyfuncitem)
+
+
+def check_xfail_no_run(item):
+ """check xfail(run=False)"""
+ if not item.config.option.runxfail:
+ evalxfail = item._evalxfail
+ if evalxfail.istrue():
+ if not evalxfail.get('run', True):
+ pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
+
+
+def check_strict_xfail(pyfuncitem):
+ """check xfail(strict=True) for the given PASSING test"""
+ evalxfail = pyfuncitem._evalxfail
+ if evalxfail.istrue():
+ strict_default = pyfuncitem.config.getini('xfail_strict')
+ is_strict_xfail = evalxfail.get('strict', strict_default)
+ if is_strict_xfail:
+ del pyfuncitem._evalxfail
+ explanation = evalxfail.getexplanation()
+ pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+ outcome = yield
+ rep = outcome.get_result()
+ evalxfail = getattr(item, '_evalxfail', None)
+ evalskip = getattr(item, '_evalskip', None)
+ # unittest special case, see setting of _unexpectedsuccess
+ if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
+ # we need to translate into how pytest encodes xpass
+ rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
+ rep.outcome = "failed"
+ elif item.config.option.runxfail:
+ pass # don't interfere
+ elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
+ rep.wasxfail = "reason: " + call.excinfo.value.msg
+ rep.outcome = "skipped"
+ elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
+ evalxfail.istrue():
+ if call.excinfo:
+ if evalxfail.invalidraise(call.excinfo.value):
+ rep.outcome = "failed"
+ else:
+ rep.outcome = "skipped"
+ rep.wasxfail = evalxfail.getexplanation()
+ elif call.when == "call":
+ rep.outcome = "failed" # xpass outcome
+ rep.wasxfail = evalxfail.getexplanation()
+ elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
+ # skipped by mark.skipif; change the location of the failure
+ # to point to the item definition, otherwise it will display
+ # the location of where the skip exception was raised within pytest
+ filename, line, reason = rep.longrepr
+ filename, line = item.location[:2]
+ rep.longrepr = filename, line, reason
+
+# called by terminalreporter progress reporting
+def pytest_report_teststatus(report):
+ if hasattr(report, "wasxfail"):
+ if report.skipped:
+ return "xfailed", "x", "xfail"
+ elif report.failed:
+ return "xpassed", "X", ("XPASS", {'yellow': True})
+
+# called by the terminalreporter instance/plugin
+def pytest_terminal_summary(terminalreporter):
+ tr = terminalreporter
+ if not tr.reportchars:
+ #for name in "xfailed skipped failed xpassed":
+ # if not tr.stats.get(name, 0):
+ # tr.write_line("HINT: use '-r' option to see extra "
+ # "summary info about tests")
+ # break
+ return
+
+ lines = []
+ for char in tr.reportchars:
+ if char == "x":
+ show_xfailed(terminalreporter, lines)
+ elif char == "X":
+ show_xpassed(terminalreporter, lines)
+ elif char in "fF":
+ show_simple(terminalreporter, lines, 'failed', "FAIL %s")
+ elif char in "sS":
+ show_skipped(terminalreporter, lines)
+ elif char == "E":
+ show_simple(terminalreporter, lines, 'error', "ERROR %s")
+ elif char == 'p':
+ show_simple(terminalreporter, lines, 'passed', "PASSED %s")
+
+ if lines:
+ tr._tw.sep("=", "short test summary info")
+ for line in lines:
+ tr._tw.line(line)
+
+def show_simple(terminalreporter, lines, stat, format):
+ failed = terminalreporter.stats.get(stat)
+ if failed:
+ for rep in failed:
+ pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+ lines.append(format %(pos,))
+
+def show_xfailed(terminalreporter, lines):
+ xfailed = terminalreporter.stats.get("xfailed")
+ if xfailed:
+ for rep in xfailed:
+ pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+ reason = rep.wasxfail
+ lines.append("XFAIL %s" % (pos,))
+ if reason:
+ lines.append(" " + str(reason))
+
+def show_xpassed(terminalreporter, lines):
+ xpassed = terminalreporter.stats.get("xpassed")
+ if xpassed:
+ for rep in xpassed:
+ pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+ reason = rep.wasxfail
+ lines.append("XPASS %s %s" %(pos, reason))
+
+def cached_eval(config, expr, d):
+ if not hasattr(config, '_evalcache'):
+ config._evalcache = {}
+ try:
+ return config._evalcache[expr]
+ except KeyError:
+ import _pytest._code
+ exprcode = _pytest._code.compile(expr, mode="eval")
+ config._evalcache[expr] = x = eval(exprcode, d)
+ return x
+
+
+def folded_skips(skipped):
+ d = {}
+ for event in skipped:
+ key = event.longrepr
+ assert len(key) == 3, (event, key)
+ d.setdefault(key, []).append(event)
+ l = []
+ for key, events in d.items():
+ l.append((len(events),) + key)
+ return l
+
+def show_skipped(terminalreporter, lines):
+ tr = terminalreporter
+ skipped = tr.stats.get('skipped', [])
+ if skipped:
+ #if not tr.hasopt('skipped'):
+ # tr.write_line(
+ # "%d skipped tests, specify -rs for more info" %
+ # len(skipped))
+ # return
+ fskips = folded_skips(skipped)
+ if fskips:
+ #tr.write_sep("_", "skipped test summary")
+ for num, fspath, lineno, reason in fskips:
+ if reason.startswith("Skipped: "):
+ reason = reason[9:]
+ lines.append("SKIP [%d] %s:%d: %s" %
+ (num, fspath, lineno, reason))
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/standalonetemplate.py b/testing/web-platform/tests/tools/pytest/_pytest/standalonetemplate.py
new file mode 100755
index 000000000..484d5d1b2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/standalonetemplate.py
@@ -0,0 +1,89 @@
+#! /usr/bin/env python
+
+# Hi There!
+# You may be wondering what this giant blob of binary data here is, you might
+# even be worried that we're up to something nefarious (good for you for being
+# paranoid!). This is a base64 encoding of a zlib-compressed, pickled mapping
+# of module sources; together with the loader below, it forms a fully
+# functional basic pytest script.
+#
+# Pytest is a thing that tests packages, pytest itself is a package that some-
+# one might want to install, especially if they're looking to run tests inside
+# some package they want to install. Pytest has a lot of code to collect and
+# execute tests, and other such sort of "tribal knowledge" that has been en-
+# coded in its code base. Because of this we basically include a basic copy
+# of pytest inside this blob. We do this because it lets you, as a maintainer
+# or application developer, give people who don't deal with Python much an
+# easy way to run tests without installing the complete pytest package.
+#
+# If you're wondering how this is created: you can create it yourself if you
+# have a complete pytest installation by using this command on the command-
+# line: ``py.test --genscript=runtests.py``.
+
+sources = """
+@SOURCES@"""
+
+import sys
+import base64
+import zlib
+
+class DictImporter(object):
+ def __init__(self, sources):
+ self.sources = sources
+
+ def find_module(self, fullname, path=None):
+ if fullname == "argparse" and sys.version_info >= (2,7):
+ # we were generated with <python2.7 (which pulls in argparse)
+ # but we are running now on a stdlib which has it, so use that.
+ return None
+ if fullname in self.sources:
+ return self
+ if fullname + '.__init__' in self.sources:
+ return self
+ return None
+
+ def load_module(self, fullname):
+ # print "load_module:", fullname
+ from types import ModuleType
+ try:
+ s = self.sources[fullname]
+ is_pkg = False
+ except KeyError:
+ s = self.sources[fullname + '.__init__']
+ is_pkg = True
+
+ co = compile(s, fullname, 'exec')
+ module = sys.modules.setdefault(fullname, ModuleType(fullname))
+ module.__file__ = "%s/%s" % (__file__, fullname)
+ module.__loader__ = self
+ if is_pkg:
+ module.__path__ = [fullname]
+
+ do_exec(co, module.__dict__) # noqa
+ return sys.modules[fullname]
+
+ def get_source(self, name):
+ res = self.sources.get(name)
+ if res is None:
+ res = self.sources.get(name + '.__init__')
+ return res
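+
+
+# A hedged sketch of roughly the inverse of the decoding performed in the
+# __main__ block below: how a {modulename: source} mapping could be packed
+# into a blob like @SOURCES@ (illustrative only; the real blob is produced
+# by ``py.test --genscript``).
+def _example_pack_sources(source_map):
+    import pickle
+    packed = zlib.compress(pickle.dumps(source_map))
+    if sys.version_info >= (3, 0):
+        return base64.encodebytes(packed)
+    return base64.encodestring(packed)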
+
+if __name__ == "__main__":
+ try:
+ import pkg_resources # noqa
+ except ImportError:
+ sys.stderr.write("ERROR: setuptools not installed\n")
+ sys.exit(2)
+ if sys.version_info >= (3, 0):
+ exec("def do_exec(co, loc): exec(co, loc)\n")
+ import pickle
+ sources = sources.encode("ascii") # ensure bytes
+ sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
+ else:
+ import cPickle as pickle
+ exec("def do_exec(co, loc): exec co in loc\n")
+ sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
+
+ importer = DictImporter(sources)
+ sys.meta_path.insert(0, importer)
+ entry = "@ENTRY@"
+ do_exec(entry, locals()) # noqa
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/terminal.py b/testing/web-platform/tests/tools/pytest/_pytest/terminal.py
new file mode 100644
index 000000000..825f553ef
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/terminal.py
@@ -0,0 +1,593 @@
+""" terminal reporting of the full testing process.
+
+This is a good source for looking at the various reporting hooks.
+"""
+from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
+ EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
+import pytest
+import py
+import sys
+import time
+import platform
+
+import _pytest._pluggy as pluggy
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting", "reporting", after="general")
+ group._addoption('-v', '--verbose', action="count",
+ dest="verbose", default=0, help="increase verbosity."),
+ group._addoption('-q', '--quiet', action="count",
+ dest="quiet", default=0, help="decrease verbosity."),
+ group._addoption('-r',
+ action="store", dest="reportchars", default=None, metavar="chars",
+ help="show extra test summary info as specified by chars (f)ailed, "
+ "(E)error, (s)skipped, (x)failed, (X)passed (w)pytest-warnings "
+ "(p)passed, (P)passed with output, (a)all except pP.")
+ group._addoption('-l', '--showlocals',
+ action="store_true", dest="showlocals", default=False,
+ help="show locals in tracebacks (disabled by default).")
+ group._addoption('--report',
+ action="store", dest="report", default=None, metavar="opts",
+ help="(deprecated, use -r)")
+ group._addoption('--tb', metavar="style",
+ action="store", dest="tbstyle", default='auto',
+ choices=['auto', 'long', 'short', 'no', 'line', 'native'],
+ help="traceback print mode (auto/long/short/line/native/no).")
+ group._addoption('--fulltrace', '--full-trace',
+ action="store_true", default=False,
+ help="don't cut any tracebacks (default is to cut).")
+ group._addoption('--color', metavar="color",
+ action="store", dest="color", default='auto',
+ choices=['yes', 'no', 'auto'],
+ help="color terminal output (yes/no/auto).")
+
+def pytest_configure(config):
+ config.option.verbose -= config.option.quiet
+ reporter = TerminalReporter(config, sys.stdout)
+ config.pluginmanager.register(reporter, 'terminalreporter')
+ if config.option.debug or config.option.traceconfig:
+ def mywriter(tags, args):
+ msg = " ".join(map(str, args))
+ reporter.write_line("[traceconfig] " + msg)
+ config.trace.root.setprocessor("pytest:config", mywriter)
+
+def getreportopt(config):
+ reportopts = ""
+ optvalue = config.option.report
+ if optvalue:
+ py.builtin.print_("DEPRECATED: use -r instead of --report option.",
+ file=sys.stderr)
+ if optvalue:
+ for setting in optvalue.split(","):
+ setting = setting.strip()
+ if setting == "skipped":
+ reportopts += "s"
+ elif setting == "xfailed":
+ reportopts += "x"
+ reportchars = config.option.reportchars
+ if reportchars:
+ for char in reportchars:
+ if char not in reportopts and char != 'a':
+ reportopts += char
+ elif char == 'a':
+ reportopts = 'fEsxXw'
+ return reportopts
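+
+# Illustrative sketch (fake config object with hypothetical values): explicit
+# -r chars are accumulated in order, while 'a' expands to the full 'fEsxXw'
+# set.
+def _example_getreportopt():
+    class _FakeOptions:
+        report = None
+        reportchars = "sxX"
+
+    class _FakeConfig:
+        option = _FakeOptions()
+
+    return getreportopt(_FakeConfig())  # -> "sxX"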
+
+def pytest_report_teststatus(report):
+ if report.passed:
+ letter = "."
+ elif report.skipped:
+ letter = "s"
+ elif report.failed:
+ letter = "F"
+ if report.when != "call":
+ letter = "f"
+ return report.outcome, letter, report.outcome.upper()
+
+class WarningReport:
+ def __init__(self, code, message, nodeid=None, fslocation=None):
+ self.code = code
+ self.message = message
+ self.nodeid = nodeid
+ self.fslocation = fslocation
+
+
+class TerminalReporter:
+ def __init__(self, config, file=None):
+ import _pytest.config
+ self.config = config
+ self.verbosity = self.config.option.verbose
+ self.showheader = self.verbosity >= 0
+ self.showfspath = self.verbosity >= 0
+ self.showlongtestinfo = self.verbosity > 0
+ self._numcollected = 0
+
+ self.stats = {}
+ self.startdir = py.path.local()
+ if file is None:
+ file = sys.stdout
+ self._tw = self.writer = _pytest.config.create_terminal_writer(config,
+ file)
+ self.currentfspath = None
+ self.reportchars = getreportopt(config)
+ self.hasmarkup = self._tw.hasmarkup
+ self.isatty = file.isatty()
+
+ def hasopt(self, char):
+ char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
+ return char in self.reportchars
+
+ def write_fspath_result(self, nodeid, res):
+ fspath = self.config.rootdir.join(nodeid.split("::")[0])
+ if fspath != self.currentfspath:
+ self.currentfspath = fspath
+ fspath = self.startdir.bestrelpath(fspath)
+ self._tw.line()
+ self._tw.write(fspath + " ")
+ self._tw.write(res)
+
+ def write_ensure_prefix(self, prefix, extra="", **kwargs):
+ if self.currentfspath != prefix:
+ self._tw.line()
+ self.currentfspath = prefix
+ self._tw.write(prefix)
+ if extra:
+ self._tw.write(extra, **kwargs)
+ self.currentfspath = -2
+
+ def ensure_newline(self):
+ if self.currentfspath:
+ self._tw.line()
+ self.currentfspath = None
+
+ def write(self, content, **markup):
+ self._tw.write(content, **markup)
+
+ def write_line(self, line, **markup):
+ if not py.builtin._istext(line):
+ line = py.builtin.text(line, errors="replace")
+ self.ensure_newline()
+ self._tw.line(line, **markup)
+
+ def rewrite(self, line, **markup):
+ line = str(line)
+ self._tw.write("\r" + line, **markup)
+
+ def write_sep(self, sep, title=None, **markup):
+ self.ensure_newline()
+ self._tw.sep(sep, title, **markup)
+
+ def section(self, title, sep="=", **kw):
+ self._tw.sep(sep, title, **kw)
+
+ def line(self, msg, **kw):
+ self._tw.line(msg, **kw)
+
+ def pytest_internalerror(self, excrepr):
+ for line in py.builtin.text(excrepr).split("\n"):
+ self.write_line("INTERNALERROR> " + line)
+ return 1
+
+ def pytest_logwarning(self, code, fslocation, message, nodeid):
+ warnings = self.stats.setdefault("warnings", [])
+ if isinstance(fslocation, tuple):
+ fslocation = "%s:%d" % fslocation
+ warning = WarningReport(code=code, fslocation=fslocation,
+ message=message, nodeid=nodeid)
+ warnings.append(warning)
+
+ def pytest_plugin_registered(self, plugin):
+ if self.config.option.traceconfig:
+ msg = "PLUGIN registered: %s" % (plugin,)
+ # XXX this event may happen during setup/teardown time,
+ # which unfortunately captures our output here and garbles
+ # it when we use self.write_line
+ self.write_line(msg)
+
+ def pytest_deselected(self, items):
+ self.stats.setdefault('deselected', []).extend(items)
+
+ def pytest_runtest_logstart(self, nodeid, location):
+ # ensure that the path is printed before the
+ # 1st test of a module starts running
+ if self.showlongtestinfo:
+ line = self._locationline(nodeid, *location)
+ self.write_ensure_prefix(line, "")
+ elif self.showfspath:
+ fsid = nodeid.split("::")[0]
+ self.write_fspath_result(fsid, "")
+
+ def pytest_runtest_logreport(self, report):
+ rep = report
+ res = self.config.hook.pytest_report_teststatus(report=rep)
+ cat, letter, word = res
+ self.stats.setdefault(cat, []).append(rep)
+ self._tests_ran = True
+ if not letter and not word:
+ # probably passed setup/teardown
+ return
+ if self.verbosity <= 0:
+ if not hasattr(rep, 'node') and self.showfspath:
+ self.write_fspath_result(rep.nodeid, letter)
+ else:
+ self._tw.write(letter)
+ else:
+ if isinstance(word, tuple):
+ word, markup = word
+ else:
+ if rep.passed:
+ markup = {'green':True}
+ elif rep.failed:
+ markup = {'red':True}
+ elif rep.skipped:
+ markup = {'yellow':True}
+ line = self._locationline(rep.nodeid, *rep.location)
+ if not hasattr(rep, 'node'):
+ self.write_ensure_prefix(line, word, **markup)
+ #self._tw.write(word, **markup)
+ else:
+ self.ensure_newline()
+ if hasattr(rep, 'node'):
+ self._tw.write("[%s] " % rep.node.gateway.id)
+ self._tw.write(word, **markup)
+ self._tw.write(" " + line)
+ self.currentfspath = -2
+
+ def pytest_collection(self):
+ if not self.isatty and self.config.option.verbose >= 1:
+ self.write("collecting ... ", bold=True)
+
+ def pytest_collectreport(self, report):
+ if report.failed:
+ self.stats.setdefault("error", []).append(report)
+ elif report.skipped:
+ self.stats.setdefault("skipped", []).append(report)
+ items = [x for x in report.result if isinstance(x, pytest.Item)]
+ self._numcollected += len(items)
+ if self.isatty:
+ #self.write_fspath_result(report.nodeid, 'E')
+ self.report_collect()
+
+ def report_collect(self, final=False):
+ if self.config.option.verbose < 0:
+ return
+
+ errors = len(self.stats.get('error', []))
+ skipped = len(self.stats.get('skipped', []))
+ if final:
+ line = "collected "
+ else:
+ line = "collecting "
+ line += str(self._numcollected) + " items"
+ if errors:
+ line += " / %d errors" % errors
+ if skipped:
+ line += " / %d skipped" % skipped
+ if self.isatty:
+ if final:
+ line += " \n"
+ self.rewrite(line, bold=True)
+ else:
+ self.write_line(line)
+
+ def pytest_collection_modifyitems(self):
+ self.report_collect(True)
+
+ @pytest.hookimpl(trylast=True)
+ def pytest_sessionstart(self, session):
+ self._sessionstarttime = time.time()
+ if not self.showheader:
+ return
+ self.write_sep("=", "test session starts", bold=True)
+ verinfo = platform.python_version()
+ msg = "platform %s -- Python %s" % (sys.platform, verinfo)
+ if hasattr(sys, 'pypy_version_info'):
+ verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
+ msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
+ msg += ", pytest-%s, py-%s, pluggy-%s" % (
+ pytest.__version__, py.__version__, pluggy.__version__)
+ if self.verbosity > 0 or self.config.option.debug or \
+ getattr(self.config.option, 'pastebin', None):
+ msg += " -- " + str(sys.executable)
+ self.write_line(msg)
+ lines = self.config.hook.pytest_report_header(
+ config=self.config, startdir=self.startdir)
+ lines.reverse()
+ for line in flatten(lines):
+ self.write_line(line)
+
+ def pytest_report_header(self, config):
+ inifile = ""
+ if config.inifile:
+ inifile = config.rootdir.bestrelpath(config.inifile)
+ lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)]
+
+ plugininfo = config.pluginmanager.list_plugin_distinfo()
+ if plugininfo:
+
+ lines.append(
+ "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
+ return lines
+
+ def pytest_collection_finish(self, session):
+ if self.config.option.collectonly:
+ self._printcollecteditems(session.items)
+ if self.stats.get('failed'):
+ self._tw.sep("!", "collection failures")
+ for rep in self.stats.get('failed'):
+ rep.toterminal(self._tw)
+ return 1
+ return 0
+ if not self.showheader:
+ return
+ #for i, testarg in enumerate(self.config.args):
+ # self.write_line("test path %d: %s" %(i+1, testarg))
+
+ def _printcollecteditems(self, items):
+ # to print out items and their parent collectors
+ # we take care to leave out Instances aka ()
+ # because later versions are going to get rid of them anyway
+ if self.config.option.verbose < 0:
+ if self.config.option.verbose < -1:
+ counts = {}
+ for item in items:
+ name = item.nodeid.split('::', 1)[0]
+ counts[name] = counts.get(name, 0) + 1
+ for name, count in sorted(counts.items()):
+ self._tw.line("%s: %d" % (name, count))
+ else:
+ for item in items:
+ nodeid = item.nodeid
+ nodeid = nodeid.replace("::()::", "::")
+ self._tw.line(nodeid)
+ return
+ stack = []
+ indent = ""
+ for item in items:
+ needed_collectors = item.listchain()[1:] # strip root node
+ while stack:
+ if stack == needed_collectors[:len(stack)]:
+ break
+ stack.pop()
+ for col in needed_collectors[len(stack):]:
+ stack.append(col)
+ #if col.name == "()":
+ # continue
+ indent = (len(stack) - 1) * " "
+ self._tw.line("%s%s" % (indent, col))
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_sessionfinish(self, exitstatus):
+ outcome = yield
+ outcome.get_result()
+ self._tw.line("")
+ summary_exit_codes = (
+ EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
+ EXIT_NOTESTSCOLLECTED)
+ if exitstatus in summary_exit_codes:
+ self.config.hook.pytest_terminal_summary(terminalreporter=self)
+ self.summary_errors()
+ self.summary_failures()
+ self.summary_warnings()
+ self.summary_passes()
+ if exitstatus == EXIT_INTERRUPTED:
+ self._report_keyboardinterrupt()
+ del self._keyboardinterrupt_memo
+ self.summary_deselected()
+ self.summary_stats()
+
+ def pytest_keyboard_interrupt(self, excinfo):
+ self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
+
+ def pytest_unconfigure(self):
+ if hasattr(self, '_keyboardinterrupt_memo'):
+ self._report_keyboardinterrupt()
+
+ def _report_keyboardinterrupt(self):
+ excrepr = self._keyboardinterrupt_memo
+ msg = excrepr.reprcrash.message
+ self.write_sep("!", msg)
+ if "KeyboardInterrupt" in msg:
+ if self.config.option.fulltrace:
+ excrepr.toterminal(self._tw)
+ else:
+ self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
+ excrepr.reprcrash.toterminal(self._tw)
+
+ def _locationline(self, nodeid, fspath, lineno, domain):
+ def mkrel(nodeid):
+ line = self.config.cwd_relative_nodeid(nodeid)
+ if domain and line.endswith(domain):
+ line = line[:-len(domain)]
+ l = domain.split("[")
+ l[0] = l[0].replace('.', '::') # don't replace '.' in params
+ line += "[".join(l)
+ return line
+ # collect_fspath comes from testid which has a "/"-normalized path
+
+ if fspath:
+ res = mkrel(nodeid).replace("::()", "") # parens-normalization
+ if nodeid.split("::")[0] != fspath.replace("\\", "/"):
+ res += " <- " + self.startdir.bestrelpath(fspath)
+ else:
+ res = "[location]"
+ return res + " "
+
+ def _getfailureheadline(self, rep):
+ if hasattr(rep, 'location'):
+ fspath, lineno, domain = rep.location
+ return domain
+ else:
+ return "test session" # XXX?
+
+ def _getcrashline(self, rep):
+ try:
+ return str(rep.longrepr.reprcrash)
+ except AttributeError:
+ try:
+ return str(rep.longrepr)[:50]
+ except AttributeError:
+ return ""
+
+ #
+ # summaries for sessionfinish
+ #
+ def getreports(self, name):
+ l = []
+ for x in self.stats.get(name, []):
+ if not hasattr(x, '_pdbshown'):
+ l.append(x)
+ return l
+
+ def summary_warnings(self):
+ if self.hasopt("w"):
+ warnings = self.stats.get("warnings")
+ if not warnings:
+ return
+ self.write_sep("=", "pytest-warning summary")
+ for w in warnings:
+ self._tw.line("W%s %s %s" % (w.code,
+ w.fslocation, w.message))
+
+ def summary_passes(self):
+ if self.config.option.tbstyle != "no":
+ if self.hasopt("P"):
+ reports = self.getreports('passed')
+ if not reports:
+ return
+ self.write_sep("=", "PASSES")
+ for rep in reports:
+ msg = self._getfailureheadline(rep)
+ self.write_sep("_", msg)
+ self._outrep_summary(rep)
+
+ def summary_failures(self):
+ if self.config.option.tbstyle != "no":
+ reports = self.getreports('failed')
+ if not reports:
+ return
+ self.write_sep("=", "FAILURES")
+ for rep in reports:
+ if self.config.option.tbstyle == "line":
+ line = self._getcrashline(rep)
+ self.write_line(line)
+ else:
+ msg = self._getfailureheadline(rep)
+ markup = {'red': True, 'bold': True}
+ self.write_sep("_", msg, **markup)
+ self._outrep_summary(rep)
+
+ def summary_errors(self):
+ if self.config.option.tbstyle != "no":
+ reports = self.getreports('error')
+ if not reports:
+ return
+ self.write_sep("=", "ERRORS")
+ for rep in self.stats['error']:
+ msg = self._getfailureheadline(rep)
+ if not hasattr(rep, 'when'):
+ # collect
+ msg = "ERROR collecting " + msg
+ elif rep.when == "setup":
+ msg = "ERROR at setup of " + msg
+ elif rep.when == "teardown":
+ msg = "ERROR at teardown of " + msg
+ self.write_sep("_", msg)
+ self._outrep_summary(rep)
+
+ def _outrep_summary(self, rep):
+ rep.toterminal(self._tw)
+ for secname, content in rep.sections:
+ self._tw.sep("-", secname)
+ if content[-1:] == "\n":
+ content = content[:-1]
+ self._tw.line(content)
+
+ def summary_stats(self):
+ session_duration = time.time() - self._sessionstarttime
+ (line, color) = build_summary_stats_line(self.stats)
+ msg = "%s in %.2f seconds" % (line, session_duration)
+ markup = {color: True, 'bold': True}
+
+ if self.verbosity >= 0:
+ self.write_sep("=", msg, **markup)
+ if self.verbosity == -1:
+ self.write_line(msg, **markup)
+
+ def summary_deselected(self):
+ if 'deselected' in self.stats:
+ l = []
+ k = self.config.option.keyword
+ if k:
+ l.append("-k%s" % k)
+ m = self.config.option.markexpr
+ if m:
+ l.append("-m %r" % m)
+ if l:
+ self.write_sep("=", "%d tests deselected by %r" % (
+ len(self.stats['deselected']), " ".join(l)), bold=True)
+
+def repr_pythonversion(v=None):
+ if v is None:
+ v = sys.version_info
+ try:
+ return "%s.%s.%s-%s-%s" % v
+ except (TypeError, ValueError):
+ return str(v)
+
+def flatten(l):
+ for x in l:
+ if isinstance(x, (list, tuple)):
+ for y in flatten(x):
+ yield y
+ else:
+ yield x
+
+def build_summary_stats_line(stats):
+ keys = ("failed passed skipped deselected "
+ "xfailed xpassed warnings error").split()
+ key_translation = {'warnings': 'pytest-warnings'}
+ unknown_key_seen = False
+ for key in stats.keys():
+ if key not in keys:
+ if key: # setup/teardown reports have an empty key, ignore them
+ keys.append(key)
+ unknown_key_seen = True
+ parts = []
+ for key in keys:
+ val = stats.get(key, None)
+ if val:
+ key_name = key_translation.get(key, key)
+ parts.append("%d %s" % (len(val), key_name))
+
+ if parts:
+ line = ", ".join(parts)
+ else:
+ line = "no tests ran"
+
+ if 'failed' in stats or 'error' in stats:
+ color = 'red'
+ elif 'warnings' in stats or unknown_key_seen:
+ color = 'yellow'
+ elif 'passed' in stats:
+ color = 'green'
+ else:
+ color = 'yellow'
+
+ return (line, color)
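+
+
+# Illustrative sketch (fake stats dict): the summary line simply counts the
+# entries per outcome key and picks the colour from the most severe category
+# present.
+def _example_summary_stats_line():
+    fake_stats = {
+        'failed': [object()],
+        'passed': [object()] * 3,
+        'skipped': [object()] * 2,
+    }
+    # -> ("1 failed, 3 passed, 2 skipped", "red")
+    return build_summary_stats_line(fake_stats)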
+
+
+def _plugin_nameversions(plugininfo):
+ l = []
+ for plugin, dist in plugininfo:
+ # gets us name and version!
+ name = '{dist.project_name}-{dist.version}'.format(dist=dist)
+ # questionable convenience, but it keeps things short
+ if name.startswith("pytest-"):
+ name = name[7:]
+ # we decided to print python package names
+ # they can have more than one plugin
+ if name not in l:
+ l.append(name)
+ return l
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/tmpdir.py b/testing/web-platform/tests/tools/pytest/_pytest/tmpdir.py
new file mode 100644
index 000000000..ebc48dbe5
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/tmpdir.py
@@ -0,0 +1,123 @@
+""" support for providing temporary directories to test functions. """
+import re
+
+import pytest
+import py
+from _pytest.monkeypatch import monkeypatch
+
+
+class TempdirFactory:
+ """Factory for temporary directories under the common base temp directory.
+
+ The base directory can be configured using the ``--basetemp`` option.
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.trace = config.trace.get("tmpdir")
+
+ def ensuretemp(self, string, dir=1):
+ """ (deprecated) return temporary directory path with
+ the given string as the trailing part. It is usually
+ better to use the 'tmpdir' function argument which
+ provides a unique, empty directory for each test
+ invocation.
+ """
+ #py.log._apiwarn(">1.1", "use tmpdir function argument")
+ return self.getbasetemp().ensure(string, dir=dir)
+
+ def mktemp(self, basename, numbered=True):
+ """Create a subdirectory of the base temporary directory and return it.
+ If ``numbered``, ensure the directory is unique by adding a number
+ prefix greater than any existing one.
+ """
+ basetemp = self.getbasetemp()
+ if not numbered:
+ p = basetemp.mkdir(basename)
+ else:
+ p = py.path.local.make_numbered_dir(prefix=basename,
+ keep=0, rootdir=basetemp, lock_timeout=None)
+ self.trace("mktemp", p)
+ return p
+
+ def getbasetemp(self):
+ """ return base temporary directory. """
+ try:
+ return self._basetemp
+ except AttributeError:
+ basetemp = self.config.option.basetemp
+ if basetemp:
+ basetemp = py.path.local(basetemp)
+ if basetemp.check():
+ basetemp.remove()
+ basetemp.mkdir()
+ else:
+ temproot = py.path.local.get_temproot()
+ user = get_user()
+ if user:
+ # use a sub-directory in the temproot to speed up
+ # the make_numbered_dir() call
+ rootdir = temproot.join('pytest-of-%s' % user)
+ else:
+ rootdir = temproot
+ rootdir.ensure(dir=1)
+ basetemp = py.path.local.make_numbered_dir(prefix='pytest-',
+ rootdir=rootdir)
+ self._basetemp = t = basetemp.realpath()
+ self.trace("new basetemp", t)
+ return t
+
+ def finish(self):
+ self.trace("finish")
+
+
+def get_user():
+ """Return the current user name, or None if getuser() does not work
+ in the current environment (see #1010).
+ """
+ import getpass
+ try:
+ return getpass.getuser()
+ except (ImportError, KeyError):
+ return None
+
+# backward compatibility
+TempdirHandler = TempdirFactory
+
+
+def pytest_configure(config):
+ """Create a TempdirFactory and attach it to the config object.
+
+ This is to comply with existing plugins which expect the handler to be
+ available at pytest_configure time, but ideally should be moved entirely
+ to the tmpdir_factory session fixture.
+ """
+ mp = monkeypatch()
+ t = TempdirFactory(config)
+ config._cleanup.extend([mp.undo, t.finish])
+ mp.setattr(config, '_tmpdirhandler', t, raising=False)
+ mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
+
+
+@pytest.fixture(scope='session')
+def tmpdir_factory(request):
+ """Return a TempdirFactory instance for the test session.
+ """
+ return request.config._tmpdirhandler
+
+
+@pytest.fixture
+def tmpdir(request, tmpdir_factory):
+ """return a temporary directory path object
+ which is unique to each test function invocation,
+ created as a sub directory of the base temporary
+ directory. The returned object is a `py.path.local`_
+ path object.
+ """
+ name = request.node.name
+ name = re.sub("[\W]", "_", name)
+ MAXVAL = 30
+ if len(name) > MAXVAL:
+ name = name[:MAXVAL]
+ x = tmpdir_factory.mktemp(name, numbered=True)
+ return x
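+
+
+# Illustrative sketch (hypothetical test function, as it would appear in a
+# test module): each invocation gets its own empty directory below the
+# session's base temporary directory.
+def _example_tmpdir_usage(tmpdir):
+    p = tmpdir.join("hello.txt")
+    p.write("content")
+    assert p.read() == "content"
+    assert len(tmpdir.listdir()) == 1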
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/unittest.py b/testing/web-platform/tests/tools/pytest/_pytest/unittest.py
new file mode 100644
index 000000000..8120e94fb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/unittest.py
@@ -0,0 +1,205 @@
+""" discovery and running of std-library "unittest" style tests. """
+from __future__ import absolute_import
+
+import sys
+import traceback
+
+import pytest
+# for transferring markers
+import _pytest._code
+from _pytest.python import transfer_markers
+from _pytest.skipping import MarkEvaluator
+
+
+def pytest_pycollect_makeitem(collector, name, obj):
+ # has unittest been imported and is obj a subclass of its TestCase?
+ try:
+ if not issubclass(obj, sys.modules["unittest"].TestCase):
+ return
+ except Exception:
+ return
+ # yes, so let's collect it
+ return UnitTestCase(name, parent=collector)
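+
+
+# Illustrative sketch (hypothetical test case): a class like this is picked up
+# by pytest_pycollect_makeitem above and collected as a UnitTestCase item
+# (defined below).
+def _example_collected_testcase():
+    import unittest
+
+    class DemoCase(unittest.TestCase):
+        def test_addition(self):
+            self.assertEqual(1 + 1, 2)
+
+    return DemoCase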
+
+
+class UnitTestCase(pytest.Class):
+ # marker for fixturemanager.getfixtureinfo()
+ # to declare that our children do not support funcargs
+ nofuncargs = True
+
+ def setup(self):
+ cls = self.obj
+ if getattr(cls, '__unittest_skip__', False):
+ return # skipped
+ setup = getattr(cls, 'setUpClass', None)
+ if setup is not None:
+ setup()
+ teardown = getattr(cls, 'tearDownClass', None)
+ if teardown is not None:
+ self.addfinalizer(teardown)
+ super(UnitTestCase, self).setup()
+
+ def collect(self):
+ from unittest import TestLoader
+ cls = self.obj
+ if not getattr(cls, "__test__", True):
+ return
+ self.session._fixturemanager.parsefactories(self, unittest=True)
+ loader = TestLoader()
+ module = self.getparent(pytest.Module).obj
+ foundsomething = False
+ for name in loader.getTestCaseNames(self.obj):
+ x = getattr(self.obj, name)
+ funcobj = getattr(x, 'im_func', x)
+ transfer_markers(funcobj, cls, module)
+ yield TestCaseFunction(name, parent=self)
+ foundsomething = True
+
+ if not foundsomething:
+ runtest = getattr(self.obj, 'runTest', None)
+ if runtest is not None:
+ ut = sys.modules.get("twisted.trial.unittest", None)
+ if ut is None or runtest != ut.TestCase.runTest:
+ yield TestCaseFunction('runTest', parent=self)
+
+
+
+class TestCaseFunction(pytest.Function):
+ _excinfo = None
+
+ def setup(self):
+ self._testcase = self.parent.obj(self.name)
+ self._fix_unittest_skip_decorator()
+ self._obj = getattr(self._testcase, self.name)
+ if hasattr(self._testcase, 'setup_method'):
+ self._testcase.setup_method(self._obj)
+ if hasattr(self, "_request"):
+ self._request._fillfixtures()
+
+ def _fix_unittest_skip_decorator(self):
+ """
+ The @unittest.skip decorator calls functools.wraps(self._testcase)
+ The call to functools.wraps() fails unless self._testcase
+ has a __name__ attribute. This is usually automatically supplied
+ if the test is a function or method, but we need to add it
+ manually here.
+
+ See issue #1169
+ """
+ if sys.version_info[0] == 2:
+ setattr(self._testcase, "__name__", self.name)
+
+ def teardown(self):
+ if hasattr(self._testcase, 'teardown_method'):
+ self._testcase.teardown_method(self._obj)
+
+ def startTest(self, testcase):
+ pass
+
+ def _addexcinfo(self, rawexcinfo):
+ # unwrap potential exception info (see twisted trial support below)
+ rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo)
+ try:
+ excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
+ except TypeError:
+ try:
+ try:
+ l = traceback.format_exception(*rawexcinfo)
+ l.insert(0, "NOTE: Incompatible Exception Representation, "
+ "displaying natively:\n\n")
+ pytest.fail("".join(l), pytrace=False)
+ except (pytest.fail.Exception, KeyboardInterrupt):
+ raise
+ except:
+ pytest.fail("ERROR: Unknown Incompatible Exception "
+ "representation:\n%r" %(rawexcinfo,), pytrace=False)
+ except KeyboardInterrupt:
+ raise
+ except pytest.fail.Exception:
+ excinfo = _pytest._code.ExceptionInfo()
+ self.__dict__.setdefault('_excinfo', []).append(excinfo)
+
+ def addError(self, testcase, rawexcinfo):
+ self._addexcinfo(rawexcinfo)
+ def addFailure(self, testcase, rawexcinfo):
+ self._addexcinfo(rawexcinfo)
+
+ def addSkip(self, testcase, reason):
+ try:
+ pytest.skip(reason)
+ except pytest.skip.Exception:
+ self._evalskip = MarkEvaluator(self, 'SkipTest')
+ self._evalskip.result = True
+ self._addexcinfo(sys.exc_info())
+
+ def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
+ try:
+ pytest.xfail(str(reason))
+ except pytest.xfail.Exception:
+ self._addexcinfo(sys.exc_info())
+
+ def addUnexpectedSuccess(self, testcase, reason=""):
+ self._unexpectedsuccess = reason
+
+ def addSuccess(self, testcase):
+ pass
+
+ def stopTest(self, testcase):
+ pass
+
+ def runtest(self):
+ self._testcase(result=self)
+
+ def _prunetraceback(self, excinfo):
+ pytest.Function._prunetraceback(self, excinfo)
+ traceback = excinfo.traceback.filter(
+ lambda x:not x.frame.f_globals.get('__unittest'))
+ if traceback:
+ excinfo.traceback = traceback
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_makereport(item, call):
+ if isinstance(item, TestCaseFunction):
+ if item._excinfo:
+ call.excinfo = item._excinfo.pop(0)
+ try:
+ del call.result
+ except AttributeError:
+ pass
+
+# twisted trial support
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_protocol(item):
+ if isinstance(item, TestCaseFunction) and \
+ 'twisted.trial.unittest' in sys.modules:
+ ut = sys.modules['twisted.python.failure']
+ Failure__init__ = ut.Failure.__init__
+ check_testcase_implements_trial_reporter()
+ def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
+ captureVars=None):
+ if exc_value is None:
+ self._rawexcinfo = sys.exc_info()
+ else:
+ if exc_type is None:
+ exc_type = type(exc_value)
+ self._rawexcinfo = (exc_type, exc_value, exc_tb)
+ try:
+ Failure__init__(self, exc_value, exc_type, exc_tb,
+ captureVars=captureVars)
+ except TypeError:
+ Failure__init__(self, exc_value, exc_type, exc_tb)
+ ut.Failure.__init__ = excstore
+ yield
+ ut.Failure.__init__ = Failure__init__
+ else:
+ yield
+
+
+def check_testcase_implements_trial_reporter(done=[]):
+ if done:
+ return
+ from zope.interface import classImplements
+ from twisted.trial.itrial import IReporter
+ classImplements(TestCaseFunction, IReporter)
+ done.append(1)
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/README.md b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/README.md
new file mode 100644
index 000000000..eab7c714f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/README.md
@@ -0,0 +1,13 @@
+This directory vendors the `pluggy` module.
+
+For a more detailed discussion of the reasons for vendoring this
+package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944).
+
+To update the current version, execute:
+
+```
+$ pip install -U pluggy==<version> --no-compile --target=_pytest/vendored_packages
+```
+
+And commit the modified files. The `pluggy-<version>.dist-info` directory
+created by `pip` should be ignored.
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/__init__.py b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/__init__.py
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst
new file mode 100644
index 000000000..aa3bbf812
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst
@@ -0,0 +1,10 @@
+Plugin registration and hook calling for Python
+===============================================
+
+This is the plugin manager as used by pytest but stripped
+of pytest specific details.
+
+During the 0.x series this plugin does not have much documentation
+except extensive docstrings in the pluggy.py module.
+
+
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA
new file mode 100644
index 000000000..ec81f0a6b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA
@@ -0,0 +1,39 @@
+Metadata-Version: 2.0
+Name: pluggy
+Version: 0.3.1
+Summary: plugin and hook calling mechanisms for python
+Home-page: UNKNOWN
+Author: Holger Krekel
+Author-email: holger at merlinux.eu
+License: MIT license
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: win32
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+
+Plugin registration and hook calling for Python
+===============================================
+
+This is the plugin manager as used by pytest but stripped
+of pytest specific details.
+
+During the 0.x series this plugin does not have much documentation
+except extensive docstrings in the pluggy.py module.
+
+
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD
new file mode 100644
index 000000000..9626673c4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD
@@ -0,0 +1,8 @@
+pluggy.py,sha256=v_RfWzyW6DPU1cJu_EFoL_OHq3t13qloVdR6UaMCXQA,29862
+pluggy-0.3.1.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7
+pluggy-0.3.1.dist-info/pbr.json,sha256=xX3s6__wOcAyF-AZJX1sdZyW6PUXT-FkfBlM69EEUCg,47
+pluggy-0.3.1.dist-info/RECORD,,
+pluggy-0.3.1.dist-info/metadata.json,sha256=nLKltOT78dMV-00uXD6Aeemp4xNsz2q59j6ORSDeLjw,1027
+pluggy-0.3.1.dist-info/METADATA,sha256=1b85Ho2u4iK30M099k7axMzcDDhLcIMb-A82JUJZnSo,1334
+pluggy-0.3.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
+pluggy-0.3.1.dist-info/DESCRIPTION.rst,sha256=P5Akh1EdIBR6CeqtV2P8ZwpGSpZiTKPw0NyS7jEiD-g,306
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL
new file mode 100644
index 000000000..9dff69d86
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json
new file mode 100644
index 000000000..426a3a7ad
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json
@@ -0,0 +1 @@
+{"license": "MIT license", "name": "pluggy", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "plugin and hook calling mechanisms for python", "platform": "unix", "version": "0.3.1", "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "holger at merlinux.eu", "name": "Holger Krekel"}]}}, "classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"]} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json
new file mode 100644
index 000000000..d6b798640
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "7d4c9cd"} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt
new file mode 100644
index 000000000..11bdb5c1f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+pluggy
diff --git a/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy.py b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy.py
new file mode 100644
index 000000000..2f848b23d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/_pytest/vendored_packages/pluggy.py
@@ -0,0 +1,777 @@
+"""
+PluginManager, basic initialization and tracing.
+
+pluggy is the crystallized core of plugin management as used
+by some 150 plugins for pytest.
+
+Pluggy uses semantic versioning. Breaking changes are only foreseen for
+Major releases (incremented X in "X.Y.Z"). If you want to use pluggy in
+your project you should thus use a dependency restriction like
+"pluggy>=0.1.0,<1.0" to avoid surprises.
+
+pluggy is concerned with hook specification, hook implementations and hook
+calling. For any given hook specification a hook call invokes up to N implementations.
+A hook implementation can influence its position and type of execution:
+if attributed "tryfirst" or "trylast" it will be tried to execute
+first or last. However, if attributed "hookwrapper" an implementation
+can wrap all calls to non-hookwrapper implementations. A hookwrapper
+can thus execute some code ahead and after the execution of other hooks.
+
+Hook specification is done by way of a regular python function where
+both the function name and the names of all its arguments are significant.
+Each hook implementation function is verified against the original specification
+function, including the names of all its arguments. To allow for hook specifications
+to evolve over the lifetime of a project, hook implementations can
+accept fewer arguments. One can thus add new arguments and semantics to
+a hook specification by adding another argument, typically without breaking
+existing hook implementations.
+
+The chosen approach is meant to let a hook designer think carefully about
+which objects are needed by an extension writer. By contrast, subclass-based
+extension mechanisms often expose a lot more state and behaviour than needed,
+thus restricting future developments.
+
+Pluggy currently consists of functionality for:
+
+- a way to register new hook specifications. Without a hook
+ specification no hook calling can be performed.
+
+- a registry of plugins which contain hook implementation functions. It
+ is possible to register plugins for which a hook specification is not yet
+ known and validate all hooks when the system is in a more referentially
+ consistent state. Setting an "optionalhook" attribute on a hook
+ implementation will avoid PluginValidationErrors if a specification
+ is missing. This allows for optional integration between plugins.
+
+- a "hook" relay object from which you can launch 1:N calls to
+ registered hook implementation functions
+
+- a mechanism for ordering hook implementation functions
+
+- mechanisms for two different types of 1:N calls: "firstresult", where
+ the call stops when the first implementation returns a non-None result,
+ and the (default) mode which guarantees that all hook implementations
+ are called and their non-None results collected.
+
+- mechanisms for "historic" extension points such that all newly
+ registered functions will receive all hook calls that happened
+ before their registration.
+
+- a mechanism for discovering plugin objects which are based on
+ setuptools based entry points.
+
+- a simple tracing mechanism, including tracing of plugin calls and
+ their arguments.
+
+"""
+import sys
+import inspect
+
+__version__ = '0.3.1'
+__all__ = ["PluginManager", "PluginValidationError",
+ "HookspecMarker", "HookimplMarker"]
+
+_py3 = sys.version_info > (3, 0)
+
+
+class HookspecMarker:
+ """ Decorator helper class for marking functions as hook specifications.
+
+ You can instantiate it with a project_name to get a decorator.
+ Calling PluginManager.add_hookspecs later will discover all marked functions
+ if the PluginManager uses the same project_name.
+ """
+
+ def __init__(self, project_name):
+ self.project_name = project_name
+
+ def __call__(self, function=None, firstresult=False, historic=False):
+ """ if passed a function, directly sets attributes on the function
+ which will make it discoverable to add_hookspecs(). If passed no
+ function, returns a decorator which can be applied to a function
+ later using the attributes supplied.
+
+ If firstresult is True the 1:N hook call (N being the number of registered
+ hook implementation functions) will stop at I<=N when the I'th function
+ returns a non-None result.
+
+ If historic is True calls to a hook will be memorized and replayed
+ on later registered plugins.
+
+ """
+ def setattr_hookspec_opts(func):
+ if historic and firstresult:
+ raise ValueError("cannot have a historic firstresult hook")
+ setattr(func, self.project_name + "_spec",
+ dict(firstresult=firstresult, historic=historic))
+ return func
+
+ if function is not None:
+ return setattr_hookspec_opts(function)
+ else:
+ return setattr_hookspec_opts
+
+
+class HookimplMarker:
+ """ Decorator helper class for marking functions as hook implementations.
+
+ You can instantiate with a project_name to get a decorator.
+ Calling PluginManager.register later will discover all marked functions
+ if the PluginManager uses the same project_name.
+ """
+ def __init__(self, project_name):
+ self.project_name = project_name
+
+ def __call__(self, function=None, hookwrapper=False, optionalhook=False,
+ tryfirst=False, trylast=False):
+
+ """ if passed a function, directly sets attributes on the function
+ which will make it discoverable to register(). If passed no function,
+ returns a decorator which can be applied to a function later using
+ the attributes supplied.
+
+ If optionalhook is True a missing matching hook specification will not result
+ in an error (by default it is an error if no matching spec is found).
+
+ If tryfirst is True this hook implementation will run as early as possible
+ in the chain of N hook implementations for a specification.
+
+ If trylast is True this hook implementation will run as late as possible
+ in the chain of N hook implementations.
+
+ If hookwrapper is True the hook implementation needs to execute exactly
+ one "yield". The code before the yield is run early, before any non-hookwrapper
+ function is run. The code after the yield is run after all non-hookwrapper
+ functions have run. The yield receives an ``_CallOutcome`` object representing
+ the exception or result outcome of the inner calls (including other hookwrapper
+ calls).
+
+ """
+ def setattr_hookimpl_opts(func):
+ setattr(func, self.project_name + "_impl",
+ dict(hookwrapper=hookwrapper, optionalhook=optionalhook,
+ tryfirst=tryfirst, trylast=trylast))
+ return func
+
+ if function is None:
+ return setattr_hookimpl_opts
+ else:
+ return setattr_hookimpl_opts(function)
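+
+
+# A minimal usage sketch (hypothetical project name "example") tying the two
+# marker classes above to the PluginManager defined further below:
+def _example_pluggy_usage():
+    hookspec = HookspecMarker("example")
+    hookimpl = HookimplMarker("example")
+
+    class Spec:
+        @hookspec
+        def myhook(self, arg1, arg2):
+            pass
+
+    class Plugin:
+        @hookimpl
+        def myhook(self, arg1, arg2):
+            return arg1 + arg2
+
+    pm = PluginManager("example")
+    pm.add_hookspecs(Spec)
+    pm.register(Plugin())
+    # non-firstresult hooks return the list of non-None results
+    return pm.hook.myhook(arg1=1, arg2=2)  # -> [3]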
+
+
+def normalize_hookimpl_opts(opts):
+ opts.setdefault("tryfirst", False)
+ opts.setdefault("trylast", False)
+ opts.setdefault("hookwrapper", False)
+ opts.setdefault("optionalhook", False)
+
+
+class _TagTracer:
+ def __init__(self):
+ self._tag2proc = {}
+ self.writer = None
+ self.indent = 0
+
+ def get(self, name):
+ return _TagTracerSub(self, (name,))
+
+ def format_message(self, tags, args):
+ if isinstance(args[-1], dict):
+ extra = args[-1]
+ args = args[:-1]
+ else:
+ extra = {}
+
+ content = " ".join(map(str, args))
+ indent = " " * self.indent
+
+ lines = [
+ "%s%s [%s]\n" % (indent, content, ":".join(tags))
+ ]
+
+ for name, value in extra.items():
+ lines.append("%s %s: %s\n" % (indent, name, value))
+ return lines
+
+ def processmessage(self, tags, args):
+ if self.writer is not None and args:
+ lines = self.format_message(tags, args)
+ self.writer(''.join(lines))
+ try:
+ self._tag2proc[tags](tags, args)
+ except KeyError:
+ pass
+
+ def setwriter(self, writer):
+ self.writer = writer
+
+ def setprocessor(self, tags, processor):
+ if isinstance(tags, str):
+ tags = tuple(tags.split(":"))
+ else:
+ assert isinstance(tags, tuple)
+ self._tag2proc[tags] = processor
+
+
+class _TagTracerSub:
+ def __init__(self, root, tags):
+ self.root = root
+ self.tags = tags
+
+ def __call__(self, *args):
+ self.root.processmessage(self.tags, args)
+
+ def setmyprocessor(self, processor):
+ self.root.setprocessor(self.tags, processor)
+
+ def get(self, name):
+ return self.__class__(self.root, self.tags + (name,))
+
+
+def _raise_wrapfail(wrap_controller, msg):
+ co = wrap_controller.gi_code
+ raise RuntimeError("wrap_controller at %r %s:%d %s" %
+ (co.co_name, co.co_filename, co.co_firstlineno, msg))
+
+
+def _wrapped_call(wrap_controller, func):
+ """ Wrap calling to a function with a generator which needs to yield
+ exactly once. The yield point will trigger calling the wrapped function
+ and return its _CallOutcome to the yield point. The generator then needs
+ to finish (raise StopIteration) in order for the wrapped call to complete.
+ """
+ try:
+ next(wrap_controller) # first yield
+ except StopIteration:
+ _raise_wrapfail(wrap_controller, "did not yield")
+ call_outcome = _CallOutcome(func)
+ try:
+ wrap_controller.send(call_outcome)
+ _raise_wrapfail(wrap_controller, "has second yield")
+ except StopIteration:
+ pass
+ return call_outcome.get_result()
+
+
+class _CallOutcome:
+ """ Outcome of a function call, either an exception or a proper result.
+ Calling the ``get_result`` method will return the result or reraise
+ the exception raised when the function was called. """
+ excinfo = None
+
+ def __init__(self, func):
+ try:
+ self.result = func()
+ except BaseException:
+ self.excinfo = sys.exc_info()
+
+ def force_result(self, result):
+ self.result = result
+ self.excinfo = None
+
+ def get_result(self):
+ if self.excinfo is None:
+ return self.result
+ else:
+ ex = self.excinfo
+ if _py3:
+ raise ex[1].with_traceback(ex[2])
+ _reraise(*ex) # noqa
+
+if not _py3:
+ exec("""
+def _reraise(cls, val, tb):
+ raise cls, val, tb
+""")
+
+
+class _TracedHookExecution:
+ def __init__(self, pluginmanager, before, after):
+ self.pluginmanager = pluginmanager
+ self.before = before
+ self.after = after
+ self.oldcall = pluginmanager._inner_hookexec
+ assert not isinstance(self.oldcall, _TracedHookExecution)
+ self.pluginmanager._inner_hookexec = self
+
+ def __call__(self, hook, hook_impls, kwargs):
+ self.before(hook.name, hook_impls, kwargs)
+ outcome = _CallOutcome(lambda: self.oldcall(hook, hook_impls, kwargs))
+ self.after(outcome, hook.name, hook_impls, kwargs)
+ return outcome.get_result()
+
+ def undo(self):
+ self.pluginmanager._inner_hookexec = self.oldcall
+
+
+class PluginManager(object):
+ """ Core Pluginmanager class which manages registration
+ of plugin objects and 1:N hook calling.
+
+ You can register new hook specifications by calling
+ ``add_hookspecs(module_or_class)``. You can register plugin objects
+ (which contain hook implementations) by calling ``register(plugin)``.
+ The PluginManager is initialized with a project_name and an optional
+ implprefix; functions whose names start with the implprefix are
+ recognized as hook implementations even without an explicit marker.
+
+ For debugging purposes you can call ``enable_tracing()``
+ which will subsequently send debug information to the trace helper.
+ """
+
+ def __init__(self, project_name, implprefix=None):
+ """ if implprefix is given implementation functions
+ will be recognized if their name matches the implprefix. """
+ self.project_name = project_name
+ self._name2plugin = {}
+ self._plugin2hookcallers = {}
+ self._plugin_distinfo = []
+ self.trace = _TagTracer().get("pluginmanage")
+ self.hook = _HookRelay(self.trace.root.get("hook"))
+ self._implprefix = implprefix
+ self._inner_hookexec = lambda hook, methods, kwargs: \
+ _MultiCall(methods, kwargs, hook.spec_opts).execute()
+
+ def _hookexec(self, hook, methods, kwargs):
+ # called from all hookcaller instances.
+ # enable_tracing will set its own wrapping function at self._inner_hookexec
+ return self._inner_hookexec(hook, methods, kwargs)
+
+ def register(self, plugin, name=None):
+ """ Register a plugin and return its canonical name or None if the name
+ is blocked from registering. Raise a ValueError if the plugin is already
+ registered. """
+ plugin_name = name or self.get_canonical_name(plugin)
+
+ if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
+ if self._name2plugin.get(plugin_name, -1) is None:
+ return # blocked plugin, return None to indicate no registration
+ raise ValueError("Plugin already registered: %s=%s\n%s" %
+ (plugin_name, plugin, self._name2plugin))
+
+ # XXX if an error happens we should make sure no state has been
+ # changed at point of return
+ self._name2plugin[plugin_name] = plugin
+
+ # register matching hook implementations of the plugin
+ self._plugin2hookcallers[plugin] = hookcallers = []
+ for name in dir(plugin):
+ hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
+ if hookimpl_opts is not None:
+ normalize_hookimpl_opts(hookimpl_opts)
+ method = getattr(plugin, name)
+ hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
+ hook = getattr(self.hook, name, None)
+ if hook is None:
+ hook = _HookCaller(name, self._hookexec)
+ setattr(self.hook, name, hook)
+ elif hook.has_spec():
+ self._verify_hook(hook, hookimpl)
+ hook._maybe_apply_history(hookimpl)
+ hook._add_hookimpl(hookimpl)
+ hookcallers.append(hook)
+ return plugin_name
+
+ def parse_hookimpl_opts(self, plugin, name):
+ method = getattr(plugin, name)
+ res = getattr(method, self.project_name + "_impl", None)
+ if res is not None and not isinstance(res, dict):
+ # false positive
+ res = None
+ elif res is None and self._implprefix and name.startswith(self._implprefix):
+ res = {}
+ return res
+
+ def unregister(self, plugin=None, name=None):
+ """ unregister a plugin object and all its contained hook implementations
+ from internal data structures. """
+ if name is None:
+ assert plugin is not None, "one of name or plugin needs to be specified"
+ name = self.get_name(plugin)
+
+ if plugin is None:
+ plugin = self.get_plugin(name)
+
+ # if self._name2plugin[name] == None registration was blocked: ignore
+ if self._name2plugin.get(name):
+ del self._name2plugin[name]
+
+ for hookcaller in self._plugin2hookcallers.pop(plugin, []):
+ hookcaller._remove_plugin(plugin)
+
+ return plugin
+
+ def set_blocked(self, name):
+ """ block registrations of the given name, unregister if already registered. """
+ self.unregister(name=name)
+ self._name2plugin[name] = None
+
+ def is_blocked(self, name):
+ """ return True if the given name blocks registering plugins of that name. """
+ return name in self._name2plugin and self._name2plugin[name] is None
+
+ def add_hookspecs(self, module_or_class):
+ """ add new hook specifications defined in the given module_or_class.
+ Functions are recognized if they have been decorated accordingly. """
+ names = []
+ for name in dir(module_or_class):
+ spec_opts = self.parse_hookspec_opts(module_or_class, name)
+ if spec_opts is not None:
+ hc = getattr(self.hook, name, None)
+ if hc is None:
+ hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
+ setattr(self.hook, name, hc)
+ else:
+ # plugins registered this hook without knowing the spec
+ hc.set_specification(module_or_class, spec_opts)
+ for hookfunction in (hc._wrappers + hc._nonwrappers):
+ self._verify_hook(hc, hookfunction)
+ names.append(name)
+
+ if not names:
+ raise ValueError("did not find any %r hooks in %r" %
+ (self.project_name, module_or_class))
+
+ def parse_hookspec_opts(self, module_or_class, name):
+ method = getattr(module_or_class, name)
+ return getattr(method, self.project_name + "_spec", None)
+
+ def get_plugins(self):
+ """ return the set of registered plugins. """
+ return set(self._plugin2hookcallers)
+
+ def is_registered(self, plugin):
+ """ Return True if the plugin is already registered. """
+ return plugin in self._plugin2hookcallers
+
+ def get_canonical_name(self, plugin):
+ """ Return canonical name for a plugin object. Note that a plugin
+ may be registered under a different name which was specified
+ by the caller of register(plugin, name). To obtain the name
+ of a registered plugin use ``get_name(plugin)`` instead."""
+ return getattr(plugin, "__name__", None) or str(id(plugin))
+
+ def get_plugin(self, name):
+ """ Return a plugin or None for the given name. """
+ return self._name2plugin.get(name)
+
+ def get_name(self, plugin):
+ """ Return name for registered plugin or None if not registered. """
+ for name, val in self._name2plugin.items():
+ if plugin == val:
+ return name
+
+ def _verify_hook(self, hook, hookimpl):
+ if hook.is_historic() and hookimpl.hookwrapper:
+ raise PluginValidationError(
+ "Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" %
+ (hookimpl.plugin_name, hook.name))
+
+ for arg in hookimpl.argnames:
+ if arg not in hook.argnames:
+ raise PluginValidationError(
+ "Plugin %r\nhook %r\nargument %r not available\n"
+ "plugin definition: %s\n"
+ "available hookargs: %s" %
+ (hookimpl.plugin_name, hook.name, arg,
+ _formatdef(hookimpl.function), ", ".join(hook.argnames)))
+
+ def check_pending(self):
+ """ Verify that all hooks which have not been verified against
+ a hook specification are optional, otherwise raise PluginValidationError"""
+ for name in self.hook.__dict__:
+ if name[0] != "_":
+ hook = getattr(self.hook, name)
+ if not hook.has_spec():
+ for hookimpl in (hook._wrappers + hook._nonwrappers):
+ if not hookimpl.optionalhook:
+ raise PluginValidationError(
+ "unknown hook %r in plugin %r" %
+ (name, hookimpl.plugin))
+
+ def load_setuptools_entrypoints(self, entrypoint_name):
+ """ Load modules by querying the specified setuptools entrypoint name.
+ Return the number of loaded plugins. """
+ from pkg_resources import iter_entry_points, DistributionNotFound
+ for ep in iter_entry_points(entrypoint_name):
+ # is the plugin registered or blocked?
+ if self.get_plugin(ep.name) or self.is_blocked(ep.name):
+ continue
+ try:
+ plugin = ep.load()
+ except DistributionNotFound:
+ continue
+ self.register(plugin, name=ep.name)
+ self._plugin_distinfo.append((plugin, ep.dist))
+ return len(self._plugin_distinfo)
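+ # Illustrative call (hypothetical entry point group name): calling
+ # load_setuptools_entrypoints("myproject_plugins") imports and registers
+ # every advertised plugin, skipping names already registered or blocked.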
+
+ def list_plugin_distinfo(self):
+ """ return a list of (plugin, distinfo) tuples for all setuptools-registered
+ plugins. """
+ return list(self._plugin_distinfo)
+
+ def list_name_plugin(self):
+ """ return list of name/plugin pairs. """
+ return list(self._name2plugin.items())
+
+ def get_hookcallers(self, plugin):
+ """ get all hook callers for the specified plugin. """
+ return self._plugin2hookcallers.get(plugin)
+
+ def add_hookcall_monitoring(self, before, after):
+ """ add before/after tracing functions for all hooks
+ and return an undo function which, when called,
+ will remove the added tracers.
+
+ ``before(hook_name, hook_impls, kwargs)`` will be called ahead
+ of all hook calls and receive a hookcaller instance, a list
+ of HookImpl instances and the keyword arguments for the hook call.
+
+ ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
+ same arguments as ``before`` but also a :py:class:`_CallOutcome` object
+ which represents the result of the overall hook call.
+ """
+ return _TracedHookExecution(self, before, after).undo
+
+ def enable_tracing(self):
+ """ enable tracing of hook calls and return an undo function. """
+ hooktrace = self.hook._trace
+
+ def before(hook_name, methods, kwargs):
+ hooktrace.root.indent += 1
+ hooktrace(hook_name, kwargs)
+
+ def after(outcome, hook_name, methods, kwargs):
+ if outcome.excinfo is None:
+ hooktrace("finish", hook_name, "-->", outcome.result)
+ hooktrace.root.indent -= 1
+
+ return self.add_hookcall_monitoring(before, after)
+
+ def subset_hook_caller(self, name, remove_plugins):
+ """ Return a new _HookCaller instance for the named method
+ which manages calls to all registered plugins except the
+ ones from remove_plugins. """
+ orig = getattr(self.hook, name)
+ plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
+ if plugins_to_remove:
+ hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class,
+ orig.spec_opts)
+ for hookimpl in (orig._wrappers + orig._nonwrappers):
+ plugin = hookimpl.plugin
+ if plugin not in plugins_to_remove:
+ hc._add_hookimpl(hookimpl)
+ # we also keep track of this hook caller so it
+ # gets properly removed on plugin unregistration
+ self._plugin2hookcallers.setdefault(plugin, []).append(hc)
+ return hc
+ return orig
+
+
+class _MultiCall:
+ """ execute a call into multiple python functions/methods. """
+
+ # XXX note that the __multicall__ argument is supported only
+ # for pytest compatibility reasons. It was never officially
+ # supported there and is explicitly deprecated since 2.8
+ # so we can remove it soon, which will let us avoid the recursion below
+ # in execute() and simplify/speed up the execute loop.
+
+ def __init__(self, hook_impls, kwargs, specopts={}):
+ self.hook_impls = hook_impls
+ self.kwargs = kwargs
+ self.kwargs["__multicall__"] = self
+ self.specopts = specopts
+
+ def execute(self):
+ all_kwargs = self.kwargs
+ self.results = results = []
+ firstresult = self.specopts.get("firstresult")
+
+ while self.hook_impls:
+ hook_impl = self.hook_impls.pop()
+ args = [all_kwargs[argname] for argname in hook_impl.argnames]
+ if hook_impl.hookwrapper:
+ return _wrapped_call(hook_impl.function(*args), self.execute)
+ res = hook_impl.function(*args)
+ if res is not None:
+ if firstresult:
+ return res
+ results.append(res)
+
+ if not firstresult:
+ return results
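+ # Note (illustrative): a hookwrapper implementation delegates to the rest
+ # of the loop via _wrapped_call, while a truthy "firstresult" spec option
+ # makes the first non-None return value short-circuit the remaining calls.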
+
+ def __repr__(self):
+ status = "%d meths" % (len(self.hook_impls),)
+ if hasattr(self, "results"):
+ status = ("%d results, " % len(self.results)) + status
+ return "<_MultiCall %s, kwargs=%r>" % (status, self.kwargs)
+
+
+def varnames(func, startindex=None):
+ """ return argument name tuple for a function, method, class or callable.
+
+ In case of a class, its "__init__" method is considered.
+ For methods the "self" parameter is not included unless you are passing
+ an unbound method with Python3 (which has no support for unbound methods)
+ """
+ cache = getattr(func, "__dict__", {})
+ try:
+ return cache["_varnames"]
+ except KeyError:
+ pass
+ if inspect.isclass(func):
+ try:
+ func = func.__init__
+ except AttributeError:
+ return ()
+ startindex = 1
+ else:
+ if not inspect.isfunction(func) and not inspect.ismethod(func):
+ func = getattr(func, '__call__', func)
+ if startindex is None:
+ startindex = int(inspect.ismethod(func))
+
+ try:
+ rawcode = func.__code__
+ except AttributeError:
+ return ()
+ try:
+ x = rawcode.co_varnames[startindex:rawcode.co_argcount]
+ except AttributeError:
+ x = ()
+ else:
+ defaults = func.__defaults__
+ if defaults:
+ x = x[:-len(defaults)]
+ try:
+ cache["_varnames"] = x
+ except TypeError:
+ pass
+ return x
+
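+# Illustrative example (hypothetical function, not defined in this module):
+#
+#   def pytest_runtest_setup(item, config=None): ...
+#
+# varnames() returns ("item",) for it: trailing arguments with defaults are
+# stripped, and for classes the "self" parameter of __init__ is skipped.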
+
+class _HookRelay:
+ """ hook holder object for performing 1:N hook calls where N is the number
+ of registered plugins.
+
+ """
+
+ def __init__(self, trace):
+ self._trace = trace
+
+
+class _HookCaller(object):
+ def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None):
+ self.name = name
+ self._wrappers = []
+ self._nonwrappers = []
+ self._hookexec = hook_execute
+ if specmodule_or_class is not None:
+ assert spec_opts is not None
+ self.set_specification(specmodule_or_class, spec_opts)
+
+ def has_spec(self):
+ return hasattr(self, "_specmodule_or_class")
+
+ def set_specification(self, specmodule_or_class, spec_opts):
+ assert not self.has_spec()
+ self._specmodule_or_class = specmodule_or_class
+ specfunc = getattr(specmodule_or_class, self.name)
+ argnames = varnames(specfunc, startindex=inspect.isclass(specmodule_or_class))
+ assert "self" not in argnames # sanity check
+ self.argnames = ["__multicall__"] + list(argnames)
+ self.spec_opts = spec_opts
+ if spec_opts.get("historic"):
+ self._call_history = []
+
+ def is_historic(self):
+ return hasattr(self, "_call_history")
+
+ def _remove_plugin(self, plugin):
+ def remove(wrappers):
+ for i, method in enumerate(wrappers):
+ if method.plugin == plugin:
+ del wrappers[i]
+ return True
+ if remove(self._wrappers) is None:
+ if remove(self._nonwrappers) is None:
+ raise ValueError("plugin %r not found" % (plugin,))
+
+ def _add_hookimpl(self, hookimpl):
+ if hookimpl.hookwrapper:
+ methods = self._wrappers
+ else:
+ methods = self._nonwrappers
+
+ if hookimpl.trylast:
+ methods.insert(0, hookimpl)
+ elif hookimpl.tryfirst:
+ methods.append(hookimpl)
+ else:
+ # find last non-tryfirst method
+ i = len(methods) - 1
+ while i >= 0 and methods[i].tryfirst:
+ i -= 1
+ methods.insert(i + 1, hookimpl)
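+ # Ordering note (illustrative): trylast implementations end up at the front
+ # of the list and tryfirst ones at the back; since callers consume the list
+ # from the end (see _MultiCall.execute), tryfirst hooks run first and
+ # trylast hooks run last.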
+
+ def __repr__(self):
+ return "<_HookCaller %r>" % (self.name,)
+
+ def __call__(self, **kwargs):
+ assert not self.is_historic()
+ return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
+
+ def call_historic(self, proc=None, kwargs=None):
+ self._call_history.append((kwargs or {}, proc))
+ # historizing hooks don't return results
+ self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
+
+ def call_extra(self, methods, kwargs):
+ """ Call the hook with some additional temporarily participating
+ methods using the specified kwargs as call parameters. """
+ old = list(self._nonwrappers), list(self._wrappers)
+ for method in methods:
+ opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
+ hookimpl = HookImpl(None, "<temp>", method, opts)
+ self._add_hookimpl(hookimpl)
+ try:
+ return self(**kwargs)
+ finally:
+ self._nonwrappers, self._wrappers = old
+
+ def _maybe_apply_history(self, method):
+ if self.is_historic():
+ for kwargs, proc in self._call_history:
+ res = self._hookexec(self, [method], kwargs)
+ if res and proc is not None:
+ proc(res[0])
+
+
+class HookImpl:
+ def __init__(self, plugin, plugin_name, function, hook_impl_opts):
+ self.function = function
+ self.argnames = varnames(self.function)
+ self.plugin = plugin
+ self.opts = hook_impl_opts
+ self.plugin_name = plugin_name
+ self.__dict__.update(hook_impl_opts)
+
+
+class PluginValidationError(Exception):
+ """ plugin failed validation. """
+
+
+if hasattr(inspect, 'signature'):
+ def _formatdef(func):
+ return "%s%s" % (
+ func.__name__,
+ str(inspect.signature(func))
+ )
+else:
+ def _formatdef(func):
+ return "%s%s" % (
+ func.__name__,
+ inspect.formatargspec(*inspect.getargspec(func))
+ )
diff --git a/testing/web-platform/tests/tools/pytest/appveyor.yml b/testing/web-platform/tests/tools/pytest/appveyor.yml
new file mode 100644
index 000000000..4b73645f7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/appveyor.yml
@@ -0,0 +1,28 @@
+environment:
+ COVERALLS_REPO_TOKEN:
+ secure: 2NJ5Ct55cHJ9WEg3xbSqCuv0rdgzzb6pnzOIG5OkMbTndw3wOBrXntWFoQrXiMFi
+ # this is pytest's token in coveralls.io, encrypted
+ # using pytestbot account as detailed here:
+ # https://www.appveyor.com/docs/build-configuration#secure-variables
+
+install:
+ - echo Installed Pythons
+ - dir c:\Python*
+
+ # install pypy using choco (redirect to a file and write to console in case
+ # choco install returns non-zero, because choco install python.pypy is too
+ # noisy)
+ - choco install python.pypy > pypy-inst.log 2>&1 || (type pypy-inst.log & exit /b 1)
+ - set PATH=C:\tools\pypy\pypy;%PATH% # so tox can find pypy
+ - echo PyPy installed
+ - pypy --version
+
+ - C:\Python35\python -m pip install tox
+
+build: false # Not a C# project, build stuff at the test step instead.
+
+test_script:
+ - C:\Python35\python -m tox
+ # coveralls is not in tox's envlist, plus for PRs the secure variable
+ # is not defined so we have to check for it
+ - if defined COVERALLS_REPO_TOKEN C:\Python35\python -m tox -e coveralls
diff --git a/testing/web-platform/tests/tools/pytest/bench/bench.py b/testing/web-platform/tests/tools/pytest/bench/bench.py
new file mode 100644
index 000000000..ce9496417
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/bench/bench.py
@@ -0,0 +1,12 @@
+import sys
+
+if __name__ == '__main__':
+ import cProfile
+ import pytest
+ import pstats
+ script = sys.argv[1:] if len(sys.argv) > 1 else "empty.py"
+ stats = cProfile.run('pytest.cmdline.main(%r)' % script, 'prof')
+ p = pstats.Stats("prof")
+ p.strip_dirs()
+ p.sort_stats('cumulative')
+ print(p.print_stats(500))
diff --git a/testing/web-platform/tests/tools/pytest/bench/bench_argcomplete.py b/testing/web-platform/tests/tools/pytest/bench/bench_argcomplete.py
new file mode 100644
index 000000000..d66c664f3
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/bench/bench_argcomplete.py
@@ -0,0 +1,19 @@
+
+
+# 10000 iterations, just for relative comparison
+# 2.7.5 3.3.2
+# FilesCompleter 75.1109 69.2116
+# FastFilesCompleter 0.7383 1.0760
+
+
+if __name__ == '__main__':
+ import sys
+ import timeit
+ from argcomplete.completers import FilesCompleter
+ from _pytest._argcomplete import FastFilesCompleter
+ count = 1000 # only a few seconds
+ setup = 'from __main__ import FastFilesCompleter\nfc = FastFilesCompleter()'
+ run = 'fc("/d")'
+ sys.stdout.write('%s\n' % (timeit.timeit(run,
+ setup=setup.replace('Fast', ''), number=count)))
+ sys.stdout.write('%s\n' % (timeit.timeit(run, setup=setup, number=count)))
diff --git a/testing/web-platform/tests/tools/pytest/bench/empty.py b/testing/web-platform/tests/tools/pytest/bench/empty.py
new file mode 100644
index 000000000..ac5e25701
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/bench/empty.py
@@ -0,0 +1,3 @@
+import py
+for i in range(1000):
+ py.builtin.exec_("def test_func_%d(): pass" % i)
diff --git a/testing/web-platform/tests/tools/pytest/bench/manyparam.py b/testing/web-platform/tests/tools/pytest/bench/manyparam.py
new file mode 100644
index 000000000..d2bca0e8a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/bench/manyparam.py
@@ -0,0 +1,12 @@
+
+import pytest
+
+@pytest.fixture(scope='module', params=range(966))
+def foo(request):
+ return request.param
+
+def test_it(foo):
+ pass
+def test_it2(foo):
+ pass
+
diff --git a/testing/web-platform/tests/tools/pytest/bench/skip.py b/testing/web-platform/tests/tools/pytest/bench/skip.py
new file mode 100644
index 000000000..960b30864
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/bench/skip.py
@@ -0,0 +1,10 @@
+
+import pytest
+
+
+SKIP = True
+
+@pytest.mark.parametrize("x", xrange(5000))
+def test_foo(x):
+ if SKIP:
+ pytest.skip("heh")
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/Makefile b/testing/web-platform/tests/tools/pytest/doc/en/Makefile
new file mode 100644
index 000000000..8621f779c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/Makefile
@@ -0,0 +1,164 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+REGENDOC_ARGS := \
+ --normalize "/={8,} (.*) ={8,}/======= \1 ========/" \
+ --normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \
+ --normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \
+ --normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \
+
+
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
+
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " showtarget to show the pytest.org target directory"
+ @echo " install to install docs to pytest.org/SITETARGET"
+ @echo " installpdf to install the doc pdf to pytest.org/SITETARGET"
+ @echo " regen to regenerate pytest examples using the installed pytest"
+ @echo " linkcheck to check all external links for integrity"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+SITETARGET=$(shell ./_getdoctarget.py)
+
+showtarget:
+ @echo $(SITETARGET)
+
+install: html
+ # for access talk to someone with login rights to
+ # pytest-dev@pytest.org to add your ssh key
+ rsync -avz _build/html/ pytest-dev@pytest.org:pytest.org/$(SITETARGET)
+
+installpdf: latexpdf
+ @scp $(BUILDDIR)/latex/pytest.pdf pytest-dev@pytest.org:pytest.org/$(SITETARGET)
+
+installall: clean install installpdf
+ @echo "done"
+
+regen:
+ PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS}
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytest.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytest.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/pytest"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytest"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ make -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+texinfo:
+ mkdir -p $(BUILDDIR)/texinfo
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ mkdir -p $(BUILDDIR)/texinfo
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_getdoctarget.py b/testing/web-platform/tests/tools/pytest/doc/en/_getdoctarget.py
new file mode 100755
index 000000000..20e487bb7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_getdoctarget.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+import py
+
+def get_version_string():
+ fn = py.path.local(__file__).join("..", "..", "..",
+ "_pytest", "__init__.py")
+ for line in fn.readlines():
+ if "version" in line and not line.strip().startswith('#'):
+ return eval(line.split("=")[-1])
+
+def get_minor_version_string():
+ return ".".join(get_version_string().split(".")[:2])
+
+if __name__ == "__main__":
+ print (get_minor_version_string())
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_templates/globaltoc.html b/testing/web-platform/tests/tools/pytest/doc/en/_templates/globaltoc.html
new file mode 100644
index 000000000..af427198a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_templates/globaltoc.html
@@ -0,0 +1,18 @@
+<h3><a href="{{ pathto(master_doc) }}">{{ _('Table Of Contents') }}</a></h3>
+
+<ul>
+ <li><a href="{{ pathto('index') }}">Home</a></li>
+ <li><a href="{{ pathto('contents') }}">Contents</a></li>
+ <li><a href="{{ pathto('getting-started') }}">Install</a></li>
+ <li><a href="{{ pathto('example/index') }}">Examples</a></li>
+ <li><a href="{{ pathto('customize') }}">Customize</a></li>
+ <li><a href="{{ pathto('contact') }}">Contact</a></li>
+ <li><a href="{{ pathto('talks') }}">Talks/Posts</a></li>
+ <li><a href="{{ pathto('changelog') }}">Changelog</a></li>
+ <li><a href="{{ pathto('license') }}">License</a></li>
+</ul>
+
+{%- if display_toc %}
+ <hr>
+ {{ toc }}
+{%- endif %}
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_templates/layout.html b/testing/web-platform/tests/tools/pytest/doc/en/_templates/layout.html
new file mode 100644
index 000000000..0ce480be3
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_templates/layout.html
@@ -0,0 +1,34 @@
+{% extends "!layout.html" %}
+{% block header %}
+<div align="center" xmlns="http://www.w3.org/1999/html" style="background-color: lightgreen; padding: .5em">
+ <h4>
+ Want to help improve pytest? Please
+ <a href="https://www.indiegogo.com/projects/python-testing-sprint-mid-2016#/">
+ contribute to
+ </a>
+ or
+ <a href="announce/sprint2016.html">
+ join
+ </a>
+ our upcoming sprint in June 2016!
+
+ </h4>
+</div>
+ {{super()}}
+{% endblock %}
+{% block footer %}
+{{ super() }}
+<script type="text/javascript">
+
+ var _gaq = _gaq || [];
+ _gaq.push(['_setAccount', 'UA-7597274-13']);
+ _gaq.push(['_trackPageview']);
+
+ (function() {
+ var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+ ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+ var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+ })();
+
+</script>
+{% endblock %}
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_templates/links.html b/testing/web-platform/tests/tools/pytest/doc/en/_templates/links.html
new file mode 100644
index 000000000..200258e16
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_templates/links.html
@@ -0,0 +1,16 @@
+<h3>Useful Links</h3>
+<ul>
+ <li>
+ <a href="https://www.indiegogo.com/projects/python-testing-sprint-mid-2016#/">
+ <b>Sprint funding campaign</b>
+ </a>
+ </li>
+ <li><a href="{{ pathto('index') }}">The pytest Website</a></li>
+ <li><a href="{{ pathto('contributing') }}">Contribution Guide</a></li>
+ <li><a href="https://pypi.python.org/pypi/pytest">pytest @ PyPI</a></li>
+ <li><a href="https://github.com/pytest-dev/pytest/">pytest @ GitHub</a></li>
+ <li><a href="http://plugincompat.herokuapp.com/">3rd party plugins</a></li>
+ <li><a href="https://github.com/pytest-dev/pytest/issues">Issue Tracker</a></li>
+ <li><a href="http://pytest.org/latest/pytest.pdf">PDF Documentation</a>
+</ul>
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_templates/sidebarintro.html b/testing/web-platform/tests/tools/pytest/doc/en/_templates/sidebarintro.html
new file mode 100644
index 000000000..ae860c172
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_templates/sidebarintro.html
@@ -0,0 +1,5 @@
+<h3>About pytest</h3>
+<p>
+ pytest is a mature full-featured Python testing tool that helps
+ you write better programs.
+</p>
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_themes/.gitignore b/testing/web-platform/tests/tools/pytest/doc/en/_themes/.gitignore
new file mode 100644
index 000000000..66b6e4c2f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_themes/.gitignore
@@ -0,0 +1,3 @@
+*.pyc
+*.pyo
+.DS_Store
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_themes/LICENSE b/testing/web-platform/tests/tools/pytest/doc/en/_themes/LICENSE
new file mode 100644
index 000000000..8daab7ee6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_themes/LICENSE
@@ -0,0 +1,37 @@
+Copyright (c) 2010 by Armin Ronacher.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms of the theme, with or
+without modification, are permitted provided that the following conditions
+are met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+* The names of the contributors may not be used to endorse or
+ promote products derived from this software without specific
+ prior written permission.
+
+We kindly ask you to only use these themes in an unmodified manner just
+for Flask and Flask-related products, not for unrelated projects. If you
+like the visual style and want to use it for your own projects, please
+consider making some larger changes to the themes (such as changing
+font faces, sizes, colors or margins).
+
+THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_themes/README b/testing/web-platform/tests/tools/pytest/doc/en/_themes/README
new file mode 100644
index 000000000..b3292bdff
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_themes/README
@@ -0,0 +1,31 @@
+Flask Sphinx Styles
+===================
+
+This repository contains sphinx styles for Flask and Flask related
+projects. To use this style in your Sphinx documentation, follow
+this guide:
+
+1. put this folder as _themes into your docs folder. Alternatively
+ you can also use git submodules to check out the contents there.
+2. add this to your conf.py:
+
+ sys.path.append(os.path.abspath('_themes'))
+ html_theme_path = ['_themes']
+ html_theme = 'flask'
+
+The following themes exist:
+
+- 'flask' - the standard flask documentation theme for large
+ projects
+- 'flask_small' - small one-page theme. Intended to be used by
+ very small addon libraries for flask.
+
+The following options exist for the flask_small theme:
+
+ [options]
+ index_logo = '' filename of a picture in _static
+ to be used as replacement for the
+ h1 in the index.rst file.
+ index_logo_height = 120px height of the index logo
+ github_fork = '' repository name on github for the
+ "fork me" badge
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/layout.html b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/layout.html
new file mode 100644
index 000000000..19c43fbbe
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/layout.html
@@ -0,0 +1,24 @@
+{%- extends "basic/layout.html" %}
+{%- block extrahead %}
+ {{ super() }}
+ {% if theme_touch_icon %}
+ <link rel="apple-touch-icon" href="{{ pathto('_static/' ~ theme_touch_icon, 1) }}" />
+ {% endif %}
+ <meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9">
+{% endblock %}
+{%- block relbar2 %}{% endblock %}
+{% block header %}
+ {{ super() }}
+ {% if pagename == 'index' %}
+ <div class=indexwrapper>
+ {% endif %}
+{% endblock %}
+{%- block footer %}
+ <div class="footer">
+ &copy; Copyright {{ copyright }}.
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a>.
+ </div>
+ {% if pagename == 'index' %}
+ </div>
+ {% endif %}
+{%- endblock %}
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/relations.html b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/relations.html
new file mode 100644
index 000000000..3bbcde85b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/relations.html
@@ -0,0 +1,19 @@
+<h3>Related Topics</h3>
+<ul>
+ <li><a href="{{ pathto(master_doc) }}">Documentation overview</a><ul>
+ {%- for parent in parents %}
+ <li><a href="{{ parent.link|e }}">{{ parent.title }}</a><ul>
+ {%- endfor %}
+ {%- if prev %}
+ <li>Previous: <a href="{{ prev.link|e }}" title="{{ _('previous chapter')
+ }}">{{ prev.title }}</a></li>
+ {%- endif %}
+ {%- if next %}
+ <li>Next: <a href="{{ next.link|e }}" title="{{ _('next chapter')
+ }}">{{ next.title }}</a></li>
+ {%- endif %}
+ {%- for parent in parents %}
+ </ul></li>
+ {%- endfor %}
+ </ul></li>
+</ul>
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/static/flasky.css_t b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/static/flasky.css_t
new file mode 100644
index 000000000..6b593da29
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/static/flasky.css_t
@@ -0,0 +1,557 @@
+/*
+ * flasky.css_t
+ * ~~~~~~~~~~~~
+ *
+ * :copyright: Copyright 2010 by Armin Ronacher.
+ * :license: Flask Design License, see LICENSE for details.
+ */
+
+{% set page_width = '1020px' %}
+{% set sidebar_width = '220px' %}
+/* orange of logo is #d67c29 but we use black for links for now */
+{% set link_color = '#000' %}
+{% set link_hover_color = '#000' %}
+{% set base_font = 'sans-serif' %}
+{% set header_font = 'serif' %}
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: {{ base_font }};
+ font-size: 17px;
+ background-color: white;
+ color: #000;
+ margin: 0;
+ padding: 0;
+}
+
+div.document {
+ width: {{ page_width }};
+ margin: 30px auto 0 auto;
+}
+
+div.documentwrapper {
+ float: left;
+ width: 100%;
+}
+
+div.bodywrapper {
+ margin: 0 0 0 {{ sidebar_width }};
+}
+
+div.sphinxsidebar {
+ width: {{ sidebar_width }};
+}
+
+hr {
+ border: 0;
+ border-top: 1px solid #B1B4B6;
+}
+
+div.body {
+ background-color: #ffffff;
+ color: #3E4349;
+ padding: 0 30px 0 30px;
+}
+
+img.floatingflask {
+ padding: 0 0 10px 10px;
+ float: right;
+}
+
+div.footer {
+ width: {{ page_width }};
+ margin: 20px auto 30px auto;
+ font-size: 14px;
+ color: #888;
+ text-align: right;
+}
+
+div.footer a {
+ color: #888;
+}
+
+div.related {
+ display: none;
+}
+
+div.sphinxsidebar a {
+ color: #444;
+ text-decoration: none;
+ border-bottom: 1px dotted #999;
+}
+
+div.sphinxsidebar a:hover {
+ border-bottom: 1px solid #999;
+}
+
+div.sphinxsidebar {
+ font-size: 14px;
+ line-height: 1.5;
+}
+
+div.sphinxsidebarwrapper {
+ padding: 18px 10px;
+}
+
+div.sphinxsidebarwrapper p.logo {
+ padding: 0 0 20px 0;
+ margin: 0;
+ text-align: center;
+}
+
+div.sphinxsidebar h3,
+div.sphinxsidebar h4 {
+ font-family: {{ header_font }};
+ color: #444;
+ font-size: 24px;
+ font-weight: normal;
+ margin: 0 0 5px 0;
+ padding: 0;
+}
+
+div.sphinxsidebar h4 {
+ font-size: 20px;
+}
+
+div.sphinxsidebar h3 a {
+ color: #444;
+}
+
+div.sphinxsidebar p.logo a,
+div.sphinxsidebar h3 a,
+div.sphinxsidebar p.logo a:hover,
+div.sphinxsidebar h3 a:hover {
+ border: none;
+}
+
+div.sphinxsidebar p {
+ color: #555;
+ margin: 10px 0;
+}
+
+div.sphinxsidebar ul {
+ margin: 10px 0;
+ padding: 0;
+ color: #000;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #ccc;
+ font-family: {{ base_font }};
+ font-size: 1em;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+a {
+ color: {{ link_color }};
+ text-decoration: underline;
+}
+
+a:hover {
+ color: {{ link_hover_color }};
+ text-decoration: underline;
+}
+
+a.reference.internal em {
+ font-style: normal;
+}
+
+div.body h1,
+div.body h2,
+div.body h3,
+div.body h4,
+div.body h5,
+div.body h6 {
+ font-family: {{ header_font }};
+ font-weight: normal;
+ margin: 30px 0px 10px 0px;
+ padding: 0;
+}
+
+{% if theme_index_logo %}
+div.indexwrapper h1 {
+ text-indent: -999999px;
+ background: url({{ theme_index_logo }}) no-repeat center center;
+ height: {{ theme_index_logo_height }};
+}
+{% else %}
+div.indexwrapper div.body h1 {
+ font-size: 200%;
+}
+{% endif %}
+div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
+div.body h2 { font-size: 180%; }
+div.body h3 { font-size: 150%; }
+div.body h4 { font-size: 130%; }
+div.body h5 { font-size: 100%; }
+div.body h6 { font-size: 100%; }
+
+a.headerlink {
+ color: #ddd;
+ padding: 0 4px;
+ text-decoration: none;
+}
+
+a.headerlink:hover {
+ color: #444;
+ background: #eaeaea;
+}
+
+div.body p, div.body dd, div.body li {
+ line-height: 1.4em;
+}
+
+div.admonition {
+ background: #fafafa;
+ margin: 20px -30px;
+ padding: 10px 30px;
+ border-top: 1px solid #ccc;
+ border-bottom: 1px solid #ccc;
+}
+
+div.admonition tt.xref, div.admonition a tt {
+ border-bottom: 1px solid #fafafa;
+}
+
+dd div.admonition {
+ margin-left: -60px;
+ padding-left: 60px;
+}
+
+div.admonition p.admonition-title {
+ font-family: {{ header_font }};
+ font-weight: normal;
+ font-size: 24px;
+ margin: 0 0 10px 0;
+ padding: 0;
+ line-height: 1;
+}
+
+div.admonition p.last {
+ margin-bottom: 0;
+}
+
+div.highlight {
+ background-color: white;
+}
+
+dt:target, .highlight {
+ background: #FAF3E8;
+}
+
+div.note {
+ background-color: #eee;
+ border: 1px solid #ccc;
+}
+
+div.seealso {
+ background-color: #ffc;
+ border: 1px solid #ff6;
+}
+
+div.topic {
+ background-color: #eee;
+}
+
+p.admonition-title {
+ display: inline;
+}
+
+p.admonition-title:after {
+ content: ":";
+}
+
+pre, tt, code {
+ font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.9em;
+ background: #eee;
+}
+
+img.screenshot {
+}
+
+tt.descname, tt.descclassname {
+ font-size: 0.95em;
+}
+
+tt.descname {
+ padding-right: 0.08em;
+}
+
+img.screenshot {
+ -moz-box-shadow: 2px 2px 4px #eee;
+ -webkit-box-shadow: 2px 2px 4px #eee;
+ box-shadow: 2px 2px 4px #eee;
+}
+
+table.docutils {
+ border: 1px solid #888;
+ -moz-box-shadow: 2px 2px 4px #eee;
+ -webkit-box-shadow: 2px 2px 4px #eee;
+ box-shadow: 2px 2px 4px #eee;
+}
+
+table.docutils td, table.docutils th {
+ border: 1px solid #888;
+ padding: 0.25em 0.7em;
+}
+
+table.field-list, table.footnote {
+ border: none;
+ -moz-box-shadow: none;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+
+table.footnote {
+ margin: 15px 0;
+ width: 100%;
+ border: 1px solid #eee;
+ background: #fdfdfd;
+ font-size: 0.9em;
+}
+
+table.footnote + table.footnote {
+ margin-top: -15px;
+ border-top: none;
+}
+
+table.field-list th {
+ padding: 0 0.8em 0 0;
+}
+
+table.field-list td {
+ padding: 0;
+}
+
+table.footnote td.label {
+ width: 0px;
+ padding: 0.3em 0 0.3em 0.5em;
+}
+
+table.footnote td {
+ padding: 0.3em 0.5em;
+}
+
+dl {
+ margin: 0;
+ padding: 0;
+}
+
+dl dd {
+ margin-left: 30px;
+}
+
+blockquote {
+ margin: 0 0 0 30px;
+ padding: 0;
+}
+
+ul, ol {
+ margin: 10px 0 10px 30px;
+ padding: 0;
+}
+
+pre {
+ background: #eee;
+ padding: 7px 30px;
+ margin: 15px -30px;
+ line-height: 1.3em;
+}
+
+dl pre, blockquote pre, li pre {
+ margin-left: -60px;
+ padding-left: 60px;
+}
+
+dl dl pre {
+ margin-left: -90px;
+ padding-left: 90px;
+}
+
+tt {
+ background-color: #ecf0f3;
+ color: #222;
+ /* padding: 1px 2px; */
+}
+
+tt.xref, a tt {
+ background-color: #FBFBFB;
+ border-bottom: 1px solid white;
+}
+
+a.reference {
+ text-decoration: none;
+ border-bottom: 1px dotted {{ link_color }};
+}
+
+a.reference:hover {
+ border-bottom: 1px solid {{ link_hover_color }};
+}
+
+a.footnote-reference {
+ text-decoration: none;
+ font-size: 0.7em;
+ vertical-align: top;
+ border-bottom: 1px dotted {{ link_color }};
+}
+
+a.footnote-reference:hover {
+ border-bottom: 1px solid {{ link_hover_color }};
+}
+
+a:hover tt {
+ background: #EEE;
+}
+
+
+@media screen and (max-width: 870px) {
+
+ div.sphinxsidebar {
+ display: none;
+ }
+
+ div.document {
+ width: 100%;
+
+ }
+
+ div.documentwrapper {
+ margin-left: 0;
+ margin-top: 0;
+ margin-right: 0;
+ margin-bottom: 0;
+ }
+
+ div.bodywrapper {
+ margin-top: 0;
+ margin-right: 0;
+ margin-bottom: 0;
+ margin-left: 0;
+ }
+
+ ul {
+ margin-left: 0;
+ }
+
+ .document {
+ width: auto;
+ }
+
+ .footer {
+ width: auto;
+ }
+
+ .bodywrapper {
+ margin: 0;
+ }
+
+ .footer {
+ width: auto;
+ }
+
+ .github {
+ display: none;
+ }
+
+
+
+}
+
+
+
+@media screen and (max-width: 875px) {
+
+ body {
+ margin: 0;
+ padding: 20px 30px;
+ }
+
+ div.documentwrapper {
+ float: none;
+ background: white;
+ }
+
+ div.sphinxsidebar {
+ display: block;
+ float: none;
+ width: 102.5%;
+ margin: 50px -30px -20px -30px;
+ padding: 10px 20px;
+ background: #333;
+ color: white;
+ }
+
+ div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p,
+ div.sphinxsidebar h3 a, div.sphinxsidebar ul {
+ color: white;
+ }
+
+ div.sphinxsidebar a {
+ color: #aaa;
+ }
+
+ div.sphinxsidebar p.logo {
+ display: none;
+ }
+
+ div.document {
+ width: 100%;
+ margin: 0;
+ }
+
+ div.related {
+ display: block;
+ margin: 0;
+ padding: 10px 0 20px 0;
+ }
+
+ div.related ul,
+ div.related ul li {
+ margin: 0;
+ padding: 0;
+ }
+
+ div.footer {
+ display: none;
+ }
+
+ div.bodywrapper {
+ margin: 0;
+ }
+
+ div.body {
+ min-height: 0;
+ padding: 0;
+ }
+
+ .rtd_doc_footer {
+ display: none;
+ }
+
+ .document {
+ width: auto;
+ }
+
+ .footer {
+ width: auto;
+ }
+
+ .footer {
+ width: auto;
+ }
+
+ .github {
+ display: none;
+ }
+}
+
+/* misc. */
+
+.revsys-inline {
+ display: none!important;
+}
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/theme.conf b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/theme.conf
new file mode 100644
index 000000000..18c720f80
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask/theme.conf
@@ -0,0 +1,9 @@
+[theme]
+inherit = basic
+stylesheet = flasky.css
+pygments_style = flask_theme_support.FlaskyStyle
+
+[options]
+index_logo = ''
+index_logo_height = 120px
+touch_icon =
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask_theme_support.py b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask_theme_support.py
new file mode 100644
index 000000000..33f47449c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/_themes/flask_theme_support.py
@@ -0,0 +1,86 @@
+# flasky extensions. flasky pygments style based on tango style
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
+
+
+class FlaskyStyle(Style):
+ background_color = "#f8f8f8"
+ default_style = ""
+
+ styles = {
+ # No corresponding class for the following:
+ #Text: "", # class: ''
+ Whitespace: "underline #f8f8f8", # class: 'w'
+ Error: "#a40000 border:#ef2929", # class: 'err'
+ Other: "#000000", # class 'x'
+
+ Comment: "italic #8f5902", # class: 'c'
+ Comment.Preproc: "noitalic", # class: 'cp'
+
+ Keyword: "bold #004461", # class: 'k'
+ Keyword.Constant: "bold #004461", # class: 'kc'
+ Keyword.Declaration: "bold #004461", # class: 'kd'
+ Keyword.Namespace: "bold #004461", # class: 'kn'
+ Keyword.Pseudo: "bold #004461", # class: 'kp'
+ Keyword.Reserved: "bold #004461", # class: 'kr'
+ Keyword.Type: "bold #004461", # class: 'kt'
+
+ Operator: "#582800", # class: 'o'
+ Operator.Word: "bold #004461", # class: 'ow' - like keywords
+
+ Punctuation: "bold #000000", # class: 'p'
+
+ # because special names such as Name.Class, Name.Function, etc.
+ # are not recognized as such later in the parsing, we choose them
+ # to look the same as ordinary variables.
+ Name: "#000000", # class: 'n'
+ Name.Attribute: "#c4a000", # class: 'na' - to be revised
+ Name.Builtin: "#004461", # class: 'nb'
+ Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
+ Name.Class: "#000000", # class: 'nc' - to be revised
+ Name.Constant: "#000000", # class: 'no' - to be revised
+ Name.Decorator: "#888", # class: 'nd' - to be revised
+ Name.Entity: "#ce5c00", # class: 'ni'
+ Name.Exception: "bold #cc0000", # class: 'ne'
+ Name.Function: "#000000", # class: 'nf'
+ Name.Property: "#000000", # class: 'py'
+ Name.Label: "#f57900", # class: 'nl'
+ Name.Namespace: "#000000", # class: 'nn' - to be revised
+ Name.Other: "#000000", # class: 'nx'
+ Name.Tag: "bold #004461", # class: 'nt' - like a keyword
+ Name.Variable: "#000000", # class: 'nv' - to be revised
+ Name.Variable.Class: "#000000", # class: 'vc' - to be revised
+ Name.Variable.Global: "#000000", # class: 'vg' - to be revised
+ Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
+
+ Number: "#990000", # class: 'm'
+
+ Literal: "#000000", # class: 'l'
+ Literal.Date: "#000000", # class: 'ld'
+
+ String: "#4e9a06", # class: 's'
+ String.Backtick: "#4e9a06", # class: 'sb'
+ String.Char: "#4e9a06", # class: 'sc'
+ String.Doc: "italic #8f5902", # class: 'sd' - like a comment
+ String.Double: "#4e9a06", # class: 's2'
+ String.Escape: "#4e9a06", # class: 'se'
+ String.Heredoc: "#4e9a06", # class: 'sh'
+ String.Interpol: "#4e9a06", # class: 'si'
+ String.Other: "#4e9a06", # class: 'sx'
+ String.Regex: "#4e9a06", # class: 'sr'
+ String.Single: "#4e9a06", # class: 's1'
+ String.Symbol: "#4e9a06", # class: 'ss'
+
+ Generic: "#000000", # class: 'g'
+ Generic.Deleted: "#a40000", # class: 'gd'
+ Generic.Emph: "italic #000000", # class: 'ge'
+ Generic.Error: "#ef2929", # class: 'gr'
+ Generic.Heading: "bold #000080", # class: 'gh'
+ Generic.Inserted: "#00A000", # class: 'gi'
+ Generic.Output: "#888", # class: 'go'
+ Generic.Prompt: "#745334", # class: 'gp'
+ Generic.Strong: "bold #000000", # class: 'gs'
+ Generic.Subheading: "bold #800080", # class: 'gu'
+ Generic.Traceback: "bold #a40000", # class: 'gt'
+ }
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/adopt.rst b/testing/web-platform/tests/tools/pytest/doc/en/adopt.rst
new file mode 100644
index 000000000..aead96e7f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/adopt.rst
@@ -0,0 +1,78 @@
+
+April 2015 is "adopt pytest month"
+=============================================
+
+Are you an enthusiastic pytest user, the local testing guru in your workplace? Or are you considering using pytest for your open source project, but not sure how to get started? Then you may be interested in "adopt pytest month"!
+
+We will pair experienced pytest users with open source projects, for a month's effort of getting new development teams started with pytest.
+
+In 2015 we are trying this for the first time. In February and March 2015 we will gather volunteers on both sides, in April we will do the work, and in May we will evaluate how it went. This effort is being coordinated by Brianna Laugher. If you have any questions or comments, you can raise them on the `@pytestdotorg twitter account <https://twitter.com/pytestdotorg>`_, the `issue tracker`_ or the `pytest-dev mailing list`_.
+
+
+.. _`issue tracker`: https://github.com/pytest-dev/pytest/issues/676
+.. _`pytest-dev mailing list`: https://mail.python.org/mailman/listinfo/pytest-dev
+
+
+The ideal pytest helper
+-----------------------------------------
+
+ - will be able to commit 2-4 hours a week to working with their particular project (this might involve joining their mailing list, installing the software and exploring any existing tests, offering advice, writing some example tests)
+ - feels confident in using pytest (e.g. has explored command line options, knows how to write parametrized tests, has an idea about conftest contents)
+ - does not need to be an expert in every aspect!
+
+`Pytest helpers, sign up here`_! (preferably in February, hard deadline 22 March)
+
+
+.. _`Pytest helpers, sign up here`: http://goo.gl/forms/nxqAhqWt1P
+
+
+The ideal partner project
+-----------------------------------------
+
+ - is open source, and predominantly written in Python
+ - has an automated/documented install process for developers
+ - has more than one core developer
+ - has at least one official release (e.g. is available on pypi)
+ - has the support of the core development team, in trying out pytest adoption
+ - has no tests... or 100% test coverage... or somewhere in between!
+
+`Partner projects, sign up here`_! (by 22 March)
+
+
+.. _`Partner projects, sign up here`: http://goo.gl/forms/ZGyqlHiwk3
+
+
+What does it mean to "adopt pytest"?
+-----------------------------------------
+
+There can be many different definitions of "success". Pytest can run many `nose and unittest`_ tests by default, so using pytest as your testrunner may be possible from day 1. Job done, right?
+
+Progressive success might look like:
+
+ - tests can be run (by pytest) without errors (there may be failures)
+ - tests can be run (by pytest) without failures
+ - test runner is integrated into CI server
+ - existing tests are rewritten to take advantage of pytest features - this can happen in several iterations, for example:
+ - changing to native assert_ statements (pycmd_ has a script to help with that, ``pyconvert_unittest.py``)
+ - changing `setUp/tearDown methods`_ to fixtures_
+ - adding markers_
+ - other changes to reduce boilerplate
+ - assess needs for future tests to be written, e.g. new fixtures, distributed_ testing tweaks
+
+"Success" should also include that the development team feels comfortable with their knowledge of how to use pytest. In fact this is probably more important than anything else. So spending a lot of time on communication, giving examples, etc will probably be important - both in running the tests, and in writing them.
+
+It may be that, after the month is up, the partner project decides that pytest is not right for it. That's okay - hopefully the pytest team will also learn something about pytest's weaknesses or deficiencies.
+
+.. _`nose and unittest`: faq.html#how-does-pytest-relate-to-nose-and-unittest
+.. _assert: asserts.html
+.. _pycmd: https://bitbucket.org/hpk42/pycmd/overview
+.. _`setUp/tearDown methods`: xunit_setup.html
+.. _fixtures: fixture.html
+.. _markers: markers.html
+.. _distributed: xdist.html
+
+
+Other ways to help
+-----------------------------------------
+
+Promote! Do your favourite open source Python projects use pytest? If not, why not tell them about this page?
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/index.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/index.rst
new file mode 100644
index 000000000..877afff77
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/index.rst
@@ -0,0 +1,48 @@
+
+Release announcements
+===========================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+ sprint2016
+ release-2.9.0
+ release-2.8.7
+ release-2.8.6
+ release-2.8.5
+ release-2.8.4
+ release-2.8.3
+ release-2.8.2
+ release-2.7.2
+ release-2.7.1
+ release-2.7.0
+ release-2.6.3
+ release-2.6.2
+ release-2.6.1
+ release-2.6.0
+ release-2.5.2
+ release-2.5.1
+ release-2.5.0
+ release-2.4.2
+ release-2.4.1
+ release-2.4.0
+ release-2.3.5
+ release-2.3.4
+ release-2.3.3
+ release-2.3.2
+ release-2.3.1
+ release-2.3.0
+ release-2.2.4
+ release-2.2.2
+ release-2.2.1
+ release-2.2.0
+ release-2.1.3
+ release-2.1.2
+ release-2.1.1
+ release-2.1.0
+ release-2.0.3
+ release-2.0.2
+ release-2.0.1
+ release-2.0.0
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.0.rst
new file mode 100644
index 000000000..af745fc59
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.0.rst
@@ -0,0 +1,129 @@
+py.test 2.0.0: asserts++, unittest++, reporting++, config++, docs++
+===========================================================================
+
+Welcome to pytest-2.0.0, a major new release of "py.test", the rapid
+easy Python testing tool. There are many new features and enhancements,
+see below for summary and detailed lists. A lot of long-deprecated code
+has been removed, resulting in a much smaller and cleaner
+implementation. See the new docs with examples here:
+
+ http://pytest.org/2.0.0/index.html
+
+A note on packaging: pytest used to be part of the "py" distribution up
+until version py-1.3.4 but this has changed now: pytest-2.0.0 only
+contains py.test related code and is expected to be backward-compatible
+to existing test code. If you want to install pytest, just type one of::
+
+ pip install -U pytest
+ easy_install -U pytest
+
+Many thanks to all issue reporters and people asking questions or
+complaining. Particular thanks to Floris Bruynooghe and Ronny Pfannschmidt
+for their great coding contributions and many others for feedback and help.
+
+best,
+holger krekel
+
+
+New Features
+-----------------------
+
+- new invocations through Python interpreter and from Python::
+
+ python -m pytest # on all pythons >= 2.5
+
+ or from a python program::
+
+ import pytest ; pytest.main(arglist, pluginlist)
+
+ see http://pytest.org/2.0.0/usage.html for details.
+
+- new and better reporting information in assert expressions
+ if comparing lists, sequences or strings.
+
+ see http://pytest.org/2.0.0/assert.html#newreport
+
+- new configuration through ini-files (setup.cfg or tox.ini recognized),
+ for example::
+
+ [pytest]
+ norecursedirs = .hg data* # don't ever recurse in such dirs
+ addopts = -x --pyargs # add these command line options by default
+
+ see http://pytest.org/2.0.0/customize.html
+
+- improved standard unittest support. In general py.test should now
+ be better able to run custom unittest.TestCases like twisted trial
+ or Django based TestCases. Also you can now run the tests of an
+ installed 'unittest' package with py.test::
+
+ py.test --pyargs unittest
+
+- new "-q" option which decreases verbosity and prints a more
+ nose/unittest-style "dot" output.
+
+- many more detailed improvements
+
+Fixes
+-----------------------
+
+- fix issue126 - introduce py.test.set_trace() to trace execution via
+ PDB during the running of tests even if capturing is ongoing.
+- fix issue124 - make reporting more resilient against tests opening
+ files on filedescriptor 1 (stdout).
+- fix issue109 - sibling conftest.py files will not be loaded.
+ (and Directory collectors cannot be customized anymore from a Directory's
+ conftest.py - this needs to happen at least one level up).
+- fix issue88 (finding custom test nodes from command line arg)
+- fix issue93 stdout/stderr is captured while importing conftest.py
+- fix bug: unittest collected functions now also can have "pytestmark"
+ applied at class/module level
+
+Important Notes
+--------------------
+
+* The usual way in pre-2.0 times to use py.test in python code was
+ to import "py" and then e.g. use "py.test.raises" for the helper.
+ This remains valid and is not planned to be deprecated. However,
+ in most examples and internal code you'll find "import pytest"
+ and "pytest.raises" used as the recommended default way.
+
+* pytest now first performs collection of the complete test suite
+ before running any test. This changes for example the semantics of when
+ pytest_collectstart/pytest_collectreport are called. Some plugins may
+ need upgrading.
+
+* The pytest package consists of a 400 LOC core.py and about 20 builtin plugins,
+ summing up to roughly 5000 LOCs, including docstrings. To be fair, it also
+ uses generic code from the "pylib", and the new "py" package to help
+ with filesystem and introspection/code manipulation.
+
+(Incompatible) Removals
+-----------------------------
+
+- py.test.config is now only available if you are in a test run.
+
+- the following (mostly already deprecated) functionality was removed:
+
+ - removed support for Module/Class/... collection node definitions
+ in conftest.py files. They will cause nothing special.
+ - removed support for calling the pre-1.0 collection API of "run()" and "join"
+ - removed reading option values from conftest.py files or env variables.
+ This can now be done much much better and easier through the ini-file
+ mechanism and the "addopts" entry in particular.
+ - removed the "disabled" attribute in test classes. Use the skipping
+ and pytestmark mechanism to skip or xfail a test class.
+
+- py.test.collect.Directory does not exist anymore and it
+  is not possible to provide your own "Directory" object.
+ If you have used this and don't know what to do, get
+ in contact. We'll figure something out.
+
+ Note that pytest_collect_directory() is still called but
+ any return value will be ignored. This allows old code to keep
+ working that, for example, performed "py.test.skip()" in collect()
+ to prevent recursion into directory trees if a certain dependency
+ or command line option is missing.
+
+
+see :ref:`changelog` for more detailed changes.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.1.rst
new file mode 100644
index 000000000..2f41ef943
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.1.rst
@@ -0,0 +1,67 @@
+py.test 2.0.1: bug fixes
+===========================================================================
+
+Welcome to pytest-2.0.1, a maintenance and bug fix release of pytest,
+a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
+and latest PyPy interpreters. See extensive docs with tested examples here:
+
+ http://pytest.org/
+
+If you want to install or upgrade pytest, just type one of::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+Many thanks to all issue reporters and people asking questions or
+complaining. Particular thanks to Floris Bruynooghe and Ronny Pfannschmidt
+for their great coding contributions and many others for feedback and help.
+
+best,
+holger krekel
+
+Changes between 2.0.0 and 2.0.1
+----------------------------------------------
+
+- refine and unify initial capturing so that it works nicely
+ even if the logging module is used on an early-loaded conftest.py
+ file or plugin.
+- fix issue12 - show plugin versions with "--version" and
+ "--traceconfig" and also document how to add extra information
+ to reporting test header
+- fix issue17 (import-* reporting issue on python3) by
+ requiring py>1.4.0 (1.4.1 is going to include it)
+- fix issue10 (numpy arrays truth checking) by refining
+ assertion interpretation in py lib
+- fix issue15: make nose compatibility tests compatible
+ with python3 (now that nose-1.0 supports python3)
+- remove somewhat surprising "same-conftest" detection because
+ it ignores conftest.py files when they appear in several subdirs.
+- improve assertions ("not in"), thanks Floris Bruynooghe
+- improve behaviour/warnings when running on top of "python -OO"
+ (assertions and docstrings are turned off, leading to potential
+ false positives)
+- introduce a pytest_cmdline_processargs(args) hook
+ to allow dynamic computation of command line arguments.
+ This fixes a regression because py.test prior to 2.0
+ allowed setting command line options from conftest.py
+ files, which pytest-2.0 so far only allowed from ini-files.
+- fix issue7: assert failures in doctest modules.
+ unexpected failures in doctests will now generally be shown more
+ nicely, i.e. within the failing doctest context.
+- fix issue9: setup/teardown functions for an xfail-marked
+ test will report as xfail if they fail but report as normally
+ passing (not xpassing) if they succeed. This is only true
+ for "direct" setup/teardown invocations because teardown_class/
+ teardown_module cannot closely relate to a single test.
+- fix issue14: no logging errors at process exit
+- refinements to "collecting" output on non-ttys
+- refine internal plugin registration and --traceconfig output
+- introduce a mechanism to prevent/unregister plugins from the
+ command line, see http://pytest.org/latest/plugins.html#cmdunregister
+- activate resultlog plugin by default
+- fix regression wrt yielded tests which due to the
+ collection-before-running semantics were not
+ setup as with pytest 1.3.4. Note, however, that
+ the recommended and much cleaner way to do test
+ parametrization remains the "pytest_generate_tests"
+ mechanism, see the docs.
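+
+  A minimal, untested sketch of that mechanism, using the addcall() API
+  described in the pytest-2.0 era docs (the hook lives in a conftest.py;
+  all names below are invented)::
+
+      # conftest.py
+      def pytest_generate_tests(metafunc):
+          if "number" in metafunc.funcargnames:
+              for value in (1, 2, 3):
+                  metafunc.addcall(funcargs=dict(number=value))
+
+      # test_numbers.py
+      def test_is_positive(number):
+          assert number > 0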
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.2.rst
new file mode 100644
index 000000000..733a9f7bd
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.2.rst
@@ -0,0 +1,73 @@
+py.test 2.0.2: bug fixes, improved xfail/skip expressions, speed ups
+===========================================================================
+
+Welcome to pytest-2.0.2, a maintenance and bug fix release of pytest,
+a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
+and latest PyPy interpreters. See the extensive docs with tested examples here:
+
+ http://pytest.org/
+
+If you want to install or upgrade pytest, just type one of::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+Many thanks to all issue reporters and people asking questions
+or complaining, particularly Jurko for his insistence,
+Laura, Victor and Brianna for helping with improvements,
+and Ronny for his general advice.
+
+best,
+holger krekel
+
+Changes between 2.0.1 and 2.0.2
+----------------------------------------------
+
+- tackle issue32 - speed up test runs of very quick test functions
+ by reducing the relative overhead
+
+- fix issue30 - extended xfail/skipif handling and improved reporting.
+ If you have a syntax error in your skip/xfail
+ expressions you now get nice error reports.
+
+ Also you can now access module globals from xfail/skipif
+ expressions so that, for example, this now works::
+
+ import pytest
+ import mymodule
+ @pytest.mark.skipif("mymodule.__version__[0] != '1'")
+ def test_function():
+ pass
+
+ This will not run the test function if the module's version string
+ does not start with a "1". Note that specifying a string instead
+ of a boolean expression allows py.test to report meaningful information
+ when summarizing a test run as to what conditions lead to skipping
+ (or xfail-ing) tests.
+
+- fix issue28 - setup_method and pytest_generate_tests work together
+ The setup_method fixture method now gets called also for
+ test function invocations generated from the pytest_generate_tests
+ hook.
+
+- fix issue27 - collectonly and keyword-selection (-k) now work together
+ Also, if you do "py.test --collectonly -q" you now get a flat list
+ of test ids that you can use to paste to the py.test commandline
+ in order to execute a particular test.
+
+- fix issue25 avoid reported problems with --pdb and python3.2/encodings output
+
+- fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP
+ Starting with Python3.2 os.symlink may be supported. By requiring
+ a newer py lib version the py.path.local() implementation acknowledges
+ this.
+
+- fixed typos in the docs (thanks Victor Garcia, Brianna Laugher); particular
+ thanks to Laura Creighton, who also reviewed parts of the documentation.
+
+- fix slightly wrong output of verbose progress reporting for classes
+ (thanks Amaury)
+
+- more precise (avoiding of) deprecation warnings for node.Class|Function accesses
+
+- avoid std unittest assertion helper code in tracebacks (thanks Ronny)
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.3.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.3.rst
new file mode 100644
index 000000000..ed746e851
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.0.3.rst
@@ -0,0 +1,40 @@
+py.test 2.0.3: bug fixes and speed ups
+===========================================================================
+
+Welcome to pytest-2.0.3, a maintenance and bug fix release of pytest,
+a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
+and latest PyPy interpreters. See the extensive docs with tested examples here:
+
+ http://pytest.org/
+
+If you want to install or upgrade pytest, just type one of::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+There also is a bugfix release 1.6 of pytest-xdist, the plugin
+that enables seamless distributed and "looponfail" testing for Python.
+
+best,
+holger krekel
+
+Changes between 2.0.2 and 2.0.3
+----------------------------------------------
+
+- fix issue38: nicer tracebacks on calls to hooks, particularly early
+ configure/sessionstart ones
+
+- fix missing skip reason/meta information in junitxml files, reported
+ via http://lists.idyll.org/pipermail/testing-in-python/2011-March/003928.html
+
+- fix issue34: avoid collection failure with "test" prefixed classes
+ deriving from object.
+
+- don't require zlib (and other libs) for the genscript plugin unless
+ --genscript is actually being used.
+
+- speed up skips (by not doing a full traceback representation
+ internally)
+
+- fix issue37: avoid invalid characters in junitxml's output
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.0.rst
new file mode 100644
index 000000000..831548ac2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.0.rst
@@ -0,0 +1,47 @@
+py.test 2.1.0: perfected assertions and bug fixes
+===========================================================================
+
+Welcome to the release of pytest-2.1, a mature testing tool for Python,
+supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See
+the improved extensive docs (now also as PDF!) with tested examples here:
+
+ http://pytest.org/
+
+The single biggest news about this release is **perfected assertions**
+courtesy of Benjamin Peterson. You can now safely use ``assert``
+statements in test modules without having to worry about side effects
+or python optimization ("-OO") options. This is achieved by rewriting
+assert statements in test modules upon import, using a PEP302 hook.
+See http://pytest.org/assert.html#advanced-assertion-introspection for
+detailed information. The work has been partly sponsored by my company,
+merlinux GmbH.
+
+For further details on bug fixes and smaller enhancements see below.
+
+If you want to install or upgrade pytest, just type one of::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+best,
+holger krekel / http://merlinux.eu
+
+Changes between 2.0.3 and 2.1.0
+----------------------------------------------
+
+- fix issue53: call nose-style setup functions with correct ordering
+- fix issue58 and issue59: new assertion code fixes
+- merge Benjamin's assertionrewrite branch: now assertions
+ for test modules on python 2.6 and above are done by rewriting
+ the AST and saving the pyc file before the test module is imported.
+ see doc/assert.txt for more info.
+- fix issue43: improve doctests with better traceback reporting on
+ unexpected exceptions
+- fix issue47: timing output in junitxml for test cases is now correct
+- fix issue48: typo in MarkInfo repr leading to exception
+- fix issue49: avoid confusing error when initialization partially fails
+- fix issue44: env/username expansion for junitxml file path
+- show releaselevel information in test runs for pypy
+- reworked doc pages for better navigation and PDF generation
+- report KeyboardInterrupt even if interrupted during session startup
+- fix issue 35 - provide PDF doc version and download link from index page
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.1.rst
new file mode 100644
index 000000000..ecdd69f4d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.1.rst
@@ -0,0 +1,37 @@
+py.test 2.1.1: assertion fixes and improved junitxml output
+===========================================================================
+
+pytest-2.1.1 is a backward compatible maintenance release of the
+popular py.test testing tool. See extensive docs with examples here:
+
+ http://pytest.org/
+
+Most bug fixes address remaining issues with the perfected assertions
+introduced with 2.1.0 - many thanks to the bug reporters and to Benjamin
+Peterson for helping to fix them. Also, junitxml output now produces
+system-out/err tags which lead to better displays of tracebacks with Jenkins.
+
+Also a quick note to package maintainers and others interested: there now
+is a "pytest" man page which can be generated with "make man" in doc/.
+
+If you want to install or upgrade pytest, just type one of::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+best,
+holger krekel / http://merlinux.eu
+
+Changes between 2.1.0 and 2.1.1
+----------------------------------------------
+
+- fix issue64 / pytest.set_trace now works within pytest_generate_tests hooks
+- fix issue60 / fix error conditions involving the creation of __pycache__
+- fix issue63 / assertion rewriting on inserts involving strings containing '%'
+- fix assertion rewriting on calls with a ** arg
+- don't cache rewritten modules if bytecode generation is disabled
+- fix assertion rewriting in read-only directories
+- fix issue59: provide system-out/err tags for junitxml output
+- fix issue61: assertion rewriting on boolean operations with 3 or more operands
+- you can now build a man page with "cd doc ; make man"
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.2.rst
new file mode 100644
index 000000000..51b7591d3
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.2.rst
@@ -0,0 +1,33 @@
+py.test 2.1.2: bug fixes and fixes for jython
+===========================================================================
+
+pytest-2.1.2 is a minor backward compatible maintenance release of the
+popular py.test testing tool. pytest is commonly used for unit,
+functional and integration testing. See extensive docs with examples
+here:
+
+ http://pytest.org/
+
+Most bug fixes address remaining issues with the perfected assertions
+introduced in the 2.1 series - many thanks to the bug reporters and to Benjamin
+Peterson for helping to fix them. pytest should also work better with
+Jython-2.5.1 (and Jython trunk).
+
+If you want to install or upgrade pytest, just type one of::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+best,
+holger krekel / http://merlinux.eu
+
+Changes between 2.1.1 and 2.1.2
+----------------------------------------
+
+- fix assertion rewriting on files with windows newlines on some Python versions
+- refine test discovery by package/module name (--pyargs), thanks Florian Mayer
+- fix issue69 / assertion rewriting fixed on some boolean operations
+- fix issue68 / packages now work with assertion rewriting
+- fix issue66: use different assertion rewriting caches when the -O option is passed
+- don't try assertion rewriting on Jython, use reinterp
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.3.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.3.rst
new file mode 100644
index 000000000..f4da60b8b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.1.3.rst
@@ -0,0 +1,32 @@
+py.test 2.1.3: just some more fixes
+===========================================================================
+
+pytest-2.1.3 is a minor backward compatible maintenance release of the
+popular py.test testing tool. It is commonly used for unit, functional
+and integration testing. See extensive docs with examples here:
+
+ http://pytest.org/
+
+The release contains another fix to the perfected assertions introduced
+with the 2.1 series as well as the new possibility to customize reporting
+for assertion expressions on a per-directory level.
+
+If you want to install or upgrade pytest, just type one of::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+Thanks to the bug reporters and to Ronny Pfannschmidt, Benjamin Peterson
+and Floris Bruynooghe who implemented the fixes.
+
+best,
+holger krekel
+
+Changes between 2.1.2 and 2.1.3
+----------------------------------------
+
+- fix issue79: assertion rewriting failed on some comparisons in boolops
+- correctly handle zero length arguments (a la pytest '')
+- fix issue67 / junitxml now contains correct test durations
+- fix issue75 / skipping test failure on jython
+- fix issue77 / Allow assertrepr_compare hook to apply to a subset of tests
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.0.rst
new file mode 100644
index 000000000..20bfe0a19
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.0.rst
@@ -0,0 +1,95 @@
+py.test 2.2.0: test marking++, parametrization++ and duration profiling
+===========================================================================
+
+pytest-2.2.0 is a test-suite compatible release of the popular
+py.test testing tool. Plugins might need upgrades. It comes
+with these improvements:
+
+* easier and more powerful parametrization of tests:
+
+ - new @pytest.mark.parametrize decorator to run tests with different arguments
+ - new metafunc.parametrize() API for parametrizing arguments independently
+ - see examples at http://pytest.org/latest/example/parametrize.html
+ - NOTE that parametrize() related APIs are still a bit experimental
+ and might change in future releases.
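+
+  A minimal sketch of the decorator form as it looked in this release
+  (argument names and values are invented; 2.2.0 still expected the
+  argument names as a tuple)::
+
+      import pytest
+
+      @pytest.mark.parametrize(("input", "expected"), [(3, 9), (4, 16)])
+      def test_square(input, expected):
+          assert input * input == expected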
+
+* improved handling of test markers and refined marking mechanism:
+
+ - "-m markexpr" option for selecting tests according to their mark
+ - a new "markers" ini-variable for registering test markers for your project
+ - the new "--strict" bails out with an error if using unregistered markers.
+ - see examples at http://pytest.org/latest/example/markers.html
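+
+  A hypothetical end-to-end sketch (the marker name is invented)::
+
+      # setup.cfg or tox.ini
+      [pytest]
+      markers =
+          webtest: mark a test as a (slow) web-facing test
+
+      # test_web.py
+      import pytest
+
+      @pytest.mark.webtest
+      def test_send_http():
+          pass
+
+  Running ``py.test -m webtest`` then selects only the marked tests,
+  and ``py.test --strict`` errors out on any marker missing from the
+  ini-file.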
+
+* duration profiling: new "--durations=N" option showing the N slowest test
+ executions or setup/teardown calls. This is most useful if you want to
+ find out where your slowest test code is.
+
+* also 2.2.0 performs more eager calling of teardown/finalizer functions,
+ resulting in better and more accurate reporting when they fail
+
+Besides there is the usual set of bug fixes along with a cleanup of
+pytest's own test suite allowing it to run on a wider range of environments.
+
+For general information, see extensive docs with examples here:
+
+ http://pytest.org/
+
+If you want to install or upgrade pytest you might just type::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, Alfredo Deza and all who gave feedback or sent bug reports.
+
+best,
+holger krekel
+
+
+notes on incompatibility
+------------------------------
+
+While test suites should work unchanged, you might need to upgrade plugins:
+
+* You need a new version of the pytest-xdist plugin (1.7) for distributing
+ test runs.
+
+* Other plugins might need an upgrade if they implement
+ the ``pytest_runtest_logreport`` hook which now is called unconditionally
+ for the setup/teardown fixture phases of a test. You may choose to
+ ignore setup/teardown failures by inserting "if rep.when != 'call': return"
+ or something similar. Note that most code probably "just" works because
+ the hook was already called for failing setup/teardown phases of a test
+ so a plugin should have been ready to grok such reports already.
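+
+  A minimal sketch of such a guard in a plugin or conftest.py (the
+  print is just a stand-in for whatever the plugin actually does)::
+
+      def pytest_runtest_logreport(report):
+          # ignore the new setup/teardown reports, look at the call phase only
+          if report.when != "call":
+              return
+          print("outcome of %s: %s" % (report.nodeid, report.outcome))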
+
+
+Changes between 2.1.3 and 2.2.0
+----------------------------------------
+
+- fix issue90: introduce eager tearing down of test items so that
+ teardown functions are called earlier.
+- add an all-powerful metafunc.parametrize function which allows
+ parametrizing test function arguments in multiple steps and therefore
+ from independent plugins and places.
+- add a @pytest.mark.parametrize helper which makes it easy to
+ call a test function with different argument values.
+- Add examples to the "parametrize" example page, including a quick port
+ of Test scenarios and the new parametrize function and decorator.
+- introduce registration for "pytest.mark.*" helpers via ini-files
+ or through plugin hooks. Also introduce a "--strict" option which
+ will treat unregistered markers as errors,
+ allowing you to avoid typos and maintain a well-described set of markers
+ for your test suite. See examples at http://pytest.org/latest/mark.html
+ and its links.
+- issue50: introduce "-m marker" option to select tests based on markers
+ (this is a stricter and more predictable version of "-k" in that "-m"
+ only matches complete markers and has more obvious rules for and/or
+ semantics).
+- new feature to help optimizing the speed of your tests:
+ --durations=N option for displaying N slowest test calls
+ and setup/teardown methods.
+- fix issue87: --pastebin now works with python3
+- fix issue89: --pdb with unexpected exceptions in doctest work more sensibly
+- fix and cleanup pytest's own test suite to not leak FDs
+- fix issue83: link to generated funcarg list
+- fix issue74: pyarg module names are now checked against imp.find_module false positives
+- fix compatibility with twisted/trial-11.1.0 use cases
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.1.rst
new file mode 100644
index 000000000..f9764634c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.1.rst
@@ -0,0 +1,41 @@
+pytest-2.2.1: bug fixes, perfect teardowns
+===========================================================================
+
+
+pytest-2.2.1 is a minor backward-compatible release of the py.test
+testing tool. It contains bug fixes and little improvements, including
+documentation fixes. If you are using the distributed testing
+plugin, make sure to upgrade it to pytest-xdist-1.8.
+
+For general information see here:
+
+ http://pytest.org/
+
+To install or upgrade pytest:
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+Special thanks for helping on this release to Ronny Pfannschmidt, Jurko
+Gospodnetic and Ralf Schmitt.
+
+best,
+holger krekel
+
+
+Changes between 2.2.0 and 2.2.1
+----------------------------------------
+
+- fix issue99 (in pytest and py) internal errors with resultlog now
+ produce better output - fixed by normalizing pytest_internalerror
+ input arguments.
+- fix issue97 / traceback issues (in pytest and py) improve traceback output
+ in conjunction with jinja2 and cython which hack tracebacks
+- fix issue93 (in pytest and pytest-xdist) avoid "delayed teardowns":
+ the final test in a test node will now run its teardown directly
+ instead of waiting for the end of the session. Thanks Dave Hunt for
+ the good reporting and feedback. The pytest_runtest_protocol as well
+ as the pytest_runtest_teardown hooks now have "nextitem" available
+ which will be None indicating the end of the test run.
+- fix collection crash due to unknown-source collected items, thanks
+ to Ralf Schmitt (fixed by depending on a more recent pylib)
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.2.rst
new file mode 100644
index 000000000..733aedec4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.2.rst
@@ -0,0 +1,43 @@
+pytest-2.2.2: bug fixes
+===========================================================================
+
+pytest-2.2.2 (updated to 2.2.3 to fix packaging issues) is a minor
+backward-compatible release of the versatile py.test testing tool. It
+contains bug fixes and a few refinements particularly to reporting with
+"--collectonly", see below for betails.
+
+For general information see here:
+
+ http://pytest.org/
+
+To install or upgrade pytest:
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+Special thanks for helping on this release to Ronny Pfannschmidt
+and Ralf Schmitt and the contributors of issues.
+
+best,
+holger krekel
+
+
+Changes between 2.2.1 and 2.2.2
+----------------------------------------
+
+- fix issue101: wrong args to unittest.TestCase test function now
+ produce better output
+- fix issue102: report more useful errors and hints for when a
+ test directory was renamed and some pyc/__pycache__ remain
+- fix issue106: allow parametrize to be applied multiple times
+ e.g. from module, class and at function level.
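+
+  For instance, stacking the decorator now runs the test for the cross
+  product of the parameter sets (names and values are invented)::
+
+      import pytest
+
+      @pytest.mark.parametrize(("x",), [(0,), (1,)])
+      @pytest.mark.parametrize(("y",), [(2,), (3,)])
+      def test_combinations(x, y):
+          # executed four times: (0,2), (1,2), (0,3), (1,3)
+          assert x < y
+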
+- fix issue107: actually perform session scope finalization
+- don't check in parametrize if indirect parameters are funcarg names
+- add chdir method to monkeypatch funcarg
+- fix crash resulting from calling monkeypatch undo a second time
+- fix issue115: make --collectonly robust against early failure
+ (missing files/directories)
+- "-qq --collectonly" now shows only files and the number of tests in them
+- "-q --collectonly" now shows test ids
+- allow adding of attributes to test reports such that it also works
+ with distributed testing (no upgrade of pytest-xdist needed)
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.4.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.4.rst
new file mode 100644
index 000000000..8720bdb28
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.2.4.rst
@@ -0,0 +1,39 @@
+pytest-2.2.4: bug fixes, better junitxml/unittest/python3 compat
+===========================================================================
+
+pytest-2.2.4 is a minor backward-compatible release of the versatile
+py.test testing tool. It contains bug fixes and a few refinements
+to junitxml reporting, better unittest- and python3 compatibility.
+
+For general information see here:
+
+ http://pytest.org/
+
+To install or upgrade pytest:
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+Special thanks for helping on this release to Ronny Pfannschmidt
+and Benjamin Peterson and the contributors of issues.
+
+best,
+holger krekel
+
+Changes between 2.2.3 and 2.2.4
+-----------------------------------
+
+- fix error message for rewritten assertions involving the % operator
+- fix issue 126: correctly match all invalid xml characters for junitxml
+ binary escape
+- fix issue with unittest: now @unittest.expectedFailure markers should
+ be processed correctly (you can also use @pytest.mark markers)
+- document integration with the extended distribute/setuptools test commands
+- fix issue 140: properly get the real functions
+ of bound classmethods for setup/teardown_class
+- fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net
+- fix issue #143: call unconfigure/sessionfinish always when
+ configure/sessionstart where called
+- fix issue #144: better mangle test ids to junitxml classnames
+- upgrade distribute_setup.py to 0.6.27
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.0.rst
new file mode 100644
index 000000000..54fe3961f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.0.rst
@@ -0,0 +1,134 @@
+pytest-2.3: improved fixtures / better unittest integration
+=============================================================================
+
+pytest-2.3 comes with many major improvements for fixture/funcarg management
+and parametrized testing in Python. It is now easier, more efficient and
+more predictable to re-run the same tests with different fixture
+instances. Also, you can directly declare the caching "scope" of
+fixtures so that dependent tests throughout your whole test suite can
+re-use database or other expensive fixture objects with ease. Lastly,
+it's possible for fixture functions (formerly known as funcarg
+factories) to use other fixtures, allowing for a completely modular and
+re-usable fixture design.
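+
+A small, hypothetical sketch of the new style (fixture and test names
+are invented, not taken from the docs)::
+
+    import pytest
+
+    @pytest.fixture(scope="module")
+    def db_connection():
+        # expensive object, created only once per test module
+        return {"connected": True}
+
+    @pytest.fixture
+    def db_session(db_connection):
+        # a fixture function may itself require other fixtures
+        return {"conn": db_connection, "queries": []}
+
+    def test_session_uses_connection(db_session):
+        assert db_session["conn"]["connected"]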
+
+For detailed info and tutorial-style examples, see:
+
+ http://pytest.org/latest/fixture.html
+
+Moreover, there is now support for using pytest fixtures/funcargs with
+unittest-style suites, see here for examples:
+
+ http://pytest.org/latest/unittest.html
+
+Besides, more unittest test suites are now expected to "simply work"
+with pytest.
+
+All changes are backward compatible and you should be able to continue
+to run your test suites and 3rd party plugins that worked with
+pytest-2.2.4.
+
+If you are interested in the precise reasoning (including examples) of the
+pytest-2.3 fixture evolution, please consult
+http://pytest.org/latest/funcarg_compare.html
+
+For general info on installation and getting started:
+
+ http://pytest.org/latest/getting-started.html
+
+Docs and PDF access as usual at:
+
+ http://pytest.org
+
+and more details for those already familiar with pytest can be found
+in the CHANGELOG below.
+
+Particular thanks for this release go to Floris Bruynooghe, Alex Okrushko,
+Carl Meyer, Ronny Pfannschmidt, Benjamin Peterson and Alex Gaynor for helping
+to get the new features right and well integrated. Ronny and Floris
+also helped to fix a number of bugs and yet more people helped by
+providing bug reports.
+
+have fun,
+holger krekel
+
+
+Changes between 2.2.4 and 2.3.0
+-----------------------------------
+
+- fix issue202 - better automatic names for parametrized test functions
+- fix issue139 - introduce @pytest.fixture which allows direct scoping
+ and parametrization of funcarg factories. Introduce new @pytest.setup
+ marker to allow the writing of setup functions which accept funcargs.
+- fix issue198 - conftest fixtures were not found on windows32 in some
+ circumstances with nested directory structures due to path manipulation issues
+- fix issue193: skip test functions which were parametrized with empty
+ parameter sets
+- fix python3.3 compat, mostly reporting bits that previously depended
+ on dict ordering
+- introduce re-ordering of tests by resource and parametrization setup
+ which takes precedence over the usual file-ordering
+- fix issue185 monkeypatching time.time does not cause pytest to fail
+- fix issue172: duplicate call of pytest.setup-decorated setup_module
+ functions
+- fix junitxml=path construction so that if tests change the
+ current working directory and the path is a relative path
+ it is constructed correctly from the original current working dir.
+- fix "python setup.py test" example to cause a proper "errno" return
+- fix issue165 - fix broken doc links and mention stackoverflow for FAQ
+- catch unicode-issues when writing failure representations
+ to terminal to prevent the whole session from crashing
+- fix xfail/skip confusion: a skip-mark or an imperative pytest.skip
+ will now take precedence over xfail-markers because we
+ can't determine xfail/xpass status in case of a skip. see also:
+ http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get
+
+- always report installed 3rd party plugins in the header of a test run
+
+- fix issue160: a failing setup of an xfail-marked tests should
+ be reported as xfail (not xpass)
+
+- fix issue128: show captured output when capsys/capfd are used
+
+- fix issue179: properly show the dependency chain of factories
+
+- pluginmanager.register(...) now raises ValueError if the
+ plugin has been already registered or the name is taken
+
+- fix issue159: improve http://pytest.org/latest/faq.html
+ especially with respect to the "magic" history, also mention
+ pytest-django, trial and unittest integration.
+
+- make request.keywords and node.keywords writable. All descendant
+ collection nodes will see keyword values. Keywords are dictionaries
+ containing markers and other info.
+
+- fix issue 178: xml binary escapes are now wrapped in py.xml.raw
+
+- fix issue 176: correctly catch the builtin AssertionError
+ even when we replaced AssertionError with a subclass on the
+ python level
+
+- factory discovery no longer fails with magic global callables
+ that provide no sane __code__ object (mock.call for example)
+
+- fix issue 182: testdir.inprocess_run now considers passed plugins
+
+- fix issue 188: ensure sys.exc_info is clear on python2
+ before calling into a test
+
+- fix issue 191: add unittest TestCase runTest method support
+- fix issue 156: monkeypatch correctly handles class level descriptors
+
+- reporting refinements:
+
+ - pytest_report_header now receives a "startdir" so that
+ you can use startdir.bestrelpath(yourpath) to show
+ nice relative path
+
+ - allow plugins to implement both pytest_report_header and
+ pytest_sessionstart (sessionstart is invoked first).
+
+ - don't show deselected reason line if there is none
+
+ - py.test -vv will show full assertion comparisons instead of truncating them
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.1.rst
new file mode 100644
index 000000000..b787dc203
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.1.rst
@@ -0,0 +1,39 @@
+pytest-2.3.1: fix regression with factory functions
+===========================================================================
+
+pytest-2.3.1 is a quick follow-up release:
+
+- fix issue202 - regression with fixture functions/funcarg factories:
+ using "self" is now safe again and works as in 2.2.4. Thanks
+ to Eduard Schettino for the quick bug report.
+
+- disable pexpect pytest self tests on Freebsd - thanks Koob for the
+ quick reporting
+
+- fix/improve interactive docs with --markers
+
+See
+
+ http://pytest.org/
+
+for general information. To install or upgrade pytest:
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+best,
+holger krekel
+
+
+Changes between 2.3.0 and 2.3.1
+-----------------------------------
+
+- fix issue202 - fix regression: using "self" from fixture functions now
+ works as expected (it's the same "self" instance that a test method
+ which uses the fixture sees)
+
+- skip pexpect using tests (test_pdb.py mostly) on freebsd* systems
+ due to pexpect not supporting it properly (hanging)
+
+- link to web pages from --markers output which provides help for
+ pytest.mark.* usage.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.2.rst
new file mode 100644
index 000000000..948b374d4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.2.rst
@@ -0,0 +1,57 @@
+pytest-2.3.2: some fixes and more traceback-printing speed
+===========================================================================
+
+pytest-2.3.2 is another stabilization release:
+
+- issue 205: fixes a regression with conftest detection
+- issue 208/29: fixes traceback-printing speed in some bad cases
+- fix teardown-ordering for parametrized setups
+- fix unittest and trial compat behaviour with respect to runTest() methods
+- issue 206 and others: some improvements to packaging
+- fix issue127 and others: improve some docs
+
+See
+
+ http://pytest.org/
+
+for general information. To install or upgrade pytest:
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+best,
+holger krekel
+
+
+Changes between 2.3.1 and 2.3.2
+-----------------------------------
+
+- fix issue208 and issue29: use new py version to avoid long pauses
+ when printing tracebacks in long modules
+
+- fix issue205 - conftests in subdirs customizing
+ pytest_pycollect_makemodule and pytest_pycollect_makeitem
+ now work properly
+
+- fix teardown-ordering for parametrized setups
+
+- fix issue127 - better documentation for pytest_addoption
+ and related objects.
+
+- fix unittest behaviour: TestCase.runtest only called if there are
+ test methods defined
+
+- improve trial support: don't collect its empty
+ unittest.TestCase.runTest() method
+
+- "python setup.py test" now works with pytest itself
+
+- fix/improve internal/packaging related bits:
+
+ - exception message check of test_nose.py now passes on python33 as well
+
+ - issue206 - fix test_assertrewrite.py to work when a global
+ PYTHONDONTWRITEBYTECODE=1 is present
+
+ - add tox.ini to pytest distribution so that ignore-dirs and others config
+ bits are properly distributed for maintainers who run pytest-own tests
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.3.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.3.rst
new file mode 100644
index 000000000..1d7c7027b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.3.rst
@@ -0,0 +1,62 @@
+pytest-2.3.3: integration fixes, py24 support, ``*/**`` shown in traceback
+===========================================================================
+
+pytest-2.3.3 is another stabilization release of the py.test tool
+which offers uebersimple assertions, scalable fixture mechanisms
+and deep customization for testing with Python. Particularly,
+this release provides:
+
+- integration fixes and improvements related to flask, numpy, nose,
+ unittest, mock
+
+- makes pytest work on py24 again (yes, people sometimes still need to use it)
+
+- show ``*,**`` args in pytest tracebacks
+
+Thanks to Manuel Jacob, Thomas Waldmann, Ronny Pfannschmidt, Pavel Repin
+and Andreas Taumoefolau for providing patches, and to all who reported issues.
+
+See
+
+ http://pytest.org/
+
+for general information. To install or upgrade pytest:
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+best,
+holger krekel
+
+Changes between 2.3.2 and 2.3.3
+-----------------------------------
+
+- fix issue214 - parse modules that contain special objects like e.g.
+ flask's request object which blows up on getattr access if no request
+ is active. thanks Thomas Waldmann.
+
+- fix issue213 - allow to parametrize with values like numpy arrays that
+ do not support an __eq__ operator
+
+- fix issue215 - split test_python.org into multiple files
+
+- fix issue148 - @unittest.skip on classes is now recognized and avoids
+ calling setUpClass/tearDownClass, thanks Pavel Repin
+
+- fix issue209 - reintroduce python2.4 support by depending on newer
+ pylib which re-introduced statement-finding for pre-AST interpreters
+
+- nose support: only call setup if it's a callable, thanks Andrew
+ Taumoefolau
+
+- fix issue219 - add py2.4-3.3 classifiers to TROVE list
+
+- in tracebacks *,** arg values are now shown next to normal arguments
+ (thanks Manuel Jacob)
+
+- fix issue217 - support mock.patch with pytest's fixtures - note that
+ you need either mock-1.0.1 or the python3.3 builtin unittest.mock.
+
+- fix issue127 - improve documentation for pytest_addoption() and
+ add a ``config.getoption(name)`` helper function for consistency.
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.4.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.4.rst
new file mode 100644
index 000000000..d6c597b54
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.4.rst
@@ -0,0 +1,39 @@
+pytest-2.3.4: stabilization, more flexible selection via "-k expr"
+===========================================================================
+
+pytest-2.3.4 is a small stabilization release of the py.test tool
+which offers uebersimple assertions, scalable fixture mechanisms
+and deep customization for testing with Python. This release
+comes with the following fixes and features:
+
+- make "-k" option accept an expressions the same as with "-m" so that one
+ can write: -k "name1 or name2" etc. This is a slight usage incompatibility
+ if you used special syntax like "TestClass.test_method" which you now
+ need to write as -k "TestClass and test_method" to match a certain
+ method in a certain test class.
+- allow markers to be defined dynamically via
+ item.keywords[...] assignment, integrating with the "-m" option
+- yielded test functions will now have autouse-fixtures active but
+ cannot accept fixtures as funcargs - it is recommended to use the
+ post-2.0 parametrize features instead of yield anyway, see:
+ http://pytest.org/latest/example/parametrize.html
+- fix autouse-issue where autouse-fixtures would not be discovered
+ if defined in an a/conftest.py file, with tests in a/tests/test_some.py
+- fix issue226 - LIFO ordering for fixture teardowns
+- fix issue224 - invocations with >256 char arguments now work
+- fix issue91 - add/discuss package/directory level setups in example
+- fixes related to autouse discovery and calling
+
+Thanks in particular to Thomas Waldmann for spotting and reporting issues.
+
+See
+
+ http://pytest.org/
+
+for general information. To install or upgrade pytest:
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+best,
+holger krekel
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.5.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.5.rst
new file mode 100644
index 000000000..c4e91e0e6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.3.5.rst
@@ -0,0 +1,97 @@
+pytest-2.3.5: bug fixes and little improvements
+===========================================================================
+
+pytest-2.3.5 is a maintenance release with many bug fixes and little
+improvements. See the changelog below for details. No backward
+compatibility issues are foreseen and all plugins which worked with the
+prior version are expected to work unmodified. Speaking of which, a
+few interesting new plugins saw the light last month:
+
+- pytest-instafail: show failure information while tests are running
+- pytest-qt: testing of GUI applications written with QT/Pyside
+- pytest-xprocess: managing external processes across test runs
+- pytest-random: randomize test ordering
+
+And several others like pytest-django saw maintenance releases.
+For a more complete list, check out
+https://pypi.python.org/pypi?%3Aaction=search&term=pytest&submit=search.
+
+For general information see:
+
+ http://pytest.org/
+
+To install or upgrade pytest:
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+Particular thanks to Floris, Ronny, Benjamin and the many bug reporters
+and fix providers.
+
+may the fixtures be with you,
+holger krekel
+
+
+Changes between 2.3.4 and 2.3.5
+-----------------------------------
+
+- never consider a fixture function for test function collection
+
+- allow re-running of test items / helps to fix pytest-reruntests plugin
+ and also helps to keep fewer fixture/resource references alive
+
+- put captured stdout/stderr into junitxml output even for passing tests
+ (thanks Adam Goucher)
+
+- Issue 265 - integrate nose setup/teardown with setupstate
+ so it doesn't try to tear down if it did not set up
+
+- issue 271 - don't write junitxml on slave nodes
+
+- Issue 274 - don't try to show full doctest example
+ when doctest does not know the example location
+
+- issue 280 - disable assertion rewriting on buggy CPython 2.6.0
+
+- inject "getfixture()" helper to retrieve fixtures from doctests,
+ thanks Andreas Zeidler
+
+- issue 259 - when assertion rewriting, be consistent with the default
+ source encoding of ASCII on Python 2
+
+- issue 251 - report a skip instead of ignoring classes with init
+
+- issue250: unicode/str mixes in parametrization names and values now work
+
+- issue257, assertion-triggered compilation of source ending in a
+ comment line doesn't blow up in python2.5 (fixed through py>=1.4.13.dev6)
+
+- fix --genscript option to generate standalone scripts that also
+ work with python3.3 (importer ordering)
+
+- issue171 - in assertion rewriting, show the repr of some
+ global variables
+
+- fix option help for "-k"
+
+- move long description of distribution into README.rst
+
+- improve docstring for metafunc.parametrize()
+
+- fix bug where using capsys with pytest.set_trace() in a test
+ function would break when looking at capsys.readouterr()
+
+- allow to specify prefixes starting with "_" when
+ customizing python_functions test discovery. (thanks Graham Horler)
+
+- improve PYTEST_DEBUG tracing output by putting
+ extra data on new lines with additional indentation
+
+- ensure OutcomeExceptions like skip/fail have initialized exception attributes
+
+- issue 260 - don't use nose special setup on plain unittest cases
+
+- fix issue134 - print the collect errors that prevent running specified test items
+
+- fix issue266 - accept unicode in MarkEvaluator expressions
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.0.rst
new file mode 100644
index 000000000..88130c481
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.0.rst
@@ -0,0 +1,225 @@
+pytest-2.4.0: new fixture features/hooks and bug fixes
+===========================================================================
+
+The just released pytest-2.4.0 brings many improvements and numerous
+bug fixes while remaining plugin- and test-suite compatible apart
+from a few supposedly very minor incompatibilities. See below for
+a full list of details. A few feature highlights:
+
+- new yield-style fixtures `pytest.yield_fixture
+ <http://pytest.org/latest/yieldfixture.html>`_, allowing to use
+ existing with-style context managers in fixture functions.
+
+- improved pdb support: ``import pdb ; pdb.set_trace()`` now works
+ without requiring prior disabling of stdout/stderr capturing.
+ Also the ``--pdb`` option now works on collection and internal errors
+ and we introduced a new experimental hook for IDEs/plugins to
+ intercept debugging: ``pytest_exception_interact(node, call, report)``.
+
+- shorter monkeypatch variant to allow specifying an import path as
+ a target, for example: ``monkeypatch.setattr("requests.get", myfunc)``
+
+- better unittest/nose compatibility: all teardown methods are now only
+ called if the corresponding setup method succeeded.
+
+- integrate tab-completion on command line options if you
+ have `argcomplete <http://pypi.python.org/pypi/argcomplete>`_
+ configured.
+
+- allow boolean expression directly with skipif/xfail
+ if a "reason" is also specified.
+
+- a new hook ``pytest_load_initial_conftests`` allows plugins like
+ `pytest-django <http://pypi.python.org/pypi/pytest-django>`_ to
+ influence the environment before conftest files import ``django``.
+
+- reporting: color the last line red or green depending on whether
+ failures/errors occurred or everything passed.
+
+The documentation has been updated to accommodate the changes,
+see `http://pytest.org <http://pytest.org>`_
+
+To install or upgrade pytest::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+
+**Many thanks to all who helped, including Floris Bruynooghe,
+Brianna Laugher, Andreas Pelme, Anthon van der Neut, Anatoly Bubenkoff,
+Vladimir Keleshev, Mathieu Agopian, Ronny Pfannschmidt, Christian
+Theunert and many others.**
+
+may passing tests be with you,
+
+holger krekel
+
+Changes between 2.3.5 and 2.4
+-----------------------------------
+
+known incompatibilities:
+
+- if calling --genscript from python2.7 or above, you only get a
+ standalone script which works on python2.7 or above. Use Python2.6
+ to also get a python2.5 compatible version.
+
+- all xunit-style teardown methods (nose-style, pytest-style,
+ unittest-style) will not be called if the corresponding setup method failed,
+ see issue322 below.
+
+- the pytest_plugin_unregister hook wasn't ever properly called
+ and there is no known implementation of the hook - so it got removed.
+
+- pytest.fixture-decorated functions cannot be generators (i.e. use
+ yield) anymore. This change might be reversed in 2.4.1 if it causes
+ unforeseen real-life issues. However, you can always write and return
+ an inner function/generator and change the fixture consumer to iterate
+ over the returned generator. This change was done in light of the new
+ ``pytest.yield_fixture`` decorator, see below.
+
+new features:
+
+- experimentally introduce a new ``pytest.yield_fixture`` decorator
+ which accepts exactly the same parameters as pytest.fixture but
+ mandates a ``yield`` statement instead of a ``return`` statement from
+ fixture functions. This allows direct integration with "with-style"
+ context managers in fixture functions and generally avoids registering
+ of finalization callbacks in favour of treating the "after-yield" as
+ teardown code. Thanks Andreas Pelme, Vladimir Keleshev, Floris
+ Bruynooghe, Ronny Pfannschmidt and many others for discussions.
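+
+  A minimal sketch (the file handling is an invented example)::
+
+      import pytest
+
+      @pytest.yield_fixture
+      def opened_file(tmpdir):
+          f = open(str(tmpdir.join("data.txt")), "w")
+          yield f            # everything after the yield is teardown code
+          f.close()
+
+      def test_write(opened_file):
+          opened_file.write("hello")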
+
+- allow boolean expression directly with skipif/xfail
+ if a "reason" is also specified. Rework skipping documentation
+ to recommend "condition as booleans" because it prevents surprises
+ when importing markers between modules. Specifying conditions
+ as strings will remain fully supported.
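+
+  For example, a plain boolean condition plus reason now works (a
+  sketch using only the standard library)::
+
+      import sys
+      import pytest
+
+      @pytest.mark.skipif(sys.version_info < (3, 3),
+                          reason="requires Python 3.3 or newer")
+      def test_new_syntax():
+          assert True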
+
+- reporting: color the last line red or green depending on whether
+ failures/errors occurred or everything passed. thanks Christian
+ Theunert.
+
+- make "import pdb ; pdb.set_trace()" work natively wrt capturing (no
+ "-s" needed anymore), making ``pytest.set_trace()`` a mere shortcut.
+
+- fix issue181: --pdb now also works on collect errors (and
+ on internal errors). This was implemented by a slight internal
+ refactoring and the introduction of a new
+ ``pytest_exception_interact`` hook (see next item).
+
+- fix issue341: introduce new experimental hook for IDEs/terminals to
+ intercept debugging: ``pytest_exception_interact(node, call, report)``.
+
+- new monkeypatch.setattr() variant to provide a shorter
+ invocation for patching out classes/functions from modules:
+
+ monkeypatch.setattr("requests.get", myfunc)
+
+ will replace the "get" function of the "requests" module with ``myfunc``.
+
+- fix issue322: tearDownClass is not run if setUpClass failed. Thanks
+ Mathieu Agopian for the initial fix. Also make all pytest/nose
+ finalizers mimic the same generic behaviour: if a setupX exists and
+ fails, don't run teardownX. This internally introduces a new method
+ "node.addfinalizer()" helper which can only be called during the setup
+ phase of a node.
+
+- simplify pytest.mark.parametrize() signature: allow passing a
+ comma-separated string to specify argnames. For example:
+ ``pytest.mark.parametrize("input,expected", [(1,2), (2,3)])``
+ works as well as the previous:
+ ``pytest.mark.parametrize(("input", "expected"), ...)``.
+
+- add support for setUpModule/tearDownModule detection, thanks Brian Okken.
+
+- integrate tab-completion on options through use of "argcomplete".
+ Thanks Anthon van der Neut for the PR.
+
+- change option names to be hyphen-separated long options but keep the
+ old spelling backward compatible. py.test -h will only show the
+ hyphenated version, for example "--collect-only" but "--collectonly"
+ will remain valid as well (for backward-compat reasons). Many thanks to
+ Anthon van der Neut for the implementation and to Hynek Schlawack for
+ pushing us.
+
+- fix issue 308 - allow to mark/xfail/skip individual parameter sets
+ when parametrizing. Thanks Brianna Laugher.
+
+- call new experimental pytest_load_initial_conftests hook to allow
+ 3rd party plugins to do something before a conftest is loaded.
+
+Bug fixes:
+
+- fix issue358 - capturing options are now parsed more properly
+ by using a new parser.parse_known_args method.
+
+- pytest now uses argparse instead of optparse (thanks Anthon) which
+ means that "argparse" is added as a dependency if installing into python2.6
+ environments or below.
+
+- fix issue333: fix a case of bad unittest/pytest hook interaction.
+
+- PR27: correctly handle nose.SkipTest during collection. Thanks
+ Antonio Cuni, Ronny Pfannschmidt.
+
+- fix issue355: junitxml puts name="pytest" attribute to testsuite tag.
+
+- fix issue336: autouse fixture in plugins should work again.
+
+- fix issue279: improve object comparisons on assertion failure
+ for standard datatypes and recognise collections.abc. Thanks to
+ Brianna Laugher and Mathieu Agopian.
+
+- fix issue317: assertion rewriter support for the is_package method
+
+- fix issue335: document py.code.ExceptionInfo() object returned
+ from pytest.raises(), thanks Mathieu Agopian.
+
+- remove implicit distribute_setup support from setup.py.
+
+- fix issue305: ignore any problems when writing pyc files.
+
+- SO-17664702: call fixture finalizers even if the fixture function
+ partially failed (finalizers would not always be called before)
+
+- fix issue320 - fix class scope for fixtures when mixed with
+ module-level functions. Thanks Anatoly Bubenkoff.
+
+- you can specify "-q" or "-qq" to get different levels of "quieter"
+ reporting (thanks Katarzyna Jachim)
+
+- fix issue300 - Fix order of conftest loading when starting py.test
+ in a subdirectory.
+
+- fix issue323 - sorting of many module-scoped arg parametrizations
+
+- make sessionfinish hooks execute with the same cwd-context as at
+ session start (helps fix the behaviour of plugins which write output files
+ with relative paths, such as pytest-cov)
+
+- fix issue316 - properly reference collection hooks in docs
+
+- fix issue 306 - cleanup of -k/-m options to only match markers/test
+ names/keywords respectively. Thanks Wouter van Ackooy.
+
+- improved doctest counting for doctests in python modules --
+ files without any doctest items will not show up anymore
+ and doctest examples are counted as separate test items.
+ thanks Danilo Bellini.
+
+- fix issue245 by depending on the released py-1.4.14
+ which fixes py.io.dupfile to work with files with no
+ mode. Thanks Jason R. Coombs.
+
+- fix junitxml generation when test output contains control characters,
+ addressing issue267, thanks Jaap Broekhuizen
+
+- fix issue338: honor --tb style for setup/teardown errors as well. Thanks Maho.
+
+- fix issue307 - use yaml.safe_load in example, thanks Mark Eichin.
+
+- better parametrize error messages, thanks Brianna Laugher
+
+- pytest_terminal_summary(terminalreporter) hooks can now use
+ ".section(title)" and ".line(msg)" methods to print extra
+ information at the end of a test run.
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.1.rst
new file mode 100644
index 000000000..64ba170f8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.1.rst
@@ -0,0 +1,25 @@
+pytest-2.4.1: fixing three regressions compared to 2.3.5
+===========================================================================
+
+pytest-2.4.1 is a quick follow up release to fix three regressions
+compared to 2.3.5 before they hit more people:
+
+- When using parser.addoption() unicode arguments to the
+ "type" keyword should also be converted to the respective types.
+ thanks Floris Bruynooghe, @dnozay. (fixes issue360 and issue362)
+
+- fix dotted filename completion when using argcomplete
+ thanks Anthon van der Neut. (fixes issue361)
+
+- fix regression when a 1-tuple ("arg",) is used for specifying
+ parametrization (the values of the parametrization were passed
+ nested in a tuple). Thanks Donald Stufft.
+
+- also merge doc typo fixes, thanks Andy Dirnberger
+
+as usual, docs at http://pytest.org and upgrades via::
+
+ pip install -U pytest
+
+have fun,
+holger krekel
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.2.rst
new file mode 100644
index 000000000..3b4aa95ab
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.4.2.rst
@@ -0,0 +1,39 @@
+pytest-2.4.2: colorama on windows, plugin/tmpdir fixes
+===========================================================================
+
+pytest-2.4.2 is another bug-fixing release:
+
+- on Windows require colorama and a newer py lib so that py.io.TerminalWriter()
+ now uses colorama instead of its own ctypes hacks. (fixes issue365)
+ thanks Paul Moore for bringing it up.
+
+- fix "-k" matching of tests where "repr" and "attr" and other names would
+ cause wrong matches because of an internal implementation quirk
+ (don't ask) which is now properly implemented. fixes issue345.
+
+- avoid the tmpdir fixture creating overly long filenames, especially
+ when parametrization is used (issue354)
+
+- fix pytest-pep8 and pytest-flakes / pytest interactions
+  (collection name handling in the mark plugin assumed an item always
+  has a function, which is not true for those plugins etc.)
+ Thanks Andi Zeidler.
+
+- introduce node.get_marker/node.add_marker API for plugins
+  like pytest-pep8 and pytest-flakes to avoid the messy
+  details of the node.keywords pseudo-dicts. Adapted
+  docs. (A short sketch follows after this list.)
+
+- remove attempt to "dup" stdout at startup as it's icky.
+ the normal capturing should catch enough possibilities
+ of tests messing up standard FDs.
+
+- add pluginmanager.do_configure(config) as a link to
+ config.do_configure() for plugin-compatibility
+
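+For illustration, a plugin hook could use the new marker API roughly like
+this (a minimal sketch; the marker names are hypothetical)::
+
+    # conftest.py -- hypothetical example
+    import pytest
+
+    def pytest_collection_modifyitems(items):
+        for item in items:
+            # read a marker set on the test (returns None if absent)
+            if item.get_marker("slow") is not None:
+                # attach an additional marker programmatically
+                item.add_marker(pytest.mark.serial)
+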
+as usual, docs at http://pytest.org and upgrades via::
+
+ pip install -U pytest
+
+have fun,
+holger krekel
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.0.rst
new file mode 100644
index 000000000..b8f28d6fd
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.0.rst
@@ -0,0 +1,175 @@
+pytest-2.5.0: now down to ZERO reported bugs!
+===========================================================================
+
+pytest-2.5.0 is a big bug-fixing release, the result of two community bug
+fixing days plus numerous additional contributions from many people and
+reporters. The release should be fully compatible with 2.4.2, existing
+plugins and test suites. We aim to maintain this level of ZERO reported
+bugs because it's no fun if your testing tool has bugs, is it? Under one
+condition, though: when submitting a bug report please provide
+clear information about the circumstances and a simple example which
+reproduces the problem.
+
+The issue tracker is of course not empty now. We have many remaining
+"enhancement" issues which we hopefully can tackle in 2014 with your
+help.
+
+For those who use older Python versions, please note that pytest is not
+automatically tested on python2.5 due to virtualenv, setuptools and tox
+not supporting it anymore. Manual verification shows that it mostly
+works fine but it's not going to be part of the automated release
+process and thus likely to break in the future.
+
+As usual, current docs are at
+
+ http://pytest.org
+
+and you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Particular thanks for helping with this release go to Anatoly Bubenkoff,
+Floris Bruynooghe, Marc Abramowitz, Ralph Schmitt, Ronny Pfannschmidt,
+Donald Stufft, James Lan, Rob Dennis, Jason R. Coombs, Mathieu Agopian,
+Virgil Dupras, Bruno Oliveira, Alex Gaynor and others.
+
+have fun,
+holger krekel
+
+
+2.5.0
+-----------------------------------
+
+- dropped python2.5 from automated release testing of pytest itself
+  which means it's probably going to break soon (but we believe it
+  still works with this release).
+
+- simplified and fixed implementation for calling finalizers when
+ parametrized fixtures or function arguments are involved. finalization
+ is now performed lazily at setup time instead of in the "teardown phase".
+ While this might sound odd at first, it helps to ensure that we are
+ correctly handling setup/teardown even in complex code. User-level code
+ should not be affected unless it's implementing the pytest_runtest_teardown
+  hook and expecting certain fixture instances to be torn down within it (very
+  unlikely, and that would have been unreliable anyway).
+
+- PR90: add --color=yes|no|auto option to force terminal coloring
+ mode ("auto" is default). Thanks Marc Abramowitz.
+
+- fix issue319 - correctly show unicode in assertion errors. Many
+ thanks to Floris Bruynooghe for the complete PR. Also means
+ we depend on py>=1.4.19 now.
+
+- fix issue396 - correctly sort and finalize class-scoped parametrized
+ tests independently from number of methods on the class.
+
+- refix issue323 in a better way -- parametrization should now never
+ cause Runtime Recursion errors because the underlying algorithm
+ for re-ordering tests per-scope/per-fixture is not recursive
+  anymore (it was tail-call recursive before, which could lead
+  to problems for more than 966 non-function scoped parameters).
+
+- fix issue290 - there is preliminary support now for parametrizing
+  with repeated same values (sometimes useful to test whether calling
+  a second time works the same as the first time).
+
+- close issue240 - document precisely how pytest module importing
+ works, discuss the two common test directory layouts, and how it
+ interacts with PEP420-namespace packages.
+
+- fix issue246 - fix finalizer order to be LIFO on independent fixtures
+ depending on a parametrized higher-than-function scoped fixture.
+ (was quite some effort so please bear with the complexity of this sentence :)
+ Thanks Ralph Schmitt for the precise failure example.
+
+- fix issue244 by implementing a special index for parameters to only use
+  indices for parametrized test ids
+
+- fix issue287 by running all finalizers but saving the exception
+ from the first failing finalizer and re-raising it so teardown will
+ still have failed. We reraise the first failing exception because
+ it might be the cause for other finalizers to fail.
+
+- fix ordering when mock.patch or other standard decorator-wrappings
+  are used with test methods. This fixes issue346 and should
+ help with random "xdist" collection failures. Thanks to
+ Ronny Pfannschmidt and Donald Stufft for helping to isolate it.
+
+- fix issue357 - special case "-k" expressions to allow for
+ filtering with simple strings that are not valid python expressions.
+ Examples: "-k 1.3" matches all tests parametrized with 1.3.
+ "-k None" filters all tests that have "None" in their name
+ and conversely "-k 'not None'".
+ Previously these examples would raise syntax errors.
+
+- fix issue384 by removing the trial support code
+ since the unittest compat enhancements allow
+ trial to handle it on its own
+
+- don't hide an ImportError when importing a plugin produces one.
+ fixes issue375.
+
+- fix issue275 - allow usefixtures and autouse fixtures
+ for running doctest text files.
+
+- fix issue380 by making --resultlog only rely on longrepr instead
+ of the "reprcrash" attribute which only exists sometimes.
+
+- address issue122: allow @pytest.fixture(params=iterator) by exploding
+  it into a list early on (see the sketch after this list).
+
+- fix pexpect-3.0 compatibility for pytest's own tests.
+ (fixes issue386)
+
+- allow nested parametrize-value markers, thanks James Lan for the PR.
+
+- fix unicode handling with new monkeypatch.setattr(import_path, value)
+ API. Thanks Rob Dennis. Fixes issue371.
+
+- fix unicode handling with junitxml, fixes issue368.
+
+- In assertion rewriting mode on Python 2, fix the detection of coding
+ cookies. See issue #330.
+
+- make "--runxfail" turn imperative pytest.xfail calls into no ops
+ (it already did neutralize pytest.mark.xfail markers)
+
+- refine pytest / pkg_resources interactions: The AssertionRewritingHook
+ PEP302 compliant loader now registers itself with setuptools/pkg_resources
+ properly so that the pkg_resources.resource_stream method works properly.
+ Fixes issue366. Thanks for the investigations and full PR to Jason R. Coombs.
+
+- pytestconfig fixture is now session-scoped as it is the same object during the
+ whole test run. Fixes issue370.
+
+- avoid one surprising case of marker malfunction/confusion::
+
+ @pytest.mark.some(lambda arg: ...)
+ def test_function():
+
+ would not work correctly because pytest assumes @pytest.mark.some
+ gets a function to be decorated already. We now at least detect if this
+  arg is a lambda and thus the example will work. Thanks Alex Gaynor
+ for bringing it up.
+
+- xfail a test on pypy that checks wrong encoding/ascii (pypy does
+ not error out). fixes issue385.
+
+- internally make varnames() deal with classes' __init__,
+ although it's not needed by pytest itself atm. Also
+ fix caching. Fixes issue376.
+
+- fix issue221 - handle importing of namespace-package with no
+ __init__.py properly.
+
+- refactor internal FixtureRequest handling to avoid monkeypatching.
+ One of the positive user-facing effects is that the "request" object
+ can now be used in closures.
+
+- fixed version comparison in pytest.importorskip(modname, minverstring)
+
+- fix issue377 by clarifying in the nose-compat docs that pytest
+ does not duplicate the unittest-API into the "plain" namespace.
+
+- fix verbose reporting for @mock'd test functions
+
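+For illustration, parametrizing a fixture from an iterator could look
+roughly like this (a minimal sketch; the fixture and test names are
+hypothetical)::
+
+    # test_params.py -- hypothetical example
+    import pytest
+
+    # the generator is exploded into a list when the fixture is declared
+    @pytest.fixture(params=(n * n for n in range(3)))
+    def square(request):
+        return request.param
+
+    def test_square_is_non_negative(square):
+        assert square >= 0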
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.1.rst
new file mode 100644
index 000000000..a3a74cec6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.1.rst
@@ -0,0 +1,47 @@
+pytest-2.5.1: fixes and new home page styling
+===========================================================================
+
+pytest is a mature Python testing tool with more than 1000 tests
+against itself, passing on many different interpreters and platforms.
+
+The 2.5.1 release maintains the "zero-reported-bugs" promise by fixing
+the three bugs reported since the last release a few days ago. It also
+features a new home page styling implemented by Tobias Bieniek, based on
+the flask theme from Armin Ronacher:
+
+ http://pytest.org
+
+If you have further suggestions to improve styling and docs,
+we'd be very happy to merge further pull requests.
+
+On the coding side, the release also contains a little enhancement to
+fixture decorators allowing you to directly influence the generation of test
+ids, thanks to Floris Bruynooghe. Other thanks for helping with
+this release go to Anatoly Bubenkoff and Ronny Pfannschmidt.
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+have fun and a nice remaining "bug-free" time of the year :)
+holger krekel
+
+2.5.1
+-----------------------------------
+
+- merge new documentation styling PR from Tobias Bieniek.
+
+- fix issue403: allow parametrize of multiple same-name functions within
+ a collection node. Thanks Andreas Kloeckner and Alex Gaynor for reporting
+ and analysis.
+
+- Allow parametrized fixtures to specify the IDs of the parameters by
+  adding an ids argument to pytest.fixture() and pytest.yield_fixture()
+  (see the sketch after this list). Thanks Floris Bruynooghe.
+
+- fix issue404 by always using the binary xml escape in the junitxml
+ plugin. Thanks Ronny Pfannschmidt.
+
+- fix issue407: fix addoption docstring to point to argparse instead of
+ optparse. Thanks Daniel D. Wright.
+
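+For illustration, the new ids argument could be used roughly like this
+(a minimal sketch; the fixture name and ids are hypothetical)::
+
+    # test_ids.py -- hypothetical example
+    import pytest
+
+    # "hour" and "day" show up in the test ids instead of "0" and "1"
+    @pytest.fixture(params=[3600, 86400], ids=["hour", "day"])
+    def seconds(request):
+        return request.param
+
+    def test_seconds_positive(seconds):
+        assert seconds > 0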
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.2.rst
new file mode 100644
index 000000000..9308ffdd6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.5.2.rst
@@ -0,0 +1,64 @@
+pytest-2.5.2: fixes
+===========================================================================
+
+pytest is a mature Python testing tool with more than 1000 tests
+against itself, passing on many different interpreters and platforms.
+
+The 2.5.2 release fixes a few bugs with two maybe-bugs remaining and
+actively being worked on (and waiting for the bug reporter's input).
+We also have a new contribution guide thanks to Piotr Banaszkiewicz
+and others.
+
+See docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to the following people who contributed to this release:
+
+ Anatoly Bubenkov
+ Ronny Pfannschmidt
+ Floris Bruynooghe
+ Bruno Oliveira
+ Andreas Pelme
+ Jurko Gospodnetić
+ Piotr Banaszkiewicz
+ Simon Liedtke
+ lakka
+ Lukasz Balcerzak
+ Philippe Muller
+ Daniel Hahler
+
+have fun,
+holger krekel
+
+2.5.2
+-----------------------------------
+
+- fix issue409 -- better interoperate with cx_freeze by not
+ trying to import from collections.abc which causes problems
+ for py27/cx_freeze. Thanks Wolfgang L. for reporting and tracking it down.
+
+- fixed docs and code to use "pytest" instead of "py.test" almost everywhere.
+ Thanks Jurko Gospodnetic for the complete PR.
+
+- fix issue425: mention at end of "py.test -h" that --markers
+ and --fixtures work according to specified test path (or current dir)
+
+- fix issue413: exceptions with unicode attributes are now printed
+ correctly also on python2 and with pytest-xdist runs. (the fix
+ requires py-1.4.20)
+
+- copy, cleanup and integrate py.io capture
+ from pylib 1.4.20.dev2 (rev 13d9af95547e)
+
+- address issue416: clarify docs as to conftest.py loading semantics
+
+- fix issue429: comparing byte strings with non-ascii chars in assert
+  expressions now works better. Thanks Floris Bruynooghe.
+
+- make capfd/capsys.capture private, it's unused and shouldn't be exposed
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.0.rst
new file mode 100644
index 000000000..36b545a28
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.0.rst
@@ -0,0 +1,153 @@
+pytest-2.6.0: shorter tracebacks, new warning system, test runner compat
+===========================================================================
+
+pytest is a mature Python testing tool with more than 1000 tests
+against itself, passing on many different interpreters and platforms.
+
+The 2.6.0 release should be drop-in backward compatible with 2.5.2 and
+fixes a number of bugs and brings some new features, mainly:
+
+- shorter tracebacks by default: only the first (test function) entry
+ and the last (failure location) entry are shown, the ones between
+ only in "short" format. Use ``--tb=long`` to get back the old
+ behaviour of showing "long" entries everywhere.
+
+- a new warning system which reports oddities during collection
+ and execution. For example, ignoring collecting Test* classes with an
+ ``__init__`` now produces a warning.
+
+- various improvements to nose/mock/unittest integration
+
+Note also that 2.6.0 departs from the "zero reported bugs" policy
+because it has been too hard to keep up with it, unfortunately.
+Instead, for now we are going to focus on "upvoted" issues in
+the https://bitbucket.org/pytest-dev/pytest/issues?status=new&status=open&sort=-votes
+issue tracker.
+
+See docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed, among them:
+
+ Benjamin Peterson
+ Jurko Gospodnetić
+ Floris Bruynooghe
+ Marc Abramowitz
+ Marc Schlaich
+ Trevor Bekolay
+ Bruno Oliveira
+ Alex Groenholm
+
+have fun,
+holger krekel
+
+2.6.0
+-----------------------------------
+
+- fix issue537: Avoid importing old assertion reinterpretation code by default.
+ Thanks Benjamin Peterson.
+
+- fix issue364: shorten and enhance tracebacks representation by default.
+ The new "--tb=auto" option (default) will only display long tracebacks
+ for the first and last entry. You can get the old behaviour of printing
+ all entries as long entries with "--tb=long". Also short entries by
+ default are now printed very similarly to "--tb=native" ones.
+
+- fix issue514: teach assertion reinterpretation about private class attributes
+ Thanks Benjamin Peterson.
+
+- change -v output to include full node IDs of tests. Users can copy
+ a node ID from a test run, including line number, and use it as a
+ positional argument in order to run only a single test.
+
+- fix issue 475: fail early and comprehensibly when calling
+  pytest.raises with a wrong exception type.
+
+- fix issue516: tell in getting-started about current dependencies.
+
+- cleanup setup.py a bit and specify supported versions. Thanks Jurko
+ Gospodnetic for the PR.
+
+- change XPASS colour to yellow rather than red when tests are run
+ with -v.
+
+- fix issue473: work around mock putting an unbound method into a class
+ dict when double-patching.
+
+- fix issue498: if a fixture finalizer fails, make sure that
+ the fixture is still invalidated.
+
+- fix issue453: the result of the pytest_assertrepr_compare hook now gets
+  its newlines escaped so that format_exception does not blow up.
+
+- internal new warning system: pytest will now produce warnings when
+ it detects oddities in your test collection or execution.
+ Warnings are ultimately sent to a new pytest_logwarning hook which is
+ currently only implemented by the terminal plugin which displays
+ warnings in the summary line and shows more details when -rw (report on
+ warnings) is specified.
+
+- change skips into warnings for test classes with an __init__ and
+ callables in test modules which look like a test but are not functions.
+
+- fix issue436: improved finding of initial conftest files from command
+ line arguments by using the result of parse_known_args rather than
+ the previous flaky heuristics. Thanks Marc Abramowitz for tests
+ and initial fixing approaches in this area.
+
+- fix issue #479: properly handle nose/unittest(2) SkipTest exceptions
+ during collection/loading of test modules. Thanks to Marc Schlaich
+ for the complete PR.
+
+- fix issue490: include pytest_load_initial_conftests in documentation
+ and improve docstring.
+
+- fix issue472: clarify that ``pytest.config.getvalue()`` cannot work
+ if it's triggered ahead of command line parsing.
+
+- merge PR123: improved integration with mock.patch decorator on tests.
+
+- fix issue412: messing with stdout/stderr FD-level streams is now
+ captured without crashes.
+
+- fix issue483: trial/py33 works now properly. Thanks Daniel Grana for PR.
+
+- improve example for pytest integration with "python setup.py test"
+ which now has a generic "-a" or "--pytest-args" option where you
+ can pass additional options as a quoted string. Thanks Trevor Bekolay.
+
+- simplified internal capturing mechanism and made it more robust
+ against tests or setups changing FD1/FD2, also better integrated
+ now with pytest.pdb() in single tests.
+
+- improvements to pytest's own test-suite leakage detection, courtesy of PRs
+ from Marc Abramowitz
+
+- fix issue492: avoid leak in test_writeorg. Thanks Marc Abramowitz.
+
+- fix issue493: don't run tests in doc directory with ``python setup.py test``
+ (use tox -e doctesting for that)
+
+- fix issue486: better reporting and handling of early conftest loading failures
+
+- some cleanup and simplification of internal conftest handling.
+
+- work a bit harder to break reference cycles when catching exceptions.
+ Thanks Jurko Gospodnetic.
+
+- fix issue443: fix skip examples to use proper comparison. Thanks Alex
+ Groenholm.
+
+- support nose-style ``__test__`` attribute on modules, classes and
+  functions, including unittest-style classes. If set to False, the
+  test will not be collected (see the sketch after this list).
+
+- fix issue512: show "<notset>" for arguments which might not be set
+ in monkeypatch plugin. Improves output in documentation.
+
+- avoid importing "py.test" (an old alias module for "pytest")
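+
+For illustration, the ``__test__`` attribute could be used roughly like
+this (a minimal sketch; the class and function names are hypothetical)::
+
+    # test_helpers.py -- hypothetical example
+
+    def test_real_case():
+        assert 1 + 1 == 2
+
+    class TestHelper:
+        # nose-style opt-out: this class is not collected as a test class
+        __test__ = False
+
+        def test_never_collected(self):
+            raise RuntimeError("pytest does not run this")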
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.1.rst
new file mode 100644
index 000000000..6f27c5861
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.1.rst
@@ -0,0 +1,59 @@
+pytest-2.6.1: fixes and new xfail feature
+===========================================================================
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+The 2.6.1 release is drop-in compatible with 2.5.2 and actually fixes some
+regressions introduced with 2.6.0. It also brings a little feature
+to the xfail marker which now recognizes expected exceptions,
+see the CHANGELOG below.
+
+See docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed, among them:
+
+ Floris Bruynooghe
+ Bruno Oliveira
+ Nicolas Delaby
+
+have fun,
+holger krekel
+
+Changes 2.6.1
+=================
+
+- No longer show line numbers in the --verbose output; the output is now
+ purely the nodeid. The line number is still shown in failure reports.
+ Thanks Floris Bruynooghe.
+
+- fix issue437 where assertion rewriting could cause pytest-xdist slaves
+ to collect different tests. Thanks Bruno Oliveira.
+
+- fix issue555: add "errors" attribute to capture-streams to satisfy
+ some distutils and possibly other code accessing sys.stdout.errors.
+
+- fix issue547: capsys/capfd also work when output capturing ("-s") is
+  disabled (see the sketch after this list).
+
+- address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via
+ an optional "raises=EXC" argument where EXC can be a single exception
+ or a tuple of exception classes. Thanks David Mohr for the complete
+ PR.
+
+- fix integration of pytest with unittest.mock.patch decorator when
+ it uses the "new" argument. Thanks Nicolas Delaby for test and PR.
+
+- fix issue with detecting conftest files if the arguments contain
+ "::" node id specifications (copy pasted from "-v" output)
+
+- fix issue544 by only removing "@NUM" at the end of "::" separated parts
+ and if the part has an ".py" extension
+
+- don't use py.std import helper, rather import things directly.
+ Thanks Bruno Oliveira.
+
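+For illustration, capsys is used roughly like this and now also works
+when running with "-s" (a minimal sketch; the test name is hypothetical)::
+
+    # test_output.py -- hypothetical example
+    def test_prints_greeting(capsys):
+        print("hello")
+        # readouterr() returns the stdout and stderr captured so far
+        out, err = capsys.readouterr()
+        assert out == "hello\n"
+        assert err == ""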
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.2.rst
new file mode 100644
index 000000000..4efc73a4e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.2.rst
@@ -0,0 +1,52 @@
+pytest-2.6.2: few fixes and cx_freeze support
+===========================================================================
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is drop-in compatible with 2.5.2 and 2.6.X. It also
+brings support for including pytest with cx_freeze or similar
+freezing tools into your single-file app distribution. For details
+see the CHANGELOG below.
+
+See docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed, among them:
+
+ Floris Bruynooghe
+ Benjamin Peterson
+ Bruno Oliveira
+
+have fun,
+holger krekel
+
+2.6.2
+-----------
+
+- Added function pytest.freeze_includes(), which makes it easy to embed
+  pytest into executables using tools like cx_freeze (see the sketch
+  after this list). See docs for examples and rationale. Thanks Bruno Oliveira.
+
+- Improve assertion rewriting cache invalidation precision.
+
+- fixed issue561: adapt autouse fixture example for python3.
+
+- fixed issue453: assertion rewriting issue with __repr__ containing
+ "\n{", "\n}" and "\n~".
+
+- fix issue560: correctly display code if an "else:" or "finally:" is
+ followed by statements on the same line.
+
+- Fix example in monkeypatch documentation, thanks t-8ch.
+
+- fix issue572: correct tmpdir doc example for python3.
+
+- Do not mark as universal wheel because Python 2.6 is different from
+ other builds due to the extra argparse dependency. Fixes issue566.
+ Thanks sontek.
+
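+For illustration, a cx_Freeze setup script could use the new helper
+roughly like this (a minimal sketch; the project and script names are
+hypothetical)::
+
+    # setup_freeze.py -- hypothetical example
+    import pytest
+    from cx_Freeze import setup, Executable
+
+    setup(
+        name="myapp",
+        executables=[Executable("myapp.py")],
+        options={
+            # bundle pytest's own modules so the frozen app can run tests
+            "build_exe": {"includes": pytest.freeze_includes()},
+        },
+    )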
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.3.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.3.rst
new file mode 100644
index 000000000..13fae31b8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.6.3.rst
@@ -0,0 +1,52 @@
+pytest-2.6.3: fixes and little improvements
+===========================================================================
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is drop-in compatible with 2.5.2 and 2.6.X.
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed, among them:
+
+ Floris Bruynooghe
+ Oleg Sinyavskiy
+ Uwe Schmitt
+ Charles Cloud
+ Wolfgang Schnerring
+
+have fun,
+holger krekel
+
+Changes 2.6.3
+======================
+
+- fix issue575: xunit-xml was reporting collection errors as failures
+ instead of errors, thanks Oleg Sinyavskiy.
+
+- fix issue582: fix setuptools example, thanks Laszlo Papp and Ronny
+ Pfannschmidt.
+
+- Fix infinite recursion bug when pickling capture.EncodedFile, thanks
+ Uwe Schmitt.
+
+- fix issue589: fix bad interaction with numpy and others when showing
+  exceptions. Check for the precise "maximum recursion depth exceeded" exception
+ instead of presuming any RuntimeError is that one (implemented in py
+ dep). Thanks Charles Cloud for analysing the issue.
+
+- fix conftest-related fixture visibility issue: when running with a
+  CWD outside a test package pytest would get fixture discovery wrong.
+  Thanks to Wolfgang Schnerring for figuring out a reproducible example.
+
+- Introduce pytest_enter_pdb hook (needed e.g. by pytest_timeout to cancel the
+ timeout when interactively entering pdb). Thanks Wolfgang Schnerring.
+
+- check xfail/skip also with non-python function test items. Thanks
+ Floris Bruynooghe.
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.0.rst
new file mode 100644
index 000000000..07ae44ca1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.0.rst
@@ -0,0 +1,101 @@
+pytest-2.7.0: fixes, features, speed improvements
+===========================================================================
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.6.X.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed, among them:
+
+ Anatoly Bubenkoff
+ Floris Bruynooghe
+ Brianna Laugher
+ Eric Siegerman
+ Daniel Hahler
+ Charles Cloud
+ Tom Viner
+ Holger Peters
+ Ldiary Translations
+ almarklein
+
+have fun,
+holger krekel
+
+2.7.0 (compared to 2.6.4)
+-----------------------------
+
+- fix issue435: make reload() work when assert rewriting is active.
+ Thanks Daniel Hahler.
+
+- fix issue616: conftest.py files and their contained fixtures are now
+ properly considered for visibility, independently from the exact
+ current working directory and test arguments that are used.
+ Many thanks to Eric Siegerman and his PR235 which contains
+ systematic tests for conftest visibility and now passes.
+ This change also introduces the concept of a ``rootdir`` which
+ is printed as a new pytest header and documented in the pytest
+ customize web page.
+
+- change reporting of "diverted" tests, i.e. tests that are collected
+ in one file but actually come from another (e.g. when tests in a test class
+ come from a base class in a different file). We now show the nodeid
+ and indicate via a postfix the other file.
+
+- add ability to set command line options by environment variable PYTEST_ADDOPTS.
+
+- added documentation on the new pytest-dev teams on bitbucket and
+ github. See https://pytest.org/latest/contributing.html .
+ Thanks to Anatoly for pushing and initial work on this.
+
+- fix issue650: new option ``--doctest-ignore-import-errors`` which
+ will turn import errors in doctests into skips. Thanks Charles Cloud
+ for the complete PR.
+
+- fix issue655: work around different ways that cause python2/3
+ to leak sys.exc_info into fixtures/tests causing failures in 3rd party code
+
+- fix issue615: assertion re-writing did not correctly escape % signs
+ when formatting boolean operations, which tripped over mixing
+ booleans with modulo operators. Thanks to Tom Viner for the report,
+ triaging and fix.
+
+- implement issue351: add ability to specify parametrize ids as a callable
+  to generate custom test ids (see the sketch after this list).
+  Thanks Brianna Laugher for the idea and implementation.
+
+- introduce and document new hookwrapper mechanism useful for plugins
+ which want to wrap the execution of certain hooks for their purposes.
+ This supersedes the undocumented ``__multicall__`` protocol which
+ pytest itself and some external plugins use. Note that pytest-2.8
+ is scheduled to drop supporting the old ``__multicall__``
+ and only support the hookwrapper protocol.
+
+- majorly speed up invocation of plugin hooks
+
+- use hookwrapper mechanism in builtin pytest plugins.
+
+- add a doctest ini option for doctest flags, thanks Holger Peters.
+
+- add note to docs that if you want to mark a parameter and the
+ parameter is a callable, you also need to pass in a reason to disambiguate
+ it from the "decorator" case. Thanks Tom Viner.
+
+- "python_classes" and "python_functions" options now support glob-patterns
+ for test discovery, as discussed in issue600. Thanks Ldiary Translations.
+
+- allow overriding parametrized fixtures with non-parametrized ones and vice versa (bubenkoff).
+
+- fix issue463: raise specific error for 'parameterize' misspelling (pfctdayelise).
+
+- On failure, the ``sys.last_value``, ``sys.last_type`` and
+ ``sys.last_traceback`` are set, so that a user can inspect the error
+ via postmortem debugging (almarklein).
+
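+For illustration, a callable passed as ids could be used roughly like
+this (a minimal sketch; the id function and test names are hypothetical)::
+
+    # test_idfn.py -- hypothetical example
+    import pytest
+
+    def idfn(value):
+        # return a string to use as the id, or None to fall back to
+        # pytest's automatically generated id
+        return "n{0}".format(value)
+
+    @pytest.mark.parametrize("n", [1, 2, 3], ids=idfn)
+    def test_positive(n):
+        assert n > 0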
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.1.rst
new file mode 100644
index 000000000..cd37cad0c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.1.rst
@@ -0,0 +1,58 @@
+pytest-2.7.1: bug fixes
+=======================
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.7.0.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Bruno Oliveira
+ Holger Krekel
+ Ionel Maries Cristian
+ Floris Bruynooghe
+
+Happy testing,
+The py.test Development Team
+
+
+2.7.1 (compared to 2.7.0)
+-------------------------
+
+- fix issue731: do not get confused by the braces which may be present
+ and unbalanced in an object's repr while collapsing False
+ explanations. Thanks Carl Meyer for the report and test case.
+
+- fix issue553: properly handling inspect.getsourcelines failures in
+  FixtureLookupError which would lead to an internal error,
+ obfuscating the original problem. Thanks talljosh for initial
+ diagnose/patch and Bruno Oliveira for final patch.
+
+- fix issue660: properly report scope-mismatch-access errors
+ independently from ordering of fixture arguments. Also
+ avoid the pytest internal traceback which does not provide
+ information to the user. Thanks Holger Krekel.
+
+- streamlined and documented release process. Also all versions
+ (in setup.py and documentation generation) are now read
+ from _pytest/__init__.py. Thanks Holger Krekel.
+
+- fixed docs to remove the notion that yield-fixtures are experimental.
+ They are here to stay :) Thanks Bruno Oliveira.
+
+- Support building wheels by using environment markers for the
+ requirements. Thanks Ionel Maries Cristian.
+
+- fixed a regression (compared to 2.6.4) which surfaced e.g. in lost stdout capture printing
+ when tests raised SystemExit. Thanks Holger Krekel.
+
+- reintroduced _pytest fixture of the pytester plugin which is used
+ at least by pytest-xdist.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.2.rst
new file mode 100644
index 000000000..69130ad62
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.7.2.rst
@@ -0,0 +1,58 @@
+pytest-2.7.2: bug fixes
+=======================
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.7.1.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Bruno Oliveira
+ Floris Bruynooghe
+ Punyashloka Biswal
+ Aron Curzon
+ Benjamin Peterson
+ Thomas De Schampheleire
+ Edison Gustavo Muenz
+ Holger Krekel
+
+Happy testing,
+The py.test Development Team
+
+
+2.7.2 (compared to 2.7.1)
+-----------------------------
+
+- fix issue767: pytest.raises value attribute does not contain the exception
+ instance on Python 2.6. Thanks Eric Siegerman for providing the test
+ case and Bruno Oliveira for PR.
+
+- Automatically create directory for junitxml and results log.
+ Thanks Aron Curzon.
+
+- fix issue713: JUnit XML reports for doctest failures.
+ Thanks Punyashloka Biswal.
+
+- fix issue735: assertion failures on debug versions of Python 3.4+
+ Thanks Benjamin Peterson.
+
+- fix issue114: skipif marker reports to internal skipping plugin;
+ Thanks Floris Bruynooghe for reporting and Bruno Oliveira for the PR.
+
+- fix issue748: unittest.SkipTest reports to internal pytest unittest plugin.
+ Thanks Thomas De Schampheleire for reporting and Bruno Oliveira for the PR.
+
+- fix issue718: failed to create representation of sets containing unsortable
+ elements in python 2. Thanks Edison Gustavo Muenz
+
+- fix issue756, fix issue752 (and similar issues): depend on py-1.4.29
+ which has a refined algorithm for traceback generation.
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.2.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.2.rst
new file mode 100644
index 000000000..d70286161
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.2.rst
@@ -0,0 +1,44 @@
+pytest-2.8.2: bug fixes
+=======================
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.8.1.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Bruno Oliveira
+ Demian Brecht
+ Florian Bruhin
+ Ionel Cristian Mărieș
+ Raphael Pierzina
+ Ronny Pfannschmidt
+ holger krekel
+
+Happy testing,
+The py.test Development Team
+
+
+2.8.2 (compared to 2.7.2)
+-----------------------------
+
+- fix #1085: proper handling of encoding errors when passing encoded byte
+ strings to pytest.parametrize in Python 2.
+ Thanks Themanwithoutaplan for the report and Bruno Oliveira for the PR.
+
+- fix #1087: handling SystemError when passing empty byte strings to
+ pytest.parametrize in Python 3.
+ Thanks Paul Kehrer for the report and Bruno Oliveira for the PR.
+
+- fix #995: fixed internal error when filtering tracebacks where one entry
+ was generated by an exec() statement.
+ Thanks Daniel Hahler, Ashley C Straw, Philippe Gauthier and Pavel Savchenko
+ for contributing and Bruno Oliveira for the PR.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.3.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.3.rst
new file mode 100644
index 000000000..d080ac724
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.3.rst
@@ -0,0 +1,59 @@
+pytest-2.8.3: bug fixes
+=======================
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.8.2.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Bruno Oliveira
+ Florian Bruhin
+ Gabe Hollombe
+ Gabriel Reis
+ Hartmut Goebel
+ John Vandenberg
+ Lee Kamentsky
+ Michael Birtwell
+ Raphael Pierzina
+ Ronny Pfannschmidt
+ William Martin Stewart
+
+Happy testing,
+The py.test Development Team
+
+
+2.8.3 (compared to 2.8.2)
+-----------------------------
+
+- fix #1169: add __name__ attribute to testcases in TestCaseFunction to
+ support the @unittest.skip decorator on functions and methods.
+ Thanks Lee Kamentsky for the PR.
+
+- fix #1035: collecting tests when a test module-level object has __getattr__().
+ Thanks Suor for the report and Bruno Oliveira / Tom Viner for the PR.
+
+- fix #331: don't collect tests if their failure cannot be reported correctly
+ e.g. they are a callable instance of a class.
+
+- fix #1133: fixed internal error when filtering tracebacks where one entry
+ belongs to a file which is no longer available.
+ Thanks Bruno Oliveira for the PR.
+
+- enhancement made to highlight in red the name of the failing tests so
+ they stand out in the output.
+ Thanks Gabriel Reis for the PR.
+
+- add more talks to the documentation
+- extend documentation on the --ignore cli option
+- use pytest-runner for setuptools integration
+- minor fixes for interaction with OS X El Capitan system integrity protection (thanks Florian)
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.4.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.4.rst
new file mode 100644
index 000000000..a09629cef
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.4.rst
@@ -0,0 +1,52 @@
+pytest-2.8.4
+============
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.8.2.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Bruno Oliveira
+ Florian Bruhin
+ Jeff Widman
+ Mehdy Khoshnoody
+ Nicholas Chammas
+ Ronny Pfannschmidt
+ Tim Chan
+
+
+Happy testing,
+The py.test Development Team
+
+
+2.8.4 (compared to 2.8.3)
+-----------------------------
+
+- fix #1190: ``deprecated_call()`` now works when the deprecated
+  function has already been called by another test in the same
+ module. Thanks Mikhail Chernykh for the report and Bruno Oliveira for the
+ PR.
+
+- fix #1198: ``--pastebin`` option now works on Python 3. Thanks
+ Mehdy Khoshnoody for the PR.
+
+- fix #1219: ``--pastebin`` now works correctly when captured output contains
+ non-ascii characters. Thanks Bruno Oliveira for the PR.
+
+- fix #1204: another error when collecting with a nasty __getattr__().
+ Thanks Florian Bruhin for the PR.
+
+- fix the summary printed when no tests did run.
+ Thanks Florian Bruhin for the PR.
+
+- a number of documentation modernizations wrt good practices.
+ Thanks Bruno Oliveira for the PR.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.5.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.5.rst
new file mode 100644
index 000000000..7409022a1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.5.rst
@@ -0,0 +1,39 @@
+pytest-2.8.5
+============
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.8.4.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Alex Gaynor
+ aselus-hub
+ Bruno Oliveira
+ Ronny Pfannschmidt
+
+
+Happy testing,
+The py.test Development Team
+
+
+2.8.5 (compared to 2.8.4)
+-------------------------
+
+- fix #1243: fixed issue where class attributes injected during collection could break pytest.
+ PR by Alexei Kozlenok, thanks Ronny Pfannschmidt and Bruno Oliveira for the review and help.
+
+- fix #1074: precompute junitxml chunks instead of storing the whole tree in objects
+ Thanks Bruno Oliveira for the report and Ronny Pfannschmidt for the PR
+
+- fix #1238: fix ``pytest.deprecated_call()`` receiving multiple arguments
+ (Regression introduced in 2.8.4). Thanks Alex Gaynor for the report and
+ Bruno Oliveira for the PR.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.6.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.6.rst
new file mode 100644
index 000000000..215fae51e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.6.rst
@@ -0,0 +1,67 @@
+pytest-2.8.6
+============
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.8.5.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ AMiT Kumar
+ Bruno Oliveira
+ Erik M. Bray
+ Florian Bruhin
+ Georgy Dyuldin
+ Jeff Widman
+ Kartik Singhal
+ Loïc Estève
+ Manu Phatak
+ Peter Demin
+ Rick van Hattem
+ Ronny Pfannschmidt
+ Ulrich Petri
+ foxx
+
+
+Happy testing,
+The py.test Development Team
+
+
+2.8.6 (compared to 2.8.5)
+-------------------------
+
+- fix #1259: allow for double nodeids in junitxml;
+  this was a regression breaking plugin combinations
+  like pytest-pep8 + pytest-flakes
+
+- Workaround for exception that occurs in pyreadline when using
+ ``--pdb`` with standard I/O capture enabled.
+ Thanks Erik M. Bray for the PR.
+
+- fix #900: Better error message in case the target of a ``monkeypatch`` call
+ raises an ``ImportError``.
+
+- fix #1292: monkeypatch calls (setattr, setenv, etc.) are now O(1).
+ Thanks David R. MacIver for the report and Bruno Oliveira for the PR.
+
+- fix #1223: captured stdout and stderr are now properly displayed before
+ entering pdb when ``--pdb`` is used instead of being thrown away.
+ Thanks Cal Leeming for the PR.
+
+- fix #1305: pytest warnings emitted during ``pytest_terminal_summary`` are now
+ properly displayed.
+ Thanks Ionel Maries Cristian for the report and Bruno Oliveira for the PR.
+
+- fix #628: fixed internal UnicodeDecodeError when doctests contain unicode.
+ Thanks Jason R. Coombs for the report and Bruno Oliveira for the PR.
+
+- fix #1334: Add captured stdout to jUnit XML report on setup error.
+ Thanks Georgy Dyuldin for the PR.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.7.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.7.rst
new file mode 100644
index 000000000..d98d73106
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.8.7.rst
@@ -0,0 +1,31 @@
+pytest-2.8.7
+============
+
+This is a hotfix release to solve a regression
+in the builtin monkeypatch plugin that got introduced in 2.8.6.
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+This release is supposed to be drop-in compatible with 2.8.5.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Ronny Pfannschmidt
+
+
+Happy testing,
+The py.test Development Team
+
+
+2.8.7 (compared to 2.8.6)
+-------------------------
+
+- fix #1338: use predictable object resolution for monkeypatch \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.9.0.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.9.0.rst
new file mode 100644
index 000000000..99c1c631f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.9.0.rst
@@ -0,0 +1,159 @@
+pytest-2.9.0
+============
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Anatoly Bubenkov
+ Bruno Oliveira
+ Buck Golemon
+ David Vierra
+ Florian Bruhin
+ Galaczi Endre
+ Georgy Dyuldin
+ Lukas Bednar
+ Luke Murphy
+ Marcin Biernat
+ Matt Williams
+ Michael Aquilina
+ Raphael Pierzina
+ Ronny Pfannschmidt
+ Ryan Wooden
+ Tiemo Kieft
+ TomV
+ holger krekel
+ jab
+
+
+Happy testing,
+The py.test Development Team
+
+
+2.9.0 (compared to 2.8.7)
+-------------------------
+
+**New Features**
+
+* New ``pytest.mark.skip`` mark, which unconditionally skips marked tests
+  (see the sketch after this list).
+  Thanks `@MichaelAquilina`_ for the complete PR (`#1040`_).
+
+* ``--doctest-glob`` may now be passed multiple times in the command-line.
+ Thanks `@jab`_ and `@nicoddemus`_ for the PR.
+
+* New ``-rp`` and ``-rP`` reporting options give the summary and full output
+ of passing tests, respectively. Thanks to `@codewarrior0`_ for the PR.
+
+* ``pytest.mark.xfail`` now has a ``strict`` option which makes ``XPASS``
+  tests fail the test suite, defaulting to ``False``. There's also an
+  ``xfail_strict`` ini option that can be used to configure it project-wide.
+ Thanks `@rabbbit`_ for the request and `@nicoddemus`_ for the PR (`#1355`_).
+
+* ``Parser.addini`` now supports options of type ``bool``. Thanks
+ `@nicoddemus`_ for the PR.
+
+* New ``ALLOW_BYTES`` doctest option strips ``b`` prefixes from byte strings
+ in doctest output (similar to ``ALLOW_UNICODE``).
+ Thanks `@jaraco`_ for the request and `@nicoddemus`_ for the PR (`#1287`_).
+
+* give a hint on KeyboardInterrupt to use the --fulltrace option to show the errors;
+  this fixes `#1366`_.
+ Thanks to `@hpk42`_ for the report and `@RonnyPfannschmidt`_ for the PR.
+
+* catch IndexError exceptions when getting exception source location. This fixes
+  a pytest internal error for dynamically generated code (fixtures and tests)
+  where source lines are intentionally fake.
+
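+For illustration, the new skip mark and the strict xfail option could be
+used roughly like this (a minimal sketch; the test names and reason are
+hypothetical)::
+
+    # test_new_marks.py -- hypothetical example
+    import pytest
+
+    @pytest.mark.skip(reason="backend not implemented yet")
+    def test_backend():
+        assert False  # never executed
+
+    # with strict=True an unexpected pass (XPASS) fails the suite
+    @pytest.mark.xfail(strict=True)
+    def test_known_bug():
+        assert 1 == 2
+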
+**Changes**
+
+* **Important**: `py.code <http://pylib.readthedocs.org/en/latest/code.html>`_ has been
+ merged into the ``pytest`` repository as ``pytest._code``. This decision
+ was made because ``py.code`` had very few uses outside ``pytest`` and the
+ fact that it was in a different repository made it difficult to fix bugs on
+ its code in a timely manner. The team hopes with this to be able to better
+ refactor out and improve that code.
+  This change shouldn't affect users, but it is useful for users to be aware of
+  it if they encounter any strange behavior.
+
+ Keep in mind that the code for ``pytest._code`` is **private** and
+ **experimental**, so you definitely should not import it explicitly!
+
+ Please note that the original ``py.code`` is still available in
+ `pylib <http://pylib.readthedocs.org>`_.
+
+* ``pytest_enter_pdb`` now optionally receives the pytest config object.
+ Thanks `@nicoddemus`_ for the PR.
+
+* Removed code and documentation for Python 2.5 or lower versions,
+ including removal of the obsolete ``_pytest.assertion.oldinterpret`` module.
+ Thanks `@nicoddemus`_ for the PR (`#1226`_).
+
+* Comparisons now always show up in full when ``CI`` or ``BUILD_NUMBER`` is
+ found in the environment, even when -vv isn't used.
+ Thanks `@The-Compiler`_ for the PR.
+
+* ``--lf`` and ``--ff`` now support long names: ``--last-failed`` and
+ ``--failed-first`` respectively.
+ Thanks `@MichaelAquilina`_ for the PR.
+
+* Added expected exceptions to pytest.raises fail message
+
+* Collection only displays progress ("collecting X items") when in a terminal.
+ This avoids cluttering the output when using ``--color=yes`` to obtain
+  colors in CI integration systems (`#1397`_).
+
+**Bug Fixes**
+
+* The ``-s`` and ``-c`` options should now work under ``xdist``;
+ ``Config.fromdictargs`` now represents its input much more faithfully.
+ Thanks to `@bukzor`_ for the complete PR (`#680`_).
+
+* Fix (`#1290`_): support Python 3.5's ``@`` operator in assertion rewriting.
+ Thanks `@Shinkenjoe`_ for report with test case and `@tomviner`_ for the PR.
+
+* Fix formatting utf-8 explanation messages (`#1379`_).
+ Thanks `@biern`_ for the PR.
+
+* Fix `traceback style docs`_ to describe all of the available options
+ (auto/long/short/line/native/no), with `auto` being the default since v2.6.
+ Thanks `@hackebrot`_ for the PR.
+
+* Fix (`#1422`_): junit record_xml_property doesn't allow multiple records
+ with same name.
+
+
+.. _`traceback style docs`: https://pytest.org/latest/usage.html#modifying-python-traceback-printing
+
+.. _#1422: https://github.com/pytest-dev/pytest/issues/1422
+.. _#1379: https://github.com/pytest-dev/pytest/issues/1379
+.. _#1366: https://github.com/pytest-dev/pytest/issues/1366
+.. _#1040: https://github.com/pytest-dev/pytest/pull/1040
+.. _#680: https://github.com/pytest-dev/pytest/issues/680
+.. _#1287: https://github.com/pytest-dev/pytest/pull/1287
+.. _#1226: https://github.com/pytest-dev/pytest/pull/1226
+.. _#1290: https://github.com/pytest-dev/pytest/pull/1290
+.. _#1355: https://github.com/pytest-dev/pytest/pull/1355
+.. _#1397: https://github.com/pytest-dev/pytest/issues/1397
+.. _@biern: https://github.com/biern
+.. _@MichaelAquilina: https://github.com/MichaelAquilina
+.. _@bukzor: https://github.com/bukzor
+.. _@hpk42: https://github.com/hpk42
+.. _@nicoddemus: https://github.com/nicoddemus
+.. _@jab: https://github.com/jab
+.. _@codewarrior0: https://github.com/codewarrior0
+.. _@jaraco: https://github.com/jaraco
+.. _@The-Compiler: https://github.com/The-Compiler
+.. _@Shinkenjoe: https://github.com/Shinkenjoe
+.. _@tomviner: https://github.com/tomviner
+.. _@RonnyPfannschmidt: https://github.com/RonnyPfannschmidt
+.. _@rabbbit: https://github.com/rabbbit
+.. _@hackebrot: https://github.com/hackebrot \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.9.1.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.9.1.rst
new file mode 100644
index 000000000..05a448430
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/release-2.9.1.rst
@@ -0,0 +1,65 @@
+pytest-2.9.1
+============
+
+pytest is a mature Python testing tool with more than 1100 tests
+against itself, passing on many different interpreters and platforms.
+
+See below for the changes and see docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+ Bruno Oliveira
+ Daniel Hahler
+ Dmitry Malinovsky
+ Florian Bruhin
+ Floris Bruynooghe
+ Matt Bachmann
+ Ronny Pfannschmidt
+ TomV
+ Vladimir Bolshakov
+ Zearin
+ palaviv
+
+
+Happy testing,
+The py.test Development Team
+
+
+2.9.1 (compared to 2.9.0)
+-------------------------
+
+**Bug Fixes**
+
+* Improve error message when a plugin fails to load.
+ Thanks `@nicoddemus`_ for the PR.
+
+* Fix (`#1178 <https://github.com/pytest-dev/pytest/issues/1178>`_):
+ ``pytest.fail`` with non-ascii characters raises an internal pytest error.
+ Thanks `@nicoddemus`_ for the PR.
+
+* Fix (`#469`_): junit parses report.nodeid incorrectly, when params IDs
+ contain ``::``. Thanks `@tomviner`_ for the PR (`#1431`_).
+
+* Fix (`#578 <https://github.com/pytest-dev/pytest/issues/578>`_): SyntaxErrors
+ containing non-ascii lines at the point of failure generated an internal
+ py.test error.
+ Thanks `@asottile`_ for the report and `@nicoddemus`_ for the PR.
+
+* Fix (`#1437`_): When passing in a bytestring regex pattern to parametrize,
+  attempt to decode it as utf-8, ignoring errors.
+
+* Fix (`#649`_): parametrized test nodes cannot be specified to run on the command line.
+
+
+.. _#1437: https://github.com/pytest-dev/pytest/issues/1437
+.. _#469: https://github.com/pytest-dev/pytest/issues/469
+.. _#1431: https://github.com/pytest-dev/pytest/pull/1431
+.. _#649: https://github.com/pytest-dev/pytest/issues/649
+
+.. _@asottile: https://github.com/asottile
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/announce/sprint2016.rst b/testing/web-platform/tests/tools/pytest/doc/en/announce/sprint2016.rst
new file mode 100644
index 000000000..e59ccdda7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/announce/sprint2016.rst
@@ -0,0 +1,105 @@
+python testing sprint June 20th-26th 2016
+======================================================
+
+.. image:: ../img/freiburg2.jpg
+ :width: 400
+
+The pytest core group is heading towards the biggest sprint
+in its history, to take place in the Black Forest town of Freiburg
+in Germany. As of February 2016 we have started a `funding
+campaign on Indiegogo to cover expenses
+<http://igg.me/at/pytest-sprint/x/4034848>`_. The page also mentions
+some preliminary topics:
+
+- improving pytest-xdist test scheduling to take into account
+ fixture setups and explicit user hints.
+
+- provide info on fixture dependencies during --collect-only
+
+- tying pytest-xdist to tox so that you can do "py.test -e py34"
+ to run tests in a particular tox-managed virtualenv. Also
+ look into making pytest-xdist use tox environments on
+ remote ssh-sides so that remote dependency management becomes
+ easier.
+
+- refactoring the fixture system so more people understand it :)
+
+- integrating PyUnit setup methods as autouse fixtures.
+ possibly adding ways to influence ordering of same-scoped
+ fixtures (so you can make a choice of which fixtures come
+ before others)
+
+- fixing bugs and issues from the tracker, really an endless source :)
+
+
+Participants
+--------------
+
+Here are preliminary participants who said they are likely to come,
+given some expenses funding::
+
+ Anatoly Bubenkoff, Netherlands
+ Andreas Pelme, Personalkollen, Sweden
+ Anthony Wang, Splunk, US
+ Brianna Laugher, Australia
+ Bruno Oliveira, Brazil
+ Danielle Jenkins, Splunk, US
+ Dave Hunt, UK
+ Florian Bruhin, Switzerland
+ Floris Bruynooghe, Cobe.io, UK
+ Holger Krekel, merlinux, Germany
+ Oliver Bestwalter, Avira, Germany
+ Omar Kohl, Germany
+ Raphael Pierzina, FanDuel, UK
+ Tom Viner, UK
+
+ <your name here?>
+
+Other contributors and experienced newcomers are invited to join as well,
+but please send a mail to the pytest-dev mailing list fairly soon if you
+intend to do so, and mention how much funding you would need. If you
+are working for a company and using pytest heavily you are welcome to
+join, and we encourage your company to provide some funding for the
+sprint. They may see it, and rightfully so, as a very affordable and deep
+training which brings you together with the experts in the field :)
+
+
+Sprint organisation, schedule
+-------------------------------
+
+tentative schedule:
+
+- 19th/20th arrival in Freiburg
+- 20th social get-together, initial hacking
+- 21st/22nd full sprint days
+- 23rd break day, hiking
+- 24th/25th full sprint days
+- 26th departure
+
+We might adjust the schedule according to the weather to make sure that
+any hiking day or excursion falls on good weather.
+Freiburg is one of the sunniest places in Germany so
+it shouldn't be too much of a constraint.
+
+
+Accommodation
+----------------
+
+We'll try to arrange renting a flat with multiple
+beds/rooms. Hotels are usually below 100 per night.
+The earlier we book, the better.
+
+Money / funding
+---------------
+
+The Indiegogo campaign asks for 11000 USD which should cover
+the costs for flights and accommodation, renting a sprint place
+and maybe a bit of food as well.
+
+If your organisation wants to support the sprint but prefers
+to give money according to an invoice, get in contact with
+holger at http://merlinux.eu who can invoice your organisation
+properly.
+
+If we have excess money we'll use it for further sprint/travel
+funding for pytest/tox contributors.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/assert.rst b/testing/web-platform/tests/tools/pytest/doc/en/assert.rst
new file mode 100644
index 000000000..e7f14e8bd
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/assert.rst
@@ -0,0 +1,289 @@
+
+The writing and reporting of assertions in tests
+==================================================
+
+.. _`assertfeedback`:
+.. _`assert with the assert statement`:
+.. _`assert`:
+
+
+Asserting with the ``assert`` statement
+---------------------------------------------------------
+
+``pytest`` allows you to use the standard Python ``assert`` statement for
+verifying expectations and values in Python tests. For example, you can
+write the following::
+
+ # content of test_assert1.py
+ def f():
+ return 3
+
+ def test_function():
+ assert f() == 4
+
+to assert that your function returns a certain value. If this assertion fails
+you will see the return value of the function call::
+
+ $ py.test test_assert1.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_assert1.py F
+
+ ======= FAILURES ========
+ _______ test_function ________
+
+ def test_function():
+ > assert f() == 4
+ E assert 3 == 4
+ E + where 3 = f()
+
+ test_assert1.py:5: AssertionError
+ ======= 1 failed in 0.12 seconds ========
+
+``pytest`` has support for showing the values of the most common subexpressions
+including calls, attributes, comparisons, and binary and unary
+operators. (See :ref:`tbreportdemo`). This allows you to use
+idiomatic Python constructs without boilerplate code while not losing
+introspection information.
+
+However, if you specify a message with the assertion like this::
+
+ assert a % 2 == 0, "value was odd, should be even"
+
+then no assertion introspection takes place at all and the message
+is simply shown in the traceback.
+
+See :ref:`assert-details` for more information on assertion introspection.
+
+.. _`assertraises`:
+
+Assertions about expected exceptions
+------------------------------------------
+
+In order to write assertions about raised exceptions, you can use
+``pytest.raises`` as a context manager like this::
+
+ import pytest
+
+ def test_zero_division():
+ with pytest.raises(ZeroDivisionError):
+ 1 / 0
+
+and if you need to have access to the actual exception info you may use::
+
+ def test_recursion_depth():
+ with pytest.raises(RuntimeError) as excinfo:
+ def f():
+ f()
+ f()
+ assert 'maximum recursion' in str(excinfo.value)
+
+``excinfo`` is an ``ExceptionInfo`` instance, which is a wrapper around
+the actual exception raised. The main attributes of interest are
+``.type``, ``.value`` and ``.traceback``.
+
+If you want to write test code that works on Python 2.4 as well,
+you may also use two other ways to test for an expected exception::
+
+ pytest.raises(ExpectedException, func, *args, **kwargs)
+ pytest.raises(ExpectedException, "func(*args, **kwargs)")
+
+both of which execute the specified function with args and kwargs and
+assert that the given ``ExpectedException`` is raised. The reporter will
+provide you with helpful output in case of failures such as *no
+exception* or *wrong exception*.
+
+Note that it is also possible to specify a "raises" argument to
+``pytest.mark.xfail``, which checks that the test is failing in a more
+specific way than just having any exception raised::
+
+ @pytest.mark.xfail(raises=IndexError)
+ def test_f():
+ f()
+
+Using ``pytest.raises`` is likely to be better for cases where you are testing
+exceptions your own code is deliberately raising, whereas using
+``@pytest.mark.xfail`` with a check function is probably better for something
+like documenting unfixed bugs (where the test describes what "should" happen)
+or bugs in dependencies.
+
+
+.. _`assertwarns`:
+
+Assertions about expected warnings
+-----------------------------------------
+
+.. versionadded:: 2.8
+
+You can check that code raises a particular warning using
+:ref:`pytest.warns <warns>`.
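+
+As a minimal sketch (the warning category and message here are only
+illustrative), a test using ``pytest.warns`` might look like::
+
+    import warnings
+
+    import pytest
+
+    def test_deprecation_warning():
+        with pytest.warns(DeprecationWarning):
+            warnings.warn("this API is deprecated", DeprecationWarning)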
+
+
+.. _newreport:
+
+Making use of context-sensitive comparisons
+-------------------------------------------------
+
+.. versionadded:: 2.0
+
+``pytest`` has rich support for providing context-sensitive information
+when it encounters comparisons. For example::
+
+ # content of test_assert2.py
+
+ def test_set_comparison():
+ set1 = set("1308")
+ set2 = set("8035")
+ assert set1 == set2
+
+if you run this module::
+
+ $ py.test test_assert2.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_assert2.py F
+
+ ======= FAILURES ========
+ _______ test_set_comparison ________
+
+ def test_set_comparison():
+ set1 = set("1308")
+ set2 = set("8035")
+ > assert set1 == set2
+ E assert set(['0', '1', '3', '8']) == set(['0', '3', '5', '8'])
+ E Extra items in the left set:
+ E '1'
+ E Extra items in the right set:
+ E '5'
+ E Use -v to get the full diff
+
+ test_assert2.py:5: AssertionError
+ ======= 1 failed in 0.12 seconds ========
+
+Special comparisons are done for a number of cases:
+
+* comparing long strings: a context diff is shown
+* comparing long sequences: the first failing indices are shown
+* comparing dicts: the differing entries are shown
+
+See the :ref:`reporting demo <tbreportdemo>` for many more examples.
+
+Defining your own assertion comparison
+----------------------------------------------
+
+It is possible to add your own detailed explanations by implementing
+the ``pytest_assertrepr_compare`` hook.
+
+.. autofunction:: _pytest.hookspec.pytest_assertrepr_compare
+
+As an example consider adding the following hook in a ``conftest.py``
+file which provides an alternative explanation for ``Foo`` objects::
+
+ # content of conftest.py
+ from test_foocompare import Foo
+ def pytest_assertrepr_compare(op, left, right):
+ if isinstance(left, Foo) and isinstance(right, Foo) and op == "==":
+ return ['Comparing Foo instances:',
+ ' vals: %s != %s' % (left.val, right.val)]
+
+Now, given this test module::
+
+ # content of test_foocompare.py
+ class Foo:
+ def __init__(self, val):
+ self.val = val
+
+ def __eq__(self, other):
+ return self.val == other.val
+
+ def test_compare():
+ f1 = Foo(1)
+ f2 = Foo(2)
+ assert f1 == f2
+
+you can run the test module and get the custom output defined in
+the conftest file::
+
+ $ py.test -q test_foocompare.py
+ F
+ ======= FAILURES ========
+ _______ test_compare ________
+
+ def test_compare():
+ f1 = Foo(1)
+ f2 = Foo(2)
+ > assert f1 == f2
+ E assert Comparing Foo instances:
+ E vals: 1 != 2
+
+ test_foocompare.py:11: AssertionError
+ 1 failed in 0.12 seconds
+
+.. _assert-details:
+.. _`assert introspection`:
+
+Advanced assertion introspection
+----------------------------------
+
+.. versionadded:: 2.1
+
+
+Reporting details about a failing assertion is achieved either by rewriting
+assert statements before they are run or by re-evaluating the assert expression and
+recording the intermediate values. Which technique is used depends on the
+location of the assert, ``pytest`` configuration, and Python version being used
+to run ``pytest``.
+
+By default, ``pytest`` rewrites assert statements in test modules.
+Rewritten assert statements put introspection information into the assertion failure message.
+``pytest`` only rewrites test modules directly discovered by its test collection process, so
+asserts in supporting modules which are not themselves test modules will not be
+rewritten.
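+
+For example (a minimal sketch; the module names ``checks.py`` and
+``test_numbers.py`` are made up), an assertion helper that lives in a plain
+support module is not rewritten, while the asserts in the collected test
+module are::
+
+    # content of checks.py -- not a test module, so this assert is not rewritten
+    def check_even(value):
+        assert value % 2 == 0
+
+    # content of test_numbers.py -- collected, so its asserts are rewritten
+    from checks import check_even
+
+    def test_even():
+        check_even(2)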
+
+.. note::
+
+   ``pytest`` rewrites test modules on import. It does this by using an import
+   hook to write new pyc files. Most of the time this works transparently.
+   However, if you are working with the import machinery yourself, the import
+   hook may interfere. If this is the case, simply use ``--assert=reinterp`` or
+   ``--assert=plain``. Additionally, rewriting will fail silently if it cannot
+   write new pycs, e.g. on a read-only filesystem or in a zipfile.
+
+If an assert statement has not been rewritten or the Python version is less than
+2.6, ``pytest`` falls back on assert reinterpretation. In assert
+reinterpretation, ``pytest`` walks the frame of the function containing the
+assert statement to discover sub-expression results of the failing assert
+statement. You can force ``pytest`` to always use assertion reinterpretation by
+passing the ``--assert=reinterp`` option.
+
+Assert reinterpretation has a caveat not present with assert rewriting: If
+evaluating the assert expression has side effects you may get a warning that the
+intermediate values could not be determined safely. A common example of this
+issue is an assertion which reads from a file::
+
+ assert f.read() != '...'
+
+If this assertion fails then the re-evaluation will probably succeed!
+This is because ``f.read()`` will return an empty string when it is
+called the second time during the re-evaluation. However, it is
+easy to rewrite the assertion and avoid any trouble::
+
+ content = f.read()
+ assert content != '...'
+
+All assert introspection can be turned off by passing ``--assert=plain``.
+
+For further information, Benjamin Peterson wrote up `Behind the scenes of pytest's new assertion rewriting <http://pybites.blogspot.com/2011/07/behind-scenes-of-pytests-new-assertion.html>`_.
+
+.. versionadded:: 2.1
+ Add assert rewriting as an alternate introspection technique.
+
+.. versionchanged:: 2.1
+ Introduce the ``--assert`` option. Deprecate ``--no-assert`` and
+ ``--nomagic``.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/bash-completion.rst b/testing/web-platform/tests/tools/pytest/doc/en/bash-completion.rst
new file mode 100644
index 000000000..b2a52fa63
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/bash-completion.rst
@@ -0,0 +1,28 @@
+
+.. _bash_completion:
+
+Setting up bash completion
+==========================
+
+When using bash as your shell, ``pytest`` can use argcomplete
+(https://argcomplete.readthedocs.org/) for auto-completion.
+For this ``argcomplete`` needs to be installed **and** enabled.
+
+Install argcomplete using::
+
+ sudo pip install 'argcomplete>=0.5.7'
+
+For global activation of all argcomplete-enabled Python applications run::
+
+ sudo activate-global-python-argcomplete
+
+For permanent (but not global) ``pytest`` activation, use::
+
+ register-python-argcomplete py.test >> ~/.bashrc
+
+For one-time activation of argcomplete for ``pytest`` only, use::
+
+ eval "$(register-python-argcomplete py.test)"
+
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/builtin.rst b/testing/web-platform/tests/tools/pytest/doc/en/builtin.rst
new file mode 100644
index 000000000..b18c3f828
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/builtin.rst
@@ -0,0 +1,134 @@
+
+.. _`pytest helpers`:
+
+Pytest API and builtin fixtures
+================================================
+
+This is a list of ``pytest.*`` API functions and fixtures.
+
+For information on plugin hooks and objects, see :ref:`plugins`.
+
+For information on the ``pytest.mark`` mechanism, see :ref:`mark`.
+
+For the objects below, you can also ask for help interactively, e.g. by
+typing the following at the Python interactive prompt::
+
+ import pytest
+ help(pytest)
+
+.. currentmodule:: pytest
+
+Invoking pytest interactively
+---------------------------------------------------
+
+.. autofunction:: main
+
+More examples at :ref:`pytest.main-usage`
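+
+As a minimal sketch (the options passed here are only an example),
+``pytest.main`` can be invoked from Python code like this::
+
+    import pytest
+
+    # run quietly and stop after the first failure; returns an exit code
+    exit_code = pytest.main(["-q", "-x"])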
+
+
+Helpers for assertions about Exceptions/Warnings
+--------------------------------------------------------
+
+.. autofunction:: raises
+
+Examples at :ref:`assertraises`.
+
+.. autofunction:: deprecated_call
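+
+A minimal sketch of using ``deprecated_call`` (the ``old_api`` function is
+made up for the example)::
+
+    import warnings
+
+    import pytest
+
+    def old_api():
+        warnings.warn("use new_api instead", DeprecationWarning)
+
+    def test_old_api_warns():
+        pytest.deprecated_call(old_api)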
+
+Raising a specific test outcome
+--------------------------------------
+
+You can use the following functions in your test, fixture or setup
+functions to force a certain test outcome. Note that most often
+you can use declarative marks instead, see :ref:`skipping`.
+A minimal sketch follows the list of functions below.
+
+.. autofunction:: _pytest.runner.fail
+.. autofunction:: _pytest.runner.skip
+.. autofunction:: _pytest.runner.importorskip
+.. autofunction:: _pytest.skipping.xfail
+.. autofunction:: _pytest.runner.exit
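+
+A minimal sketch of forcing outcomes imperatively (the ``docutils`` import
+and the platform check are only examples)::
+
+    import sys
+
+    import pytest
+
+    def test_outcomes():
+        # skip the whole test if the optional dependency is not installed
+        docutils = pytest.importorskip("docutils")
+        if sys.platform.startswith("win"):
+            # skip imperatively on a condition computed at runtime
+            pytest.skip("example: not supported on Windows")
+        if docutils is None:
+            # fail explicitly with a message (never reached here)
+            pytest.fail("importorskip should never return None")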
+
+fixtures and requests
+-----------------------------------------------------
+
+To mark a fixture function:
+
+.. autofunction:: _pytest.python.fixture
+
+Tutorial at :ref:`fixtures`.
+
+The ``request`` object can be used from fixture functions:
+
+.. autoclass:: _pytest.python.FixtureRequest()
+ :members:
+
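+A minimal sketch of a fixture that uses ``request`` for teardown (the
+resource here is just a dictionary standing in for something real)::
+
+    import pytest
+
+    @pytest.fixture
+    def resource(request):
+        handle = {"name": "example-resource"}
+        def finalize():
+            handle.clear()          # teardown runs after the test finishes
+        request.addfinalizer(finalize)
+        return handle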
+
+.. _builtinfixtures:
+.. _builtinfuncargs:
+
+Builtin fixtures/function arguments
+-----------------------------------------
+
+You can ask for available builtin or project-custom
+:ref:`fixtures <fixtures>` by typing::
+
+ $ py.test -q --fixtures
+ cache
+ Return a cache object that can persist state between testing sessions.
+
+ cache.get(key, default)
+ cache.set(key, value)
+
+ Keys must be a ``/`` separated value, where the first part is usually the
+ name of your plugin or application to avoid clashes with other cache users.
+
+ Values can be any object handled by the json stdlib module.
+ capsys
+ enables capturing of writes to sys.stdout/sys.stderr and makes
+ captured output available via ``capsys.readouterr()`` method calls
+ which return a ``(out, err)`` tuple.
+ capfd
+ enables capturing of writes to file descriptors 1 and 2 and makes
+ captured output available via ``capfd.readouterr()`` method calls
+ which return a ``(out, err)`` tuple.
+ record_xml_property
+ Fixture that adds extra xml properties to the tag for the calling test.
+ The fixture is callable with (name, value), with value being automatically
+ xml-encoded.
+ monkeypatch
+ The returned ``monkeypatch`` funcarg provides these
+ helper methods to modify objects, dictionaries or os.environ::
+
+ monkeypatch.setattr(obj, name, value, raising=True)
+ monkeypatch.delattr(obj, name, raising=True)
+ monkeypatch.setitem(mapping, name, value)
+ monkeypatch.delitem(obj, name, raising=True)
+ monkeypatch.setenv(name, value, prepend=False)
+ monkeypatch.delenv(name, value, raising=True)
+ monkeypatch.syspath_prepend(path)
+ monkeypatch.chdir(path)
+
+ All modifications will be undone after the requesting
+ test function has finished. The ``raising``
+ parameter determines if a KeyError or AttributeError
+ will be raised if the set/deletion operation has no target.
+ pytestconfig
+ the pytest config object with access to command line opts.
+ recwarn
+ Return a WarningsRecorder instance that provides these methods:
+
+ * ``pop(category=None)``: return last warning matching the category.
+ * ``clear()``: clear list of warnings
+
+ See http://docs.python.org/library/warnings.html for information
+ on warning categories.
+ tmpdir_factory
+ Return a TempdirFactory instance for the test session.
+ tmpdir
+ return a temporary directory path object
+ which is unique to each test function invocation,
+ created as a sub directory of the base temporary
+ directory. The returned object is a `py.path.local`_
+ path object.
+
+ no tests ran in 0.12 seconds
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/cache.rst b/testing/web-platform/tests/tools/pytest/doc/en/cache.rst
new file mode 100644
index 000000000..52abb52a0
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/cache.rst
@@ -0,0 +1,278 @@
+Cache: working with cross-testrun state
+=======================================
+
+.. versionadded:: 2.8
+
+.. warning::
+
+   The functionality of this core plugin was previously distributed
+   as a third party plugin named ``pytest-cache``. The core plugin
+   is compatible regarding command line options and API usage, except that you
+   can only store/retrieve data between test runs that is json-serializable.
+
+
+Usage
+---------
+
+The plugin provides two command line options to rerun failures from the
+last ``py.test`` invocation:
+
+* ``--lf``, ``--last-failed`` - to only re-run the failures.
+* ``--ff``, ``--failed-first`` - to run the failures first and then the rest of
+ the tests.
+
+For cleanup (usually not needed), a ``--cache-clear`` option allows removing
+all cross-session cache contents ahead of a test run.
+
+Other plugins may access the `config.cache`_ object to set/get
+**json encodable** values between ``py.test`` invocations.
+
+.. note::
+
+ This plugin is enabled by default, but can be disabled if needed: see
+ :ref:`cmdunregister` (the internal name for this plugin is
+ ``cacheprovider``).
+
+
+Rerunning only failures or failures first
+-----------------------------------------------
+
+First, let's create 50 test invocations, of which only 2 fail::
+
+ # content of test_50.py
+ import pytest
+
+ @pytest.mark.parametrize("i", range(50))
+ def test_num(i):
+ if i in (17, 25):
+ pytest.fail("bad luck")
+
+If you run this for the first time you will see two failures::
+
+ $ py.test -q
+ .................F.......F........................
+ ======= FAILURES ========
+ _______ test_num[17] ________
+
+ i = 17
+
+ @pytest.mark.parametrize("i", range(50))
+ def test_num(i):
+ if i in (17, 25):
+ > pytest.fail("bad luck")
+ E Failed: bad luck
+
+ test_50.py:6: Failed
+ _______ test_num[25] ________
+
+ i = 25
+
+ @pytest.mark.parametrize("i", range(50))
+ def test_num(i):
+ if i in (17, 25):
+ > pytest.fail("bad luck")
+ E Failed: bad luck
+
+ test_50.py:6: Failed
+ 2 failed, 48 passed in 0.12 seconds
+
+If you then run it with ``--lf``::
+
+ $ py.test --lf
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ run-last-failure: rerun last 2 failures
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 50 items
+
+ test_50.py FF
+
+ ======= FAILURES ========
+ _______ test_num[17] ________
+
+ i = 17
+
+ @pytest.mark.parametrize("i", range(50))
+ def test_num(i):
+ if i in (17, 25):
+ > pytest.fail("bad luck")
+ E Failed: bad luck
+
+ test_50.py:6: Failed
+ _______ test_num[25] ________
+
+ i = 25
+
+ @pytest.mark.parametrize("i", range(50))
+ def test_num(i):
+ if i in (17, 25):
+ > pytest.fail("bad luck")
+ E Failed: bad luck
+
+ test_50.py:6: Failed
+ ======= 2 failed, 48 deselected in 0.12 seconds ========
+
+You have run only the two failing tests from the last run, while the other 48
+tests have not been run ("deselected").
+
+Now, if you run with the ``--ff`` option, all tests will be run but the
+previous failures will be executed first (as can be seen from the series
+of ``FF`` and dots)::
+
+ $ py.test --ff
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ run-last-failure: rerun last 2 failures first
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 50 items
+
+ test_50.py FF................................................
+
+ ======= FAILURES ========
+ _______ test_num[17] ________
+
+ i = 17
+
+ @pytest.mark.parametrize("i", range(50))
+ def test_num(i):
+ if i in (17, 25):
+ > pytest.fail("bad luck")
+ E Failed: bad luck
+
+ test_50.py:6: Failed
+ _______ test_num[25] ________
+
+ i = 25
+
+ @pytest.mark.parametrize("i", range(50))
+ def test_num(i):
+ if i in (17, 25):
+ > pytest.fail("bad luck")
+ E Failed: bad luck
+
+ test_50.py:6: Failed
+ ======= 2 failed, 48 passed in 0.12 seconds ========
+
+.. _`config.cache`:
+
+The new config.cache object
+--------------------------------
+
+.. regendoc:wipe
+
+Plugins or conftest.py support code can get a cached value using the
+pytest ``config`` object. Here is a basic example plugin which
+implements a :ref:`fixture` which re-uses previously created state
+across py.test invocations::
+
+ # content of test_caching.py
+ import pytest
+ import time
+
+ @pytest.fixture
+ def mydata(request):
+ val = request.config.cache.get("example/value", None)
+ if val is None:
+ time.sleep(9*0.6) # expensive computation :)
+ val = 42
+ request.config.cache.set("example/value", val)
+ return val
+
+ def test_function(mydata):
+ assert mydata == 23
+
+If you run this command once, it will take a while because
+of the sleep::
+
+ $ py.test -q
+ F
+ ======= FAILURES ========
+ _______ test_function ________
+
+ mydata = 42
+
+ def test_function(mydata):
+ > assert mydata == 23
+ E assert 42 == 23
+
+ test_caching.py:14: AssertionError
+ 1 failed in 0.12 seconds
+
+If you run it a second time the value will be retrieved from
+the cache and this will be quick::
+
+ $ py.test -q
+ F
+ ======= FAILURES ========
+ _______ test_function ________
+
+ mydata = 42
+
+ def test_function(mydata):
+ > assert mydata == 23
+ E assert 42 == 23
+
+ test_caching.py:14: AssertionError
+ 1 failed in 0.12 seconds
+
+See the `cache-api`_ for more details.
+
+
+Inspecting Cache content
+-------------------------------
+
+You can always peek at the content of the cache using the
+``--cache-show`` command line option::
+
+    py.test --cache-show
+
+This shows the cache directory and the stored keys and values without
+collecting or running any tests.
+
+Clearing Cache content
+-------------------------------
+
+You can instruct pytest to clear all cache files and values
+by adding the ``--cache-clear`` option like this::
+
+ py.test --cache-clear
+
+This is recommended for invocations from Continuous Integration
+servers where isolation and correctness are more important
+than speed.
+
+
+.. _`cache-api`:
+
+config.cache API
+------------------
+
+The ``config.cache`` object allows other plugins,
+including ``conftest.py`` files,
+to safely and flexibly store and retrieve values across
+test runs because the ``config`` object is available
+in many places.
+
+Under the hood, the cache plugin uses the simple
+dumps/loads API of the json stdlib module.
+
+.. currentmodule:: _pytest.cacheprovider
+
+.. automethod:: Cache.get
+.. automethod:: Cache.set
+.. automethod:: Cache.makedir
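+
+A minimal sketch of ``Cache.makedir`` in a ``conftest.py`` fixture (the
+directory name ``mydownloads`` is arbitrary)::
+
+    # content of conftest.py
+    import pytest
+
+    @pytest.fixture
+    def downloads_dir(request):
+        # a unique py.path.local directory that is kept across test runs
+        return request.config.cache.makedir("mydownloads")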
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/capture.rst b/testing/web-platform/tests/tools/pytest/doc/en/capture.rst
new file mode 100644
index 000000000..8892f5be7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/capture.rst
@@ -0,0 +1,118 @@
+
+.. _`captures`:
+
+Capturing of the stdout/stderr output
+=========================================================
+
+Default stdout/stderr/stdin capturing behaviour
+---------------------------------------------------------
+
+During test execution any output sent to ``stdout`` and ``stderr`` is
+captured. If a test or a setup method fails, its corresponding captured
+output will usually be shown along with the failure traceback.
+
+In addition, ``stdin`` is set to a "null" object which will
+fail on attempts to read from it because it is rarely desired
+to wait for interactive input when running automated tests.
+
+By default capturing is done by intercepting writes to low level
+file descriptors. This makes it possible to capture output from simple
+print statements as well as output from a subprocess started by
+a test.
+
+Setting capturing methods or disabling capturing
+-------------------------------------------------
+
+There are two ways in which ``pytest`` can perform capturing:
+
+* file descriptor (FD) level capturing (default): All writes going to the
+ operating system file descriptors 1 and 2 will be captured.
+
+* ``sys`` level capturing: Only writes to Python files ``sys.stdout``
+  and ``sys.stderr`` will be captured. No capturing of writes to
+  file descriptors is performed.
+
+.. _`disable capturing`:
+
+You can influence output capturing mechanisms from the command line::
+
+ py.test -s # disable all capturing
+ py.test --capture=sys # replace sys.stdout/stderr with in-mem files
+ py.test --capture=fd # also point filedescriptors 1 and 2 to temp file
+
+.. _printdebugging:
+
+Using print statements for debugging
+---------------------------------------------------
+
+One primary benefit of the default capturing of stdout/stderr output
+is that you can use print statements for debugging::
+
+ # content of test_module.py
+
+ def setup_function(function):
+ print ("setting up %s" % function)
+
+ def test_func1():
+ assert True
+
+ def test_func2():
+ assert False
+
+and running this module will show you precisely the output
+of the failing function and hide the other one::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py .F
+
+ ======= FAILURES ========
+ _______ test_func2 ________
+
+ def test_func2():
+ > assert False
+ E assert False
+
+ test_module.py:9: AssertionError
+ -------------------------- Captured stdout setup ---------------------------
+ setting up <function test_func2 at 0xdeadbeef>
+ ======= 1 failed, 1 passed in 0.12 seconds ========
+
+Accessing captured output from a test function
+---------------------------------------------------
+
+The ``capsys`` and ``capfd`` fixtures allow access to the stdout/stderr
+output created during test execution. Here is an example test function
+that performs some output related checks:
+
+.. code-block:: python
+
+    import sys
+
+    def test_myoutput(capsys):  # or use "capfd" for fd-level
+        print ("hello")
+        sys.stderr.write("world\n")
+        out, err = capsys.readouterr()
+        assert out == "hello\n"
+        assert err == "world\n"
+        print ("next")
+        out, err = capsys.readouterr()
+        assert out == "next\n"
+
+The ``readouterr()`` call snapshots the output so far,
+and capturing continues afterwards. After the test
+function finishes, the original streams will
+be restored. Using ``capsys`` this way frees your
+test from having to care about setting/resetting
+output streams and also interacts well with pytest's
+own per-test capturing.
+
+If you want to capture at the file descriptor level you can use
+the ``capfd`` fixture, which offers the exact
+same interface but also captures output from
+libraries or subprocesses that write directly to operating
+system level output streams (FD 1 and FD 2).
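+
+A minimal sketch using ``capfd`` to check output written by a child process
+(assuming a POSIX system where the ``echo`` executable is available)::
+
+    import subprocess
+
+    def test_subprocess_output(capfd):
+        subprocess.call(["echo", "hello from a subprocess"])
+        out, err = capfd.readouterr()
+        assert "hello from a subprocess" in out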
+
+.. include:: links.inc
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/changelog.rst b/testing/web-platform/tests/tools/pytest/doc/en/changelog.rst
new file mode 100644
index 000000000..a59b3c7e2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/changelog.rst
@@ -0,0 +1,7 @@
+
+.. _changelog:
+
+Changelog history
+=================================
+
+.. include:: ../../CHANGELOG.rst
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/check_sphinx.py b/testing/web-platform/tests/tools/pytest/doc/en/check_sphinx.py
new file mode 100644
index 000000000..0f536ffa6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/check_sphinx.py
@@ -0,0 +1,17 @@
+import py
+import subprocess
+def test_build_docs(tmpdir):
+ doctrees = tmpdir.join("doctrees")
+ htmldir = tmpdir.join("html")
+ subprocess.check_call([
+ "sphinx-build", "-W", "-bhtml",
+ "-d", str(doctrees), ".", str(htmldir)])
+
+def test_linkcheck(tmpdir):
+ doctrees = tmpdir.join("doctrees")
+ htmldir = tmpdir.join("html")
+ subprocess.check_call(
+ ["sphinx-build", "-blinkcheck",
+ "-d", str(doctrees), ".", str(htmldir)])
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/conf.py b/testing/web-platform/tests/tools/pytest/doc/en/conf.py
new file mode 100644
index 000000000..aca0442c5
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/conf.py
@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+#
+# pytest documentation build configuration file, created by
+# sphinx-quickstart on Fri Oct 8 17:54:28 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+# The short X.Y version.
+
+import os, sys
+sys.path.insert(0, os.path.dirname(__file__))
+import _getdoctarget
+
+version = _getdoctarget.get_minor_version_string()
+release = _getdoctarget.get_version_string()
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+autodoc_member_order = "bysource"
+todo_include_todos = 1
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.autosummary',
+ 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'contents'
+
+# General information about the project.
+project = u'pytest'
+copyright = u'2015, holger krekel and pytest-dev team'
+
+
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['links.inc', '_build', 'naming20.rst', 'test/*',
+ "old_*",
+ '*attic*',
+ '*/attic*',
+ 'funcargs.rst',
+ 'setup.rst',
+ 'example/remoteinterp.rst',
+ ]
+
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = False
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+sys.path.append(os.path.abspath('_themes'))
+html_theme_path = ['_themes']
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'flask'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+ 'index_logo': None
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+html_short_title = "pytest-%s" % release
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+html_logo = "img/pytest1.png"
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = "img/pytest1favi.ico"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+#html_sidebars = {'index': 'indexsidebar.html'}
+
+html_sidebars = {
+ 'index': [
+ 'sidebarintro.html',
+ 'globaltoc.html',
+ 'links.html',
+ 'sourcelink.html',
+ 'searchbox.html'
+ ],
+ '**': [
+ 'globaltoc.html',
+ 'relations.html',
+ 'links.html',
+ 'sourcelink.html',
+ 'searchbox.html'
+ ]
+}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+#html_additional_pages = {'index': 'index.html'}
+
+
+# If false, no module index is generated.
+html_domain_indices = True
+
+# If false, no index is generated.
+html_use_index = False
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = False
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'pytestdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('contents', 'pytest.tex', u'pytest Documentation',
+ u'holger krekel, trainer and consultant, http://merlinux.eu', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+latex_logo = 'img/pytest1.png'
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+latex_domain_indices = False
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('usage', 'pytest', u'pytest usage',
+ [u'holger krekel at merlinux eu'], 1)
+]
+
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = u'pytest'
+epub_author = u'holger krekel at merlinux eu'
+epub_publisher = u'holger krekel at merlinux eu'
+epub_copyright = u'2013, holger krekel et alii'
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+
+# -- Options for texinfo output ------------------------------------------------
+
+texinfo_documents = [
+ (master_doc, 'pytest', 'pytest Documentation',
+ ('Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*'
+ 'Floris Bruynooghe@*others'),
+ 'pytest',
+     'simple powerful testing with Python',
+ 'Programming',
+ 1),
+]
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'python': ('http://docs.python.org/', None),
+# 'lib': ("http://docs.python.org/2.7library/", None),
+ }
+
+
+def setup(app):
+ #from sphinx.ext.autodoc import cut_lines
+ #app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
+ app.add_description_unit('confval', 'confval',
+ objname='configuration value',
+ indextemplate='pair: %s; configuration value')
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/conftest.py
new file mode 100644
index 000000000..1a62e1b5d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/conftest.py
@@ -0,0 +1 @@
+collect_ignore = ["conf.py"]
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/contact.rst b/testing/web-platform/tests/tools/pytest/doc/en/contact.rst
new file mode 100644
index 000000000..d4a1a03de
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/contact.rst
@@ -0,0 +1,51 @@
+
+.. _`contact channels`:
+.. _`contact`:
+
+Contact channels
+===================================
+
+- `pytest issue tracker`_ to report bugs or suggest features (for version
+ 2.0 and above).
+
+- `pytest on stackoverflow.com <http://stackoverflow.com/search?q=pytest>`_
+  to post questions with the tag ``pytest``. New questions will usually
+ be seen by pytest users or developers and answered quickly.
+
+- `Testing In Python`_: a mailing list for Python testing tools and discussion.
+
+- `pytest-dev at python.org (mailing list)`_: pytest-specific announcements and discussions.
+
+- `pytest-commit at python.org (mailing list)`_: for commits and new issues
+
+- :doc:`contribution guide <contributing>` for help on submitting pull
+ requests to bitbucket (including using git via gitifyhg).
+
+- #pylib on irc.freenode.net IRC channel for random questions.
+
+- private mail to Holger.Krekel at gmail com if you want to communicate sensitive issues
+
+
+- `merlinux.eu`_ offers pytest and tox-related professional teaching and
+ consulting.
+
+.. _`pytest issue tracker`: https://github.com/pytest-dev/pytest/issues
+.. _`old issue tracker`: http://bitbucket.org/hpk42/py-trunk/issues/
+
+.. _`merlinux.eu`: http://merlinux.eu
+
+.. _`get an account`:
+
+.. _tetamap: http://tetamap.wordpress.com
+
+.. _`@pylibcommit`: http://twitter.com/pylibcommit
+
+
+.. _`Testing in Python`: http://lists.idyll.org/listinfo/testing-in-python
+.. _FOAF: http://en.wikipedia.org/wiki/FOAF
+.. _`py-dev`:
+.. _`development mailing list`:
+.. _`pytest-dev at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-dev
+.. _`py-svn`:
+.. _`pytest-commit at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-commit
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/contents.rst b/testing/web-platform/tests/tools/pytest/doc/en/contents.rst
new file mode 100644
index 000000000..48c3471b5
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/contents.rst
@@ -0,0 +1,39 @@
+.. _toc:
+
+Full pytest documentation
+===========================
+
+`Download latest version as PDF <pytest.pdf>`_
+
+.. `Download latest version as EPUB <http://media.readthedocs.org/epub/pytest/latest/pytest.epub>`_
+
+.. toctree::
+ :maxdepth: 2
+
+ overview
+ apiref
+ example/index
+ monkeypatch
+ tmpdir
+ capture
+ recwarn
+ cache
+ plugins
+
+ contributing
+ talks
+
+.. only:: html
+
+ .. toctree::
+
+ funcarg_compare
+ announce/index
+
+.. only:: html
+
+ .. toctree::
+ :hidden:
+
+ changelog
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/contributing.rst b/testing/web-platform/tests/tools/pytest/doc/en/contributing.rst
new file mode 100644
index 000000000..2b6578f6b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/contributing.rst
@@ -0,0 +1,3 @@
+.. _contributing:
+
+.. include:: ../../CONTRIBUTING.rst
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/customize.rst b/testing/web-platform/tests/tools/pytest/doc/en/customize.rst
new file mode 100644
index 000000000..34e319c24
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/customize.rst
@@ -0,0 +1,228 @@
+Basic test configuration
+===================================
+
+Command line options and configuration file settings
+-----------------------------------------------------------------
+
+You can get help on command line options and values in INI-style
+configuration files by using the general help option::
+
+ py.test -h # prints options _and_ config file settings
+
+This will display command line and configuration file settings
+which were registered by installed plugins.
+
+.. _rootdir:
+.. _inifiles:
+
+initialization: determining rootdir and inifile
+-----------------------------------------------
+
+.. versionadded:: 2.7
+
+pytest determines a "rootdir" for each test run which depends on
+the command line arguments (specified test files, paths) and on
+the existence of inifiles. The determined rootdir and ini-file are
+printed as part of the pytest header. The rootdir is used for constructing
+"nodeids" during collection and may also be used by plugins to store
+project/testrun-specific information.
+
+Here is the algorithm which finds the rootdir from ``args``:
+
+- determine the common ancestor directory for the specified ``args``.
+
+- look for ``pytest.ini``, ``tox.ini`` and ``setup.cfg`` files in the
+ ancestor directory and upwards. If one is matched, it becomes the
+ ini-file and its directory becomes the rootdir. An existing
+ ``pytest.ini`` file will always be considered a match whereas
+ ``tox.ini`` and ``setup.cfg`` will only match if they contain
+ a ``[pytest]`` section.
+
+- if no ini-file was found, look for ``setup.py`` upwards from
+ the common ancestor directory to determine the ``rootdir``.
+
+- if no ini-file and no ``setup.py`` was found, use the already
+  determined common ancestor as the root directory. This allows you
+  to use pytest in structures that are not part of a package
+  and don't have any particular ini-file configuration.
+
+Note that options from multiple ini-file candidates are never merged;
+the first one wins (``pytest.ini`` always wins, even if it does not
+contain a ``[pytest]`` section).
+
+The ``config`` object will subsequently carry these attributes:
+
+- ``config.rootdir``: the determined root directory, guaranteed to exist.
+
+- ``config.inifile``: the determined ini-file, may be ``None``.
+
+The rootdir is used as a reference directory for constructing test
+addresses ("nodeids") and can also be used by plugins for storing
+per-testrun information.
+
+Example::
+
+ py.test path/to/testdir path/other/
+
+will determine the common ancestor as ``path`` and then
+check for ini-files as follows::
+
+ # first look for pytest.ini files
+ path/pytest.ini
+ path/setup.cfg # must also contain [pytest] section to match
+ path/tox.ini # must also contain [pytest] section to match
+ pytest.ini
+ ... # all the way down to the root
+
+ # now look for setup.py
+ path/setup.py
+ setup.py
+ ... # all the way down to the root
+
+
+.. _`how to change command line options defaults`:
+.. _`adding default options`:
+
+How to change command line options defaults
+------------------------------------------------
+
+It can be tedious to type the same series of command line options
+every time you use ``pytest``. For example, if you always want to see
+detailed info on skipped and xfailed tests, as well as have terser "dot"
+progress output, you can write it into a configuration file:
+
+.. code-block:: ini
+
+ # content of pytest.ini
+ # (or tox.ini or setup.cfg)
+ [pytest]
+ addopts = -rsxX -q
+
+Alternatively, you can set a PYTEST_ADDOPTS environment variable to add command
+line options while the environment is in use::
+
+ export PYTEST_ADDOPTS="-rsxX -q"
+
+From now on, running ``pytest`` will add the specified options.
+
+
+
+Builtin configuration file options
+----------------------------------------------
+
+.. confval:: minversion
+
+ Specifies a minimal pytest version required for running tests.
+
+ minversion = 2.1 # will fail if we run with pytest-2.0
+
+.. confval:: addopts
+
+ Add the specified ``OPTS`` to the set of command line arguments as if they
+ had been specified by the user. Example: if you have this ini file content:
+
+ .. code-block:: ini
+
+ [pytest]
+ addopts = --maxfail=2 -rf # exit after 2 failures, report fail info
+
+ issuing ``py.test test_hello.py`` actually means::
+
+ py.test --maxfail=2 -rf test_hello.py
+
+ Default is to add no options.
+
+.. confval:: norecursedirs
+
+ Set the directory basename patterns to avoid when recursing
+ for test discovery. The individual (fnmatch-style) patterns are
+   applied to the basename of a directory to decide whether to recurse into it.
+ Pattern matching characters::
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ Default patterns are ``'.*', 'CVS', '_darcs', '{arch}', '*.egg'``.
+ Setting a ``norecursedirs`` replaces the default. Here is an example of
+ how to avoid certain directories:
+
+ .. code-block:: ini
+
+ # content of setup.cfg
+ [pytest]
+ norecursedirs = .svn _build tmp*
+
+ This would tell ``pytest`` to not look into typical subversion or
+ sphinx-build directories or into any ``tmp`` prefixed directory.
+
+.. confval:: testpaths
+
+ .. versionadded:: 2.8
+
+   Sets the list of directories that should be searched for tests when
+   no specific directories, files or test ids are given on the command
+   line, and when executing pytest from the :ref:`rootdir <rootdir>` directory.
+ Useful when all project tests are in a known location to speed up
+ test collection and to avoid picking up undesired tests by accident.
+
+ .. code-block:: ini
+
+ # content of pytest.ini
+ [pytest]
+ testpaths = testing doc
+
+ This tells pytest to only look for tests in ``testing`` and ``doc``
+ directories when executing from the root directory.
+
+.. confval:: python_files
+
+ One or more Glob-style file patterns determining which python files
+ are considered as test modules.
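+
+   For example, to also collect files whose names end in ``_check.py``
+   (the extra pattern here is only an illustration):
+
+   .. code-block:: ini
+
+       # content of pytest.ini
+       [pytest]
+       python_files = test_*.py *_check.py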
+
+.. confval:: python_classes
+
+ One or more name prefixes or glob-style patterns determining which classes
+ are considered for test collection. Here is an example of how to collect
+ tests from classes that end in ``Suite``:
+
+ .. code-block:: ini
+
+ # content of pytest.ini
+ [pytest]
+ python_classes = *Suite
+
+ Note that ``unittest.TestCase`` derived classes are always collected
+ regardless of this option, as ``unittest``'s own collection framework is used
+ to collect those tests.
+
+.. confval:: python_functions
+
+ One or more name prefixes or glob-patterns determining which test functions
+ and methods are considered tests. Here is an example of how
+ to collect test functions and methods that end in ``_test``:
+
+ .. code-block:: ini
+
+ # content of pytest.ini
+ [pytest]
+ python_functions = *_test
+
+   Note that this has no effect on methods that live on a
+   ``unittest.TestCase`` derived class, as ``unittest``'s own collection
+   framework is used to collect those tests.
+
+ See :ref:`change naming conventions` for more detailed examples.
+
+.. confval:: doctest_optionflags
+
+ One or more doctest flag names from the standard ``doctest`` module.
+ :doc:`See how py.test handles doctests <doctest>`.
+
+.. confval:: confcutdir
+
+   Sets a directory in which the upward search for ``conftest.py`` files stops.
+   By default, pytest stops searching for ``conftest.py`` files upwards
+   at the ``pytest.ini``/``tox.ini``/``setup.cfg`` file of the project, if any,
+   or at the file-system root.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/doctest.rst b/testing/web-platform/tests/tools/pytest/doc/en/doctest.rst
new file mode 100644
index 000000000..db764141e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/doctest.rst
@@ -0,0 +1,105 @@
+
+Doctest integration for modules and test files
+=========================================================
+
+By default all files matching the ``test*.txt`` pattern will
+be run through the Python standard ``doctest`` module. You
+can change the pattern by issuing::
+
+ py.test --doctest-glob='*.rst'
+
+on the command line. Since version ``2.9``, ``--doctest-glob``
+can be given multiple times on the command line.
+
+You can also trigger running of doctests
+from docstrings in all Python modules (including regular
+Python test modules)::
+
+ py.test --doctest-modules
+
+You can make these changes permanent in your project by
+putting them into a pytest.ini file like this:
+
+.. code-block:: ini
+
+ # content of pytest.ini
+ [pytest]
+ addopts = --doctest-modules
+
+If you then have a text file like this::
+
+ # content of example.rst
+
+ hello this is a doctest
+ >>> x = 3
+ >>> x
+ 3
+
+and another like this::
+
+ # content of mymodule.py
+ def something():
+ """ a doctest in a docstring
+ >>> something()
+ 42
+ """
+ return 42
+
+then you can just invoke ``py.test`` without command line options::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
+ collected 1 items
+
+ mymodule.py .
+
+ ======= 1 passed in 0.12 seconds ========
+
+It is possible to use fixtures using the ``getfixture`` helper::
+
+ # content of example.rst
+ >>> tmp = getfixture('tmpdir')
+ >>> ...
+ >>>
+
+Also, :ref:`usefixtures` and :ref:`autouse` fixtures are supported
+when executing text doctest files.
+
+The standard ``doctest`` module provides some setting flags to configure the
+strictness of doctest tests. In py.test, you can enable those flags
+using the configuration file. To make pytest ignore trailing whitespace and
+ignore lengthy exception stack traces you can just write:
+
+.. code-block:: ini
+
+ [pytest]
+ doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
+
+py.test also introduces new options to allow doctests to run in Python 2 and
+Python 3 unchanged:
+
+* ``ALLOW_UNICODE``: when enabled, the ``u`` prefix is stripped from unicode
+ strings in expected doctest output.
+
+* ``ALLOW_BYTES``: when enabled, the ``b`` prefix is stripped from byte strings
+ in expected doctest output.
+
+As with any other option flag, these flags can be enabled in ``pytest.ini`` using
+the ``doctest_optionflags`` ini option:
+
+.. code-block:: ini
+
+ [pytest]
+ doctest_optionflags = ALLOW_UNICODE ALLOW_BYTES
+
+
+Alternatively, it can be enabled by an inline comment in the doctest
+itself::
+
+ # content of example.rst
+ >>> get_unicode_greeting() # doctest: +ALLOW_UNICODE
+ 'Hello'
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/failure_demo.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/failure_demo.py
new file mode 100644
index 000000000..a4ff758b1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/failure_demo.py
@@ -0,0 +1,238 @@
+from pytest import raises
+import _pytest._code
+import py
+
+def otherfunc(a,b):
+ assert a==b
+
+def somefunc(x,y):
+ otherfunc(x,y)
+
+def otherfunc_multi(a,b):
+ assert (a ==
+ b)
+
+def test_generative(param1, param2):
+ assert param1 * 2 < param2
+
+def pytest_generate_tests(metafunc):
+ if 'param1' in metafunc.fixturenames:
+ metafunc.addcall(funcargs=dict(param1=3, param2=6))
+
+class TestFailing(object):
+ def test_simple(self):
+ def f():
+ return 42
+ def g():
+ return 43
+
+ assert f() == g()
+
+ def test_simple_multiline(self):
+ otherfunc_multi(
+ 42,
+ 6*9)
+
+ def test_not(self):
+ def f():
+ return 42
+ assert not f()
+
+class TestSpecialisedExplanations(object):
+ def test_eq_text(self):
+ assert 'spam' == 'eggs'
+
+ def test_eq_similar_text(self):
+ assert 'foo 1 bar' == 'foo 2 bar'
+
+ def test_eq_multiline_text(self):
+ assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
+
+ def test_eq_long_text(self):
+ a = '1'*100 + 'a' + '2'*100
+ b = '1'*100 + 'b' + '2'*100
+ assert a == b
+
+ def test_eq_long_text_multiline(self):
+ a = '1\n'*100 + 'a' + '2\n'*100
+ b = '1\n'*100 + 'b' + '2\n'*100
+ assert a == b
+
+ def test_eq_list(self):
+ assert [0, 1, 2] == [0, 1, 3]
+
+ def test_eq_list_long(self):
+ a = [0]*100 + [1] + [3]*100
+ b = [0]*100 + [2] + [3]*100
+ assert a == b
+
+ def test_eq_dict(self):
+ assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
+
+ def test_eq_set(self):
+ assert set([0, 10, 11, 12]) == set([0, 20, 21])
+
+ def test_eq_longer_list(self):
+ assert [1,2] == [1,2,3]
+
+ def test_in_list(self):
+ assert 1 in [0, 2, 3, 4, 5]
+
+ def test_not_in_text_multiline(self):
+ text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
+ assert 'foo' not in text
+
+ def test_not_in_text_single(self):
+ text = 'single foo line'
+ assert 'foo' not in text
+
+ def test_not_in_text_single_long(self):
+ text = 'head ' * 50 + 'foo ' + 'tail ' * 20
+ assert 'foo' not in text
+
+ def test_not_in_text_single_long_term(self):
+ text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
+ assert 'f'*70 not in text
+
+
+def test_attribute():
+ class Foo(object):
+ b = 1
+ i = Foo()
+ assert i.b == 2
+
+
+def test_attribute_instance():
+ class Foo(object):
+ b = 1
+ assert Foo().b == 2
+
+
+def test_attribute_failure():
+ class Foo(object):
+ def _get_b(self):
+ raise Exception('Failed to get attrib')
+ b = property(_get_b)
+ i = Foo()
+ assert i.b == 2
+
+
+def test_attribute_multiple():
+ class Foo(object):
+ b = 1
+ class Bar(object):
+ b = 2
+ assert Foo().b == Bar().b
+
+
+def globf(x):
+ return x+1
+
+class TestRaises:
+ def test_raises(self):
+ s = 'qwe'
+ raises(TypeError, "int(s)")
+
+ def test_raises_doesnt(self):
+ raises(IOError, "int('3')")
+
+ def test_raise(self):
+ raise ValueError("demo error")
+
+ def test_tupleerror(self):
+ a,b = [1]
+
+ def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
+ l = [1,2,3]
+ print ("l is %r" % l)
+ a,b = l.pop()
+
+ def test_some_error(self):
+ if namenotexi:
+ pass
+
+ def func1(self):
+ assert 41 == 42
+
+
+# thanks to Matthew Scott for this test
+def test_dynamic_compile_shows_nicely():
+ src = 'def foo():\n assert 1 == 0\n'
+ name = 'abc-123'
+ module = py.std.imp.new_module(name)
+ code = _pytest._code.compile(src, name, 'exec')
+ py.builtin.exec_(code, module.__dict__)
+ py.std.sys.modules[name] = module
+ module.foo()
+
+
+
+class TestMoreErrors:
+ def test_complex_error(self):
+ def f():
+ return 44
+ def g():
+ return 43
+ somefunc(f(), g())
+
+ def test_z1_unpack_error(self):
+ l = []
+ a,b = l
+
+ def test_z2_type_error(self):
+ l = 3
+ a,b = l
+
+ def test_startswith(self):
+ s = "123"
+ g = "456"
+ assert s.startswith(g)
+
+ def test_startswith_nested(self):
+ def f():
+ return "123"
+ def g():
+ return "456"
+ assert f().startswith(g())
+
+ def test_global_func(self):
+ assert isinstance(globf(42), float)
+
+ def test_instance(self):
+ self.x = 6*7
+ assert self.x != 42
+
+ def test_compare(self):
+ assert globf(10) < 5
+
+ def test_try_finally(self):
+ x = 1
+ try:
+ assert x == 0
+ finally:
+ x = 0
+
+
+class TestCustomAssertMsg:
+
+ def test_single_line(self):
+ class A:
+ a = 1
+ b = 2
+ assert A.a == b, "A.a appears not to be b"
+
+ def test_multiline(self):
+ class A:
+ a = 1
+ b = 2
+ assert A.a == b, "A.a appears not to be b\n" \
+ "or does not appear to be b\none of those"
+
+ def test_custom_repr(self):
+ class JSON:
+ a = 1
+ def __repr__(self):
+ return "This is JSON\n{\n 'foo': 'bar'\n}"
+ a = JSON()
+ b = 2
+ assert a.a == b, a
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/conftest.py
new file mode 100644
index 000000000..71e8c54be
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/conftest.py
@@ -0,0 +1,10 @@
+import pytest, py
+mydir = py.path.local(__file__).dirpath()
+
+def pytest_runtest_setup(item):
+ if isinstance(item, pytest.Function):
+ if not item.fspath.relto(mydir):
+ return
+ mod = item.getparent(pytest.Module).obj
+ if hasattr(mod, 'hello'):
+ print ("mod.hello %r" % (mod.hello,))
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/test_hello.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/test_hello.py
new file mode 100644
index 000000000..828e6b9fd
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/test_hello.py
@@ -0,0 +1,5 @@
+
+hello = "world"
+
+def test_func():
+ pass
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_failures.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_failures.py
new file mode 100644
index 000000000..2e5cd20b1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_failures.py
@@ -0,0 +1,14 @@
+
+import py
+failure_demo = py.path.local(__file__).dirpath('failure_demo.py')
+pytest_plugins = 'pytester',
+
+def test_failure_demo_fails_properly(testdir):
+ target = testdir.tmpdir.join(failure_demo.basename)
+ failure_demo.copy(target)
+ failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
+ result = testdir.runpytest(target, syspathinsert=True)
+ result.stdout.fnmatch_lines([
+ "*42 failed*"
+ ])
+ assert result.ret != 0
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py
new file mode 100644
index 000000000..512330cb4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py
@@ -0,0 +1,42 @@
+def setup_module(module):
+ module.TestStateFullThing.classcount = 0
+
+class TestStateFullThing:
+ def setup_class(cls):
+ cls.classcount += 1
+
+ def teardown_class(cls):
+ cls.classcount -= 1
+
+ def setup_method(self, method):
+ self.id = eval(method.__name__[5:])
+
+ def test_42(self):
+ assert self.classcount == 1
+ assert self.id == 42
+
+ def test_23(self):
+ assert self.classcount == 1
+ assert self.id == 23
+
+def teardown_module(module):
+ assert module.TestStateFullThing.classcount == 0
+
+""" For this example the control flow happens as follows::
+ import test_setup_flow_example
+ setup_module(test_setup_flow_example)
+ setup_class(TestStateFullThing)
+ instance = TestStateFullThing()
+ setup_method(instance, instance.test_42)
+ instance.test_42()
+ setup_method(instance, instance.test_23)
+ instance.test_23()
+ teardown_class(TestStateFullThing)
+ teardown_module(test_setup_flow_example)
+
+Note that ``setup_class(TestStateFullThing)`` is called and not
+``TestStateFullThing.setup_class()`` which would require you
+to insert ``setup_class = classmethod(setup_class)`` to make
+your setup function callable.
+"""
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/attic.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/attic.rst
new file mode 100644
index 000000000..1bc32b283
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/attic.rst
@@ -0,0 +1,79 @@
+
+.. _`accept example`:
+
+example: specifying and selecting acceptance tests
+--------------------------------------------------------------
+
+.. sourcecode:: python
+
+    # ./conftest.py
+    import py
+    import pytest
+
+    def pytest_addoption(parser):
+ group = parser.getgroup("myproject")
+ group.addoption("-A", dest="acceptance", action="store_true",
+ help="run (slow) acceptance tests")
+
+ def pytest_funcarg__accept(request):
+ return AcceptFixture(request)
+
+ class AcceptFixture:
+ def __init__(self, request):
+ if not request.config.option.acceptance:
+ pytest.skip("specify -A to run acceptance tests")
+ self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True)
+
+ def run(self, cmd):
+ """ called by test code to execute an acceptance test. """
+ self.tmpdir.chdir()
+ return py.process.cmdexec(cmd)
+
+
+and the actual test function example:
+
+.. sourcecode:: python
+
+ def test_some_acceptance_aspect(accept):
+ accept.tmpdir.mkdir("somesub")
+ result = accept.run("ls -la")
+ assert "somesub" in result
+
+If you run this test without specifying a command line option,
+the test will get skipped with an appropriate message. Otherwise
+you can start to add convenience and test support methods
+to your ``AcceptFixture`` to drive the running of tools or
+applications and provide ways to make assertions about
+the output.
+
+.. _`decorate a funcarg`:
+
+example: decorating a funcarg in a test module
+--------------------------------------------------------------
+
+For larger scale setups it's sometimes useful to decorate
+a funcarg just for a particular test module. We can
+extend the `accept example`_ by putting this in our test module:
+
+.. sourcecode:: python
+
+ def pytest_funcarg__accept(request):
+ # call the next factory (living in our conftest.py)
+ arg = request.getfuncargvalue("accept")
+ # create a special layout in our tempdir
+ arg.tmpdir.mkdir("special")
+ return arg
+
+ class TestSpecialAcceptance:
+ def test_sometest(self, accept):
+ assert accept.tmpdir.join("special").check()
+
+Our module level factory will be invoked first and it can
+ask its request object to call the next factory and then
+decorate its result. This mechanism allows us to stay
+ignorant of how/where the function argument is provided -
+in our example from a `conftest plugin`_.
+
+Sidenote: the temporary directories used here are instances of
+the `py.path.local`_ class, which provides many of the ``os.path``
+methods in a convenient way.
+
+.. _`py.path.local`: ../path.html#local
+.. _`conftest plugin`: customize.html#conftestplugin
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/conftest.py
new file mode 100644
index 000000000..f905738c4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/conftest.py
@@ -0,0 +1 @@
+collect_ignore = ["nonpython"]
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/conftest.py
new file mode 100644
index 000000000..d689c11b2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/conftest.py
@@ -0,0 +1,18 @@
+
+import pytest
+
+@pytest.fixture("session")
+def setup(request):
+ setup = CostlySetup()
+ request.addfinalizer(setup.finalize)
+ return setup
+
+class CostlySetup:
+ def __init__(self):
+ import time
+ print ("performing costly setup")
+ time.sleep(5)
+ self.timecostly = 1
+
+ def finalize(self):
+ del self.timecostly
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/__init__.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/__init__.py
new file mode 100644
index 000000000..792d60054
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py
new file mode 100644
index 000000000..d97657867
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py
@@ -0,0 +1,3 @@
+
+def test_quick(setup):
+ pass
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/__init__.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/__init__.py
new file mode 100644
index 000000000..792d60054
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/test_two.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/test_two.py
new file mode 100644
index 000000000..6ed6ee4d8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/test_two.py
@@ -0,0 +1,6 @@
+def test_something(setup):
+ assert setup.timecostly == 1
+
+def test_something_more(setup):
+ assert setup.timecostly == 1
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/index.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/index.rst
new file mode 100644
index 000000000..363de5ab7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/index.rst
@@ -0,0 +1,34 @@
+
+.. _examples:
+
+Usages and Examples
+===========================================
+
+Here is a (growing) list of examples. :ref:`Contact <contact>` us if you
+need more examples or have questions. Also take a look at the
+:ref:`comprehensive documentation <toc>` which contains many example
+snippets as well. In addition, `pytest on stackoverflow.com
+<http://stackoverflow.com/search?q=pytest>`_ often comes with example
+answers.
+
+For basic examples, see
+
+- :doc:`../getting-started` for basic introductory examples
+- :ref:`assert` for basic assertion examples
+- :ref:`fixtures` for basic fixture/setup examples
+- :ref:`parametrize` for basic test function parametrization
+- :doc:`../unittest` for basic unittest integration
+- :doc:`../nose` for basic nosetests integration
+
+The following examples aim at various use cases you might encounter.
+
+.. toctree::
+ :maxdepth: 2
+
+ reportingdemo
+ simple
+ parametrize
+ markers
+ special
+ pythoncollection
+ nonpython
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/layout1/setup.cfg b/testing/web-platform/tests/tools/pytest/doc/en/example/layout1/setup.cfg
new file mode 100644
index 000000000..02d3750ee
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/layout1/setup.cfg
@@ -0,0 +1,4 @@
+[pytest]
+testfilepatterns =
+ ${topdir}/tests/unit/test_${basename}
+ ${topdir}/tests/functional/*.py
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/markers.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/markers.rst
new file mode 100644
index 000000000..6bdc60347
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/markers.rst
@@ -0,0 +1,592 @@
+
+.. _`mark examples`:
+
+Working with custom markers
+=================================================
+
+Here are some examples using the :ref:`mark` mechanism.
+
+Marking test functions and selecting them for a run
+----------------------------------------------------
+
+You can "mark" a test function with custom metadata like this::
+
+ # content of test_server.py
+
+ import pytest
+ @pytest.mark.webtest
+ def test_send_http():
+ pass # perform some webtest test for your app
+ def test_something_quick():
+ pass
+ def test_another():
+ pass
+ class TestClass:
+ def test_method(self):
+ pass
+
+.. versionadded:: 2.2
+
+You can then restrict a test run to only run tests marked with ``webtest``::
+
+ $ py.test -v -m webtest
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_send_http PASSED
+
+ ======= 3 tests deselected by "-m 'webtest'" ========
+ ======= 1 passed, 3 deselected in 0.12 seconds ========
+
+Or the inverse, running all tests except the webtest ones::
+
+ $ py.test -v -m "not webtest"
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_something_quick PASSED
+ test_server.py::test_another PASSED
+ test_server.py::TestClass::test_method PASSED
+
+ ======= 1 tests deselected by "-m 'not webtest'" ========
+ ======= 3 passed, 1 deselected in 0.12 seconds ========
+
+Selecting tests based on their node ID
+--------------------------------------
+
+You can provide one or more :ref:`node IDs <node-id>` as positional
+arguments to select only specified tests. This makes it easy to select
+tests based on their module, class, method, or function name::
+
+ $ py.test -v test_server.py::TestClass::test_method
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 5 items
+
+ test_server.py::TestClass::test_method PASSED
+
+ ======= 1 passed in 0.12 seconds ========
+
+You can also select on the class::
+
+ $ py.test -v test_server.py::TestClass
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::TestClass::test_method PASSED
+
+ ======= 1 passed in 0.12 seconds ========
+
+Or select multiple nodes::
+
+ $ py.test -v test_server.py::TestClass test_server.py::test_send_http
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 8 items
+
+ test_server.py::TestClass::test_method PASSED
+ test_server.py::test_send_http PASSED
+
+ ======= 2 passed in 0.12 seconds ========
+
+.. _node-id:
+
+.. note::
+
+ Node IDs are of the form ``module.py::class::method`` or
+ ``module.py::function``. Node IDs control which tests are
+ collected, so ``module.py::class`` will select all test methods
+ on the class. Nodes are also created for each parameter of a
+ parametrized fixture or test, so selecting a parametrized test
+ must include the parameter value, e.g.
+ ``module.py::function[param]``.
+
+ Node IDs for failing tests are displayed in the test summary info
+ when running py.test with the ``-rf`` option. You can also
+ construct Node IDs from the output of ``py.test --collectonly``.
+
+Using ``-k expr`` to select tests based on their name
+-------------------------------------------------------
+
+.. versionadded: 2.0/2.3.4
+
+You can use the ``-k`` command line option to specify an expression
+which implements a substring match on the test names instead of the
+exact match on markers that ``-m`` provides. This makes it easy to
+select tests based on their names::
+
+ $ py.test -v -k http # running with the above defined example module
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_send_http PASSED
+
+ ======= 3 tests deselected by '-khttp' ========
+ ======= 1 passed, 3 deselected in 0.12 seconds ========
+
+And you can also run all tests except the ones that match the keyword::
+
+ $ py.test -k "not send_http" -v
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_something_quick PASSED
+ test_server.py::test_another PASSED
+ test_server.py::TestClass::test_method PASSED
+
+ ======= 1 tests deselected by '-knot send_http' ========
+ ======= 3 passed, 1 deselected in 0.12 seconds ========
+
+Or to select "http" and "quick" tests::
+
+ $ py.test -k "http or quick" -v
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_send_http PASSED
+ test_server.py::test_something_quick PASSED
+
+ ======= 2 tests deselected by '-khttp or quick' ========
+ ======= 2 passed, 2 deselected in 0.12 seconds ========
+
+.. note::
+
+ If you are using expressions such as "X and Y" then both X and Y
+ need to be simple non-keyword names. For example, "pass" or "from"
+ will result in SyntaxErrors because "-k" evaluates the expression.
+
+ However, if the "-k" argument is a simple string, no such restrictions
+ apply. Also "-k 'not STRING'" has no restrictions. You can also
+ specify numbers like "-k 1.3" to match tests which are parametrized
+ with the float "1.3".
+
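+As a hedged illustration of the last point above (the module name, test
+function and parameter values here are invented for this sketch and are not
+part of the official examples)::
+
+    # content of test_float_params.py (hypothetical)
+    import pytest
+
+    @pytest.mark.parametrize("value", [1.3, 2.5])
+    def test_rounding(value):
+        # running ``py.test -k 1.3`` would select only the first of these
+        # two parametrized cases, by matching the "1.3" in its test ID
+        assert round(value) in (1, 2, 3)
+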
+Registering markers
+-------------------------------------
+
+.. versionadded:: 2.2
+
+.. ini-syntax for custom markers:
+
+Registering markers for your test suite is simple::
+
+ # content of pytest.ini
+ [pytest]
+ markers =
+ webtest: mark a test as a webtest.
+
+You can ask which markers exist for your test suite - the list includes our just-defined ``webtest`` marker::
+
+ $ py.test --markers
+ @pytest.mark.webtest: mark a test as a webtest.
+
+ @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
+
+ @pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
+
+ @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
+
+ @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
+
+ @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
+
+ @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
+
+
+For an example on how to add and work with markers from a plugin, see
+:ref:`adding a custom marker from a plugin`.
+
+.. note::
+
+ It is recommended to explicitly register markers so that:
+
+ * there is one place in your test suite defining your markers
+
+ * asking for existing markers via ``py.test --markers`` gives good output
+
+ * typos in function markers are treated as an error if you use
+ the ``--strict`` option. Future versions of ``pytest`` are probably
+ going to start treating non-registered markers as errors at some point.
+
+.. _`scoped-marking`:
+
+Marking whole classes or modules
+----------------------------------------------------
+
+You may use ``pytest.mark`` decorators with classes to apply markers to all of
+their test methods::
+
+ # content of test_mark_classlevel.py
+ import pytest
+ @pytest.mark.webtest
+ class TestClass:
+ def test_startup(self):
+ pass
+ def test_startup_and_more(self):
+ pass
+
+This is equivalent to directly applying the decorator to the
+two test functions.
+
+To remain backward-compatible with Python 2.4 you can also set a
+``pytestmark`` attribute on a TestClass like this::
+
+ import pytest
+
+ class TestClass:
+ pytestmark = pytest.mark.webtest
+
+or if you need to use multiple markers you can use a list::
+
+ import pytest
+
+ class TestClass:
+ pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]
+
+You can also set a module level marker::
+
+ import pytest
+ pytestmark = pytest.mark.webtest
+
+in which case it will be applied to all functions and
+methods defined in the module.
+
+.. _`marking individual tests when using parametrize`:
+
+Marking individual tests when using parametrize
+-----------------------------------------------
+
+When using parametrize, applying a mark will make it apply
+to each individual test. However, it is also possible to
+apply a marker to an individual test instance::
+
+ import pytest
+
+ @pytest.mark.foo
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.bar((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+
+In this example the mark "foo" will apply to each of the three
+tests, whereas the "bar" mark is only applied to the second test.
+Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with parametrize`.
+
+.. note::
+
+ If the data you are parametrizing happen to be single callables, you need to be careful
+ when marking these items. `pytest.mark.xfail(my_func)` won't work because it's also the
+ signature of a function being decorated. To resolve this ambiguity, you need to pass a
+ reason argument:
+ `pytest.mark.xfail(func_bar, reason="Issue#7")`.
+
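+A minimal sketch of that workaround (``func_bar`` and the ``Issue#7`` reason
+come from the note above; the surrounding test function is invented for
+illustration)::
+
+    import pytest
+
+    def func_bar():
+        return 42
+
+    @pytest.mark.parametrize("func", [
+        # passing reason= disambiguates this from the decorator form
+        pytest.mark.xfail(func_bar, reason="Issue#7"),
+    ])
+    def test_callable_param(func):
+        assert func() == 42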
+
+.. _`adding a custom marker from a plugin`:
+
+Custom marker and command line option to control test runs
+----------------------------------------------------------
+
+.. regendoc:wipe
+
+Plugins can provide custom markers and implement specific behaviour
+based on them. This is a self-contained example which adds a command
+line option and a parametrized test function marker to run tests
+specified via named environments::
+
+ # content of conftest.py
+
+ import pytest
+ def pytest_addoption(parser):
+ parser.addoption("-E", action="store", metavar="NAME",
+ help="only run tests matching the environment NAME.")
+
+ def pytest_configure(config):
+ # register an additional marker
+ config.addinivalue_line("markers",
+ "env(name): mark test to run only on named environment")
+
+ def pytest_runtest_setup(item):
+ envmarker = item.get_marker("env")
+ if envmarker is not None:
+ envname = envmarker.args[0]
+ if envname != item.config.getoption("-E"):
+ pytest.skip("test requires env %r" % envname)
+
+A test file using this local plugin::
+
+ # content of test_someenv.py
+
+ import pytest
+ @pytest.mark.env("stage1")
+ def test_basic_db_operation():
+ pass
+
+and an example invocation specifying a different environment than the one
+the test needs::
+
+ $ py.test -E stage2
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_someenv.py s
+
+ ======= 1 skipped in 0.12 seconds ========
+
+and here is one that specifies exactly the environment needed::
+
+ $ py.test -E stage1
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_someenv.py .
+
+ ======= 1 passed in 0.12 seconds ========
+
+The ``--markers`` option always gives you a list of available markers::
+
+ $ py.test --markers
+ @pytest.mark.env(name): mark test to run only on named environment
+
+ @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
+
+ @pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
+
+ @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
+
+ @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
+
+ @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
+
+ @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
+
+
+Reading markers which were set from multiple places
+----------------------------------------------------
+
+.. versionadded: 2.2.2
+
+.. regendoc:wipe
+
+If you are heavily using markers in your test suite you may encounter the case where a marker is applied several times to a test function. From plugin
+code you can read over all such settings. Example::
+
+ # content of test_mark_three_times.py
+ import pytest
+ pytestmark = pytest.mark.glob("module", x=1)
+
+ @pytest.mark.glob("class", x=2)
+ class TestClass:
+ @pytest.mark.glob("function", x=3)
+ def test_something(self):
+ pass
+
+Here we have the marker "glob" applied three times to the same
+test function. From a conftest file we can read it like this::
+
+ # content of conftest.py
+ import sys
+
+ def pytest_runtest_setup(item):
+ g = item.get_marker("glob")
+ if g is not None:
+ for info in g:
+ print ("glob args=%s kwargs=%s" %(info.args, info.kwargs))
+ sys.stdout.flush()
+
+Let's run this without capturing output and see what we get::
+
+ $ py.test -q -s
+ glob args=('function',) kwargs={'x': 3}
+ glob args=('class',) kwargs={'x': 2}
+ glob args=('module',) kwargs={'x': 1}
+ .
+ 1 passed in 0.12 seconds
+
+marking platform specific tests with pytest
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+Consider you have a test suite which marks tests for particular platforms,
+namely ``pytest.mark.darwin``, ``pytest.mark.win32`` etc. and you
+also have tests that run on all platforms and have no specific
+marker. If you now want to have a way to only run the tests
+for your particular platform, you could use the following plugin::
+
+ # content of conftest.py
+ #
+ import sys
+ import pytest
+
+ ALL = set("darwin linux2 win32".split())
+
+ def pytest_runtest_setup(item):
+ if isinstance(item, item.Function):
+ plat = sys.platform
+ if not item.get_marker(plat):
+ if ALL.intersection(item.keywords):
+ pytest.skip("cannot run on platform %s" %(plat))
+
+then tests will be skipped if they were specified for a different platform.
+Let's write a little test file to show what this looks like::
+
+ # content of test_plat.py
+
+ import pytest
+
+ @pytest.mark.darwin
+ def test_if_apple_is_evil():
+ pass
+
+ @pytest.mark.linux2
+ def test_if_linux_works():
+ pass
+
+ @pytest.mark.win32
+ def test_if_win32_crashes():
+ pass
+
+ def test_runs_everywhere():
+ pass
+
+then the tests marked for other platforms will be skipped and the remaining tests will run, as expected::
+
+ $ py.test -rs # this option reports skip reasons
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_plat.py sss.
+ ======= short test summary info ========
+ SKIP [3] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux
+
+ ======= 1 passed, 3 skipped in 0.12 seconds ========
+
+Note that if you select a platform via the ``-m`` marker command line option like this::
+
+ $ py.test -m linux2
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_plat.py s
+
+ ======= 3 tests deselected by "-m 'linux2'" ========
+ ======= 1 skipped, 3 deselected in 0.12 seconds ========
+
+then the unmarked tests will not be run. It is thus a way to restrict the run to the platform-specific tests.
+
+Automatically adding markers based on test names
+--------------------------------------------------------
+
+.. regendoc:wipe
+
+If you have a test suite where test function names indicate a certain
+type of test, you can implement a hook that automatically defines
+markers so that you can use the ``-m`` option with it. Let's look
+at this test module::
+
+ # content of test_module.py
+
+ def test_interface_simple():
+ assert 0
+
+ def test_interface_complex():
+ assert 0
+
+ def test_event_simple():
+ assert 0
+
+ def test_something_else():
+ assert 0
+
+We want to dynamically define two markers and can do it in a
+``conftest.py`` plugin::
+
+ # content of conftest.py
+
+ import pytest
+ def pytest_collection_modifyitems(items):
+ for item in items:
+ if "interface" in item.nodeid:
+ item.add_marker(pytest.mark.interface)
+ elif "event" in item.nodeid:
+ item.add_marker(pytest.mark.event)
+
+We can now use the ``-m`` option to select one set::
+
+ $ py.test -m interface --tb=short
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_module.py FF
+
+ ======= FAILURES ========
+ _______ test_interface_simple ________
+ test_module.py:3: in test_interface_simple
+ assert 0
+ E assert 0
+ _______ test_interface_complex ________
+ test_module.py:6: in test_interface_complex
+ assert 0
+ E assert 0
+ ======= 2 tests deselected by "-m 'interface'" ========
+ ======= 2 failed, 2 deselected in 0.12 seconds ========
+
+or to select both "event" and "interface" tests::
+
+ $ py.test -m "interface or event" --tb=short
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_module.py FFF
+
+ ======= FAILURES ========
+ _______ test_interface_simple ________
+ test_module.py:3: in test_interface_simple
+ assert 0
+ E assert 0
+ _______ test_interface_complex ________
+ test_module.py:6: in test_interface_complex
+ assert 0
+ E assert 0
+ _______ test_event_simple ________
+ test_module.py:9: in test_event_simple
+ assert 0
+ E assert 0
+ ======= 1 tests deselected by "-m 'interface or event'" ========
+ ======= 3 failed, 1 deselected in 0.12 seconds ========
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/multipython.py b/testing/web-platform/tests/tools/pytest/doc/en/example/multipython.py
new file mode 100644
index 000000000..66a368a12
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/multipython.py
@@ -0,0 +1,52 @@
+"""
+module containing parametrized tests for cross-python
+serialization via the pickle module.
+"""
+import py
+import pytest
+import _pytest._code
+
+pythonlist = ['python2.6', 'python2.7', 'python3.3']
+@pytest.fixture(params=pythonlist)
+def python1(request, tmpdir):
+ picklefile = tmpdir.join("data.pickle")
+ return Python(request.param, picklefile)
+
+@pytest.fixture(params=pythonlist)
+def python2(request, python1):
+ return Python(request.param, python1.picklefile)
+
+class Python:
+ def __init__(self, version, picklefile):
+ self.pythonpath = py.path.local.sysfind(version)
+ if not self.pythonpath:
+ pytest.skip("%r not found" %(version,))
+ self.picklefile = picklefile
+ def dumps(self, obj):
+ dumpfile = self.picklefile.dirpath("dump.py")
+ dumpfile.write(_pytest._code.Source("""
+ import pickle
+ f = open(%r, 'wb')
+ s = pickle.dump(%r, f, protocol=2)
+ f.close()
+ """ % (str(self.picklefile), obj)))
+ py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))
+
+ def load_and_is_true(self, expression):
+ loadfile = self.picklefile.dirpath("load.py")
+ loadfile.write(_pytest._code.Source("""
+ import pickle
+ f = open(%r, 'rb')
+ obj = pickle.load(f)
+ f.close()
+ res = eval(%r)
+ if not res:
+ raise SystemExit(1)
+ """ % (str(self.picklefile), expression)))
+ print (loadfile)
+ py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))
+
+@pytest.mark.parametrize("obj", [42, {}, {1:3},])
+def test_basic_objects(python1, python2, obj):
+ python1.dumps(obj)
+ python2.load_and_is_true("obj == %s" % obj)
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython.rst
new file mode 100644
index 000000000..6437e3984
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython.rst
@@ -0,0 +1,91 @@
+
+.. _`non-python tests`:
+
+Working with non-python tests
+====================================================
+
+.. _`yaml plugin`:
+
+A basic example for specifying tests in Yaml files
+--------------------------------------------------------------
+
+.. _`pytest-yamlwsgi`: http://bitbucket.org/aafshar/pytest-yamlwsgi/src/tip/pytest_yamlwsgi.py
+.. _`PyYAML`: http://pypi.python.org/pypi/PyYAML/
+
+Here is an example ``conftest.py`` (extracted from Ali Afshar's special purpose `pytest-yamlwsgi`_ plugin). This ``conftest.py`` will collect ``test*.yml`` files and will execute the yaml-formatted content as custom tests:
+
+.. include:: nonpython/conftest.py
+ :literal:
+
+You can create a simple example file:
+
+.. include:: nonpython/test_simple.yml
+ :literal:
+
+and if you installed `PyYAML`_ or a compatible YAML-parser you can
+now execute the test specification::
+
+ nonpython $ py.test test_simple.yml
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
+ collected 2 items
+
+ test_simple.yml F.
+
+ ======= FAILURES ========
+ _______ usecase: hello ________
+ usecase execution failed
+ spec failed: 'some': 'other'
+ no further details known at this point.
+ ======= 1 failed, 1 passed in 0.12 seconds ========
+
+.. regendoc:wipe
+
+You get one dot for the passing ``sub1: sub1`` check and one failure.
+Obviously in the above ``conftest.py`` you'll want to implement a more
+interesting interpretation of the yaml-values. You can easily write
+your own domain specific testing language this way.
+
+.. note::
+
+ ``repr_failure(excinfo)`` is called for representing test failures.
+ If you create custom collection nodes you can return an error
+ representation string of your choice. It
+ will be reported as a (red) string.
+
+``reportinfo()`` is used for representing the test location and is also
+consulted when reporting in ``verbose`` mode::
+
+ nonpython $ py.test -v
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
+ collecting ... collected 2 items
+
+ test_simple.yml::hello FAILED
+ test_simple.yml::ok PASSED
+
+ ======= FAILURES ========
+ _______ usecase: hello ________
+ usecase execution failed
+ spec failed: 'some': 'other'
+ no further details known at this point.
+ ======= 1 failed, 1 passed in 0.12 seconds ========
+
+.. regendoc:wipe
+
+While developing your custom test collection and execution it's also
+interesting to just look at the collection tree::
+
+ nonpython $ py.test --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
+ collected 2 items
+ <YamlFile 'test_simple.yml'>
+ <YamlItem 'hello'>
+ <YamlItem 'ok'>
+
+ ======= no tests ran in 0.12 seconds ========
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/__init__.py b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/__init__.py
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/conftest.py
new file mode 100644
index 000000000..2406e8f10
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/conftest.py
@@ -0,0 +1,40 @@
+# content of conftest.py
+
+import pytest
+
+def pytest_collect_file(parent, path):
+ if path.ext == ".yml" and path.basename.startswith("test"):
+ return YamlFile(path, parent)
+
+class YamlFile(pytest.File):
+ def collect(self):
+ import yaml # we need a yaml parser, e.g. PyYAML
+ raw = yaml.safe_load(self.fspath.open())
+ for name, spec in raw.items():
+ yield YamlItem(name, self, spec)
+
+class YamlItem(pytest.Item):
+ def __init__(self, name, parent, spec):
+ super(YamlItem, self).__init__(name, parent)
+ self.spec = spec
+
+ def runtest(self):
+ for name, value in self.spec.items():
+ # some custom test execution (dumb example follows)
+ if name != value:
+ raise YamlException(self, name, value)
+
+ def repr_failure(self, excinfo):
+ """ called when self.runtest() raises an exception. """
+ if isinstance(excinfo.value, YamlException):
+ return "\n".join([
+ "usecase execution failed",
+ " spec failed: %r: %r" % excinfo.value.args[1:3],
+ " no further details known at this point."
+ ])
+
+ def reportinfo(self):
+ return self.fspath, 0, "usecase: %s" % self.name
+
+class YamlException(Exception):
+ """ custom exception for error reporting. """
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/test_simple.yml b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/test_simple.yml
new file mode 100644
index 000000000..f0d8d11fc
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/test_simple.yml
@@ -0,0 +1,7 @@
+# test_simple.yml
+ok:
+ sub1: sub1
+
+hello:
+ world: world
+ some: other
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/parametrize.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/parametrize.rst
new file mode 100644
index 000000000..5d637ffcb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/parametrize.rst
@@ -0,0 +1,475 @@
+
+.. _paramexamples:
+
+Parametrizing tests
+=================================================
+
+.. currentmodule:: _pytest.python
+
+``pytest`` allows you to easily parametrize test functions.
+For basic docs, see :ref:`parametrize-basics`.
+
+In the following we provide some examples using
+the builtin mechanisms.
+
+Generating parameters combinations, depending on command line
+----------------------------------------------------------------------------
+
+.. regendoc:wipe
+
+Let's say we want to execute a test with different computation
+parameters and the parameter range should be determined by a command
+line argument. Let's first write a simple (do-nothing) computation test::
+
+ # content of test_compute.py
+
+ def test_compute(param1):
+ assert param1 < 4
+
+Now we add a test configuration like this::
+
+ # content of conftest.py
+
+ def pytest_addoption(parser):
+ parser.addoption("--all", action="store_true",
+ help="run all combinations")
+
+ def pytest_generate_tests(metafunc):
+ if 'param1' in metafunc.fixturenames:
+ if metafunc.config.option.all:
+ end = 5
+ else:
+ end = 2
+ metafunc.parametrize("param1", range(end))
+
+This means that we only run 2 tests if we do not pass ``--all``::
+
+ $ py.test -q test_compute.py
+ ..
+ 2 passed in 0.12 seconds
+
+We run only two computations, so we see two dots.
+Let's run the full monty::
+
+ $ py.test -q --all
+ ....F
+ ======= FAILURES ========
+ _______ test_compute[4] ________
+
+ param1 = 4
+
+ def test_compute(param1):
+ > assert param1 < 4
+ E assert 4 < 4
+
+ test_compute.py:3: AssertionError
+ 1 failed, 4 passed in 0.12 seconds
+
+As expected when running the full range of ``param1`` values
+we'll get an error on the last one.
+
+
+Different options for test IDs
+------------------------------------
+
+pytest will build a string that is the test ID for each set of values in a
+parametrized test. These IDs can be used with ``-k`` to select specific cases
+to run, and they will also identify the specific case when one is failing.
+Running pytest with ``--collect-only`` will show the generated IDs.
+
+Numbers, strings, booleans and None will have their usual string representation
+used in the test ID. For other objects, pytest will make a string based on
+the argument name::
+
+ # content of test_time.py
+
+ import pytest
+
+ from datetime import datetime, timedelta
+
+ testdata = [
+ (datetime(2001, 12, 12), datetime(2001, 12, 11), timedelta(1)),
+ (datetime(2001, 12, 11), datetime(2001, 12, 12), timedelta(-1)),
+ ]
+
+
+ @pytest.mark.parametrize("a,b,expected", testdata)
+ def test_timedistance_v0(a, b, expected):
+ diff = a - b
+ assert diff == expected
+
+
+ @pytest.mark.parametrize("a,b,expected", testdata, ids=["forward", "backward"])
+ def test_timedistance_v1(a, b, expected):
+ diff = a - b
+ assert diff == expected
+
+
+ def idfn(val):
+ if isinstance(val, (datetime,)):
+ # note this wouldn't show any hours/minutes/seconds
+ return val.strftime('%Y%m%d')
+
+
+ @pytest.mark.parametrize("a,b,expected", testdata, ids=idfn)
+ def test_timedistance_v2(a, b, expected):
+ diff = a - b
+ assert diff == expected
+
+
+In ``test_timedistance_v0``, we let pytest generate the test IDs.
+
+In ``test_timedistance_v1``, we specified ``ids`` as a list of strings which were
+used as the test IDs. These are succinct, but can be a pain to maintain.
+
+In ``test_timedistance_v2``, we specified ``ids`` as a function that can generate a
+string representation to make part of the test ID. So our ``datetime`` values use the
+label generated by ``idfn``, but because we didn't generate a label for ``timedelta``
+objects, they are still using the default pytest representation::
+
+
+ $ py.test test_time.py --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 6 items
+ <Module 'test_time.py'>
+ <Function 'test_timedistance_v0[a0-b0-expected0]'>
+ <Function 'test_timedistance_v0[a1-b1-expected1]'>
+ <Function 'test_timedistance_v1[forward]'>
+ <Function 'test_timedistance_v1[backward]'>
+ <Function 'test_timedistance_v2[20011212-20011211-expected0]'>
+ <Function 'test_timedistance_v2[20011211-20011212-expected1]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+A quick port of "testscenarios"
+------------------------------------
+
+.. _`test scenarios`: http://pypi.python.org/pypi/testscenarios/
+
+Here is a quick port to run tests configured with `test scenarios`_,
+an add-on from Robert Collins for the standard unittest framework. We
+only have to work a bit to construct the correct arguments for pytest's
+:py:func:`Metafunc.parametrize`::
+
+ # content of test_scenarios.py
+
+ def pytest_generate_tests(metafunc):
+ idlist = []
+ argvalues = []
+ for scenario in metafunc.cls.scenarios:
+ idlist.append(scenario[0])
+ items = scenario[1].items()
+ argnames = [x[0] for x in items]
+ argvalues.append(([x[1] for x in items]))
+ metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
+
+ scenario1 = ('basic', {'attribute': 'value'})
+ scenario2 = ('advanced', {'attribute': 'value2'})
+
+ class TestSampleWithScenarios:
+ scenarios = [scenario1, scenario2]
+
+ def test_demo1(self, attribute):
+ assert isinstance(attribute, str)
+
+ def test_demo2(self, attribute):
+ assert isinstance(attribute, str)
+
+This is a fully self-contained example which you can run with::
+
+ $ py.test test_scenarios.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_scenarios.py ....
+
+ ======= 4 passed in 0.12 seconds ========
+
+If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
+
+
+ $ py.test --collect-only test_scenarios.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+ <Module 'test_scenarios.py'>
+ <Class 'TestSampleWithScenarios'>
+ <Instance '()'>
+ <Function 'test_demo1[basic]'>
+ <Function 'test_demo2[basic]'>
+ <Function 'test_demo1[advanced]'>
+ <Function 'test_demo2[advanced]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+Note that we told ``metafunc.parametrize()`` that your scenario values
+should be considered class-scoped. With pytest-2.3 this leads to a
+resource-based ordering.
+
+Deferring the setup of parametrized resources
+---------------------------------------------------
+
+.. regendoc:wipe
+
+The parametrization of test functions happens at collection
+time. It is a good idea to set up expensive resources like DB
+connections or subprocesses only when the actual test is run.
+Here is a simple example of how you can achieve that; first,
+the actual test requiring a ``db`` object::
+
+ # content of test_backends.py
+
+ import pytest
+ def test_db_initialized(db):
+ # a dummy test
+ if db.__class__.__name__ == "DB2":
+ pytest.fail("deliberately failing for demo purposes")
+
+We can now add a test configuration that generates two invocations of
+the ``test_db_initialized`` function and also implements a factory that
+creates a database object for the actual test invocations::
+
+ # content of conftest.py
+ import pytest
+
+ def pytest_generate_tests(metafunc):
+ if 'db' in metafunc.fixturenames:
+ metafunc.parametrize("db", ['d1', 'd2'], indirect=True)
+
+ class DB1:
+ "one database object"
+ class DB2:
+ "alternative database object"
+
+ @pytest.fixture
+ def db(request):
+ if request.param == "d1":
+ return DB1()
+ elif request.param == "d2":
+ return DB2()
+ else:
+ raise ValueError("invalid internal test config")
+
+Let's first see what it looks like at collection time::
+
+ $ py.test test_backends.py --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+ <Module 'test_backends.py'>
+ <Function 'test_db_initialized[d1]'>
+ <Function 'test_db_initialized[d2]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+And then when we run the test::
+
+ $ py.test -q test_backends.py
+ .F
+ ======= FAILURES ========
+ _______ test_db_initialized[d2] ________
+
+ db = <conftest.DB2 object at 0xdeadbeef>
+
+ def test_db_initialized(db):
+ # a dummy test
+ if db.__class__.__name__ == "DB2":
+ > pytest.fail("deliberately failing for demo purposes")
+ E Failed: deliberately failing for demo purposes
+
+ test_backends.py:6: Failed
+ 1 failed, 1 passed in 0.12 seconds
+
+The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function instantiated each of the DB values during the setup phase, while ``pytest_generate_tests`` generated two corresponding calls to ``test_db_initialized`` during the collection phase.
+
+.. regendoc:wipe
+
+Apply indirect on particular arguments
+---------------------------------------------------
+
+Very often parametrization uses more than one argument name. The ``indirect``
+parameter can be applied to particular arguments by passing a list or tuple of
+argument names to ``indirect``. In the example below there is a function ``test_indirect`` which uses
+two fixtures: ``x`` and ``y``. Here we give ``indirect`` a list which contains the name of the
+fixture ``x``. The indirect parameter will be applied to this argument only, and the value ``a``
+will be passed to the respective fixture function::
+
+ # content of test_indirect_list.py
+
+ import pytest
+ @pytest.fixture(scope='function')
+ def x(request):
+ return request.param * 3
+
+ @pytest.fixture(scope='function')
+ def y(request):
+ return request.param * 2
+
+ @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])
+ def test_indirect(x,y):
+ assert x == 'aaa'
+ assert y == 'b'
+
+The result of this test will be successful::
+
+ $ py.test test_indirect_list.py --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+ <Module 'test_indirect_list.py'>
+ <Function 'test_indirect[a-b]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. regendoc:wipe
+
+Parametrizing test methods through per-class configuration
+--------------------------------------------------------------
+
+.. _`unittest parametrizer`: https://github.com/testing-cabal/unittest-ext/blob/master/params.py
+
+
+Here is an example ``pytest_generate_tests`` function implementing a
+parametrization scheme similar to Michael Foord's `unittest
+parametrizer`_ but in a lot less code::
+
+ # content of ./test_parametrize.py
+ import pytest
+
+ def pytest_generate_tests(metafunc):
+ # called once per each test function
+ funcarglist = metafunc.cls.params[metafunc.function.__name__]
+ argnames = list(funcarglist[0])
+ metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
+ for funcargs in funcarglist])
+
+ class TestClass:
+ # a map specifying multiple argument sets for a test method
+ params = {
+ 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
+ 'test_zerodivision': [dict(a=1, b=0), ],
+ }
+
+ def test_equals(self, a, b):
+ assert a == b
+
+ def test_zerodivision(self, a, b):
+ pytest.raises(ZeroDivisionError, "a/b")
+
+Our test generator looks up a class-level definition which specifies which
+argument sets to use for each test function. Let's run it::
+
+ $ py.test -q
+ F..
+ ======= FAILURES ========
+ _______ TestClass.test_equals[1-2] ________
+
+ self = <test_parametrize.TestClass object at 0xdeadbeef>, a = 1, b = 2
+
+ def test_equals(self, a, b):
+ > assert a == b
+ E assert 1 == 2
+
+ test_parametrize.py:18: AssertionError
+ 1 failed, 2 passed in 0.12 seconds
+
+Indirect parametrization with multiple fixtures
+--------------------------------------------------------------
+
+Here is a stripped down real-life example of using parametrized
+testing for testing serialization of objects between different python
+interpreters. We define a ``test_basic_objects`` function which
+is to be run with different sets of arguments for its three arguments:
+
+* ``python1``: first python interpreter, run to pickle-dump an object to a file
+* ``python2``: second interpreter, run to pickle-load an object from a file
+* ``obj``: object to be dumped/loaded
+
+.. literalinclude:: multipython.py
+
+Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (3 interpreters times 3 interpreters times 3 objects to serialize/deserialize, i.e. 27 combinations)::
+
+ . $ py.test -rs -q multipython.py
+ ssssssssssss...ssssssssssss
+ ======= short test summary info ========
+ SKIP [12] $REGENDOC_TMPDIR/CWD/multipython.py:23: 'python3.3' not found
+ SKIP [12] $REGENDOC_TMPDIR/CWD/multipython.py:23: 'python2.6' not found
+ 3 passed, 24 skipped in 0.12 seconds
+
+Indirect parametrization of optional implementations/imports
+--------------------------------------------------------------------
+
+If you want to compare the outcomes of several implementations of a given
+API, you can write test functions that receive the already imported implementations
+and get skipped in case the implementation is not importable/available. Let's
+say we have a "base" implementation and the others (possibly optimized ones)
+need to provide similar results::
+
+ # content of conftest.py
+
+ import pytest
+
+ @pytest.fixture(scope="session")
+ def basemod(request):
+ return pytest.importorskip("base")
+
+ @pytest.fixture(scope="session", params=["opt1", "opt2"])
+ def optmod(request):
+ return pytest.importorskip(request.param)
+
+And then a base implementation of a simple function::
+
+ # content of base.py
+ def func1():
+ return 1
+
+And an optimized version::
+
+ # content of opt1.py
+ def func1():
+ return 1.0001
+
+And finally a little test module::
+
+ # content of test_module.py
+
+ def test_func1(basemod, optmod):
+ assert round(basemod.func1(), 3) == round(optmod.func1(), 3)
+
+
+If you run this with reporting for skips enabled::
+
+ $ py.test -rs test_module.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py .s
+ ======= short test summary info ========
+ SKIP [1] $REGENDOC_TMPDIR/conftest.py:10: could not import 'opt2'
+
+ ======= 1 passed, 1 skipped in 0.12 seconds ========
+
+You'll see that we don't have an ``opt2`` module and thus the second test run
+of our ``test_func1`` was skipped. A few notes:
+
+- the fixture functions in the ``conftest.py`` file are "session-scoped" because we
+ don't need to import more than once
+
+- if you have multiple test functions and a skipped import, you will see
+ the ``[1]`` count increasing in the report
+
+- you can put :ref:`@pytest.mark.parametrize <@pytest.mark.parametrize>` style
+ parametrization on the test functions to parametrize input/output
+  values as well; a short sketch of this follows below.
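+
+As a hedged sketch of that last point (the parametrization below is invented;
+only ``basemod``, ``optmod`` and ``func1`` come from the example above)::
+
+    # hypothetical addition to test_module.py
+    import pytest
+
+    @pytest.mark.parametrize("ndigits", [0, 1, 2, 3])
+    def test_func1_rounding(basemod, optmod, ndigits):
+        # both implementations must agree up to ``ndigits`` decimal places
+        assert round(basemod.func1(), ndigits) == round(optmod.func1(), ndigits)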
+
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/conftest.py
new file mode 100644
index 000000000..81cd1fb11
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/conftest.py
@@ -0,0 +1,16 @@
+import sys
+import pytest
+
+py3 = sys.version_info[0] >= 3
+
+class DummyCollector(pytest.collect.File):
+ def collect(self):
+ return []
+
+def pytest_pycollect_makemodule(path, parent):
+ bn = path.basename
+ if "py3" in bn and not py3 or ("py2" in bn and py3):
+ return DummyCollector(path, parent=parent)
+
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py2.py b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py2.py
new file mode 100644
index 000000000..e09ed9466
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py2.py
@@ -0,0 +1,7 @@
+
+def test_exception_syntax():
+ try:
+ 0/0
+ except ZeroDivisionError, e:
+ pass
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py3.py b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py3.py
new file mode 100644
index 000000000..a811f2bbc
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py3.py
@@ -0,0 +1,7 @@
+
+def test_exception_syntax():
+ try:
+ 0/0
+ except ZeroDivisionError as e:
+ pass
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.py b/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.py
new file mode 100644
index 000000000..05858eb85
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.py
@@ -0,0 +1,11 @@
+
+# run this with $ py.test --collect-only test_collectonly.py
+#
+def test_function():
+ pass
+
+class TestClass:
+ def test_method(self):
+ pass
+ def test_anothermethod(self):
+ pass
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.rst
new file mode 100644
index 000000000..5faf4c6c8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.rst
@@ -0,0 +1,192 @@
+Changing standard (Python) test discovery
+===============================================
+
+Ignore paths during test collection
+-----------------------------------
+
+You can easily ignore certain test directories and modules during collection
+by passing the ``--ignore=path`` option on the cli. ``pytest`` allows multiple
+``--ignore`` options. Example::
+
+ tests/
+ ├── example
+ │   ├── test_example_01.py
+ │   ├── test_example_02.py
+ │   └── test_example_03.py
+ ├── foobar
+ │   ├── test_foobar_01.py
+ │   ├── test_foobar_02.py
+ │   └── test_foobar_03.py
+ └── hello
+ └── world
+ ├── test_world_01.py
+ ├── test_world_02.py
+ └── test_world_03.py
+
+Now if you invoke ``pytest`` with ``--ignore=tests/foobar/test_foobar_03.py --ignore=tests/hello/``,
+you will see that ``pytest`` only collects the test modules which do not match the specified patterns::
+
+ ========= test session starts ==========
+ platform darwin -- Python 2.7.10, pytest-2.8.2, py-1.4.30, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 5 items
+
+ tests/example/test_example_01.py .
+ tests/example/test_example_02.py .
+ tests/example/test_example_03.py .
+ tests/foobar/test_foobar_01.py .
+ tests/foobar/test_foobar_02.py .
+
+ ======= 5 passed in 0.02 seconds =======
+
+
+Changing directory recursion
+-----------------------------------------------------
+
+You can set the :confval:`norecursedirs` option in an ini-file, for example your ``setup.cfg`` in the project root directory::
+
+ # content of setup.cfg
+ [pytest]
+ norecursedirs = .svn _build tmp*
+
+This would tell ``pytest`` to not recurse into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory.
+
+.. _`change naming conventions`:
+
+Changing naming conventions
+-----------------------------------------------------
+
+You can configure different naming conventions by setting
+the :confval:`python_files`, :confval:`python_classes` and
+:confval:`python_functions` configuration options. Example::
+
+ # content of setup.cfg
+ # can also be defined in in tox.ini or pytest.ini file
+ [pytest]
+ python_files=check_*.py
+ python_classes=Check
+ python_functions=*_check
+
+This would make ``pytest`` look for tests in files that match the
+``check_*.py`` glob pattern, classes prefixed with ``Check``, and functions
+and methods that match ``*_check``. For example, if we have::
+
+ # content of check_myapp.py
+ class CheckMyApp:
+ def simple_check(self):
+ pass
+ def complex_check(self):
+ pass
+
+then the test collection looks like this::
+
+ $ py.test --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile: setup.cfg
+ collected 2 items
+ <Module 'check_myapp.py'>
+ <Class 'CheckMyApp'>
+ <Instance '()'>
+ <Function 'simple_check'>
+ <Function 'complex_check'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. note::
+
+ the ``python_functions`` and ``python_classes`` options have no effect
+ for ``unittest.TestCase`` test discovery because pytest delegates
+ detection of test case methods to unittest code.
+
+Interpreting cmdline arguments as Python packages
+-----------------------------------------------------
+
+You can use the ``--pyargs`` option to make ``pytest`` try
+interpreting arguments as python package names, deriving
+their file system path and then running the test. For
+example if you have unittest2 installed you can type::
+
+ py.test --pyargs unittest2.test.test_skipping -q
+
+which would run the respective test module. As with other
+options, you can make this change permanent through an ini-file
+using the :confval:`addopts` option::
+
+ # content of pytest.ini
+ [pytest]
+ addopts = --pyargs
+
+Now a simple invocation of ``py.test NAME`` will check
+if NAME exists as an importable package/module and otherwise
+treat it as a filesystem path.
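+
+The same lookup can also be triggered programmatically. As a small sketch
+(assuming, like above, that ``unittest2`` is installed), the earlier command
+line is roughly equivalent to::
+
+    import pytest
+
+    # same as: py.test --pyargs unittest2.test.test_skipping -q
+    pytest.main(["--pyargs", "unittest2.test.test_skipping", "-q"])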
+
+Finding out what is collected
+-----------------------------------------------
+
+You can always peek at the collection tree without running tests like this::
+
+ . $ py.test --collect-only pythoncollection.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
+ collected 3 items
+ <Module 'CWD/pythoncollection.py'>
+ <Function 'test_function'>
+ <Class 'TestClass'>
+ <Instance '()'>
+ <Function 'test_method'>
+ <Function 'test_anothermethod'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+Customizing test collection to find all .py files
+---------------------------------------------------------
+
+.. regendoc:wipe
+
+You can easily instruct ``pytest`` to discover tests from every python file::
+
+
+ # content of pytest.ini
+ [pytest]
+ python_files = *.py
+
+However, many projects will have a ``setup.py`` which they don't want to be imported. Moreover, there may be files only importable by a specific python version.
+For such cases you can dynamically define files to be ignored by listing
+them in a ``conftest.py`` file::
+
+ # content of conftest.py
+ import sys
+
+ collect_ignore = ["setup.py"]
+ if sys.version_info[0] > 2:
+ collect_ignore.append("pkg/module_py2.py")
+
+And then if you have a module file like this::
+
+ # content of pkg/module_py2.py
+ def test_only_on_python2():
+ try:
+ assert 0
+ except Exception, e:
+ pass
+
+and a setup.py dummy file like this::
+
+ # content of setup.py
+ 0/0 # will raise exception if imported
+
+then a pytest run on python2 will find the one test and will
+leave out the setup.py file::
+
+ $ py.test --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
+ collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+If you run with a Python3 interpreter the module added through the conftest.py file will not be considered for test collection.
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/reportingdemo.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/reportingdemo.rst
new file mode 100644
index 000000000..28624aa07
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/reportingdemo.rst
@@ -0,0 +1,598 @@
+
+.. _`tbreportdemo`:
+
+Demo of Python failure reports with pytest
+==================================================
+
+Here is a run of several dozen failures
+and how ``pytest`` presents things (unfortunately
+not showing the nice colors here in the HTML that you
+get on the terminal - we are working on that):
+
+.. code-block:: python
+
+ assertion $ py.test failure_demo.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR/assertion, inifile:
+ collected 42 items
+
+ failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
+
+ ======= FAILURES ========
+ _______ test_generative[0] ________
+
+ param1 = 3, param2 = 6
+
+ def test_generative(param1, param2):
+ > assert param1 * 2 < param2
+ E assert (3 * 2) < 6
+
+ failure_demo.py:16: AssertionError
+ _______ TestFailing.test_simple ________
+
+ self = <failure_demo.TestFailing object at 0xdeadbeef>
+
+ def test_simple(self):
+ def f():
+ return 42
+ def g():
+ return 43
+
+ > assert f() == g()
+ E assert 42 == 43
+ E + where 42 = <function TestFailing.test_simple.<locals>.f at 0xdeadbeef>()
+ E + and 43 = <function TestFailing.test_simple.<locals>.g at 0xdeadbeef>()
+
+ failure_demo.py:29: AssertionError
+ _______ TestFailing.test_simple_multiline ________
+
+ self = <failure_demo.TestFailing object at 0xdeadbeef>
+
+ def test_simple_multiline(self):
+ otherfunc_multi(
+ 42,
+ > 6*9)
+
+ failure_demo.py:34:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ a = 42, b = 54
+
+ def otherfunc_multi(a,b):
+ > assert (a ==
+ b)
+ E assert 42 == 54
+
+ failure_demo.py:12: AssertionError
+ _______ TestFailing.test_not ________
+
+ self = <failure_demo.TestFailing object at 0xdeadbeef>
+
+ def test_not(self):
+ def f():
+ return 42
+ > assert not f()
+ E assert not 42
+ E + where 42 = <function TestFailing.test_not.<locals>.f at 0xdeadbeef>()
+
+ failure_demo.py:39: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_text ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_text(self):
+ > assert 'spam' == 'eggs'
+ E assert 'spam' == 'eggs'
+ E - spam
+ E + eggs
+
+ failure_demo.py:43: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_similar_text ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_similar_text(self):
+ > assert 'foo 1 bar' == 'foo 2 bar'
+ E assert 'foo 1 bar' == 'foo 2 bar'
+ E - foo 1 bar
+ E ? ^
+ E + foo 2 bar
+ E ? ^
+
+ failure_demo.py:46: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_multiline_text ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_multiline_text(self):
+ > assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
+ E assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
+ E foo
+ E - spam
+ E + eggs
+ E bar
+
+ failure_demo.py:49: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_long_text ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_long_text(self):
+ a = '1'*100 + 'a' + '2'*100
+ b = '1'*100 + 'b' + '2'*100
+ > assert a == b
+ E assert '111111111111...2222222222222' == '1111111111111...2222222222222'
+ E Skipping 90 identical leading characters in diff, use -v to show
+ E Skipping 91 identical trailing characters in diff, use -v to show
+ E - 1111111111a222222222
+ E ? ^
+ E + 1111111111b222222222
+ E ? ^
+
+ failure_demo.py:54: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_long_text_multiline ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_long_text_multiline(self):
+ a = '1\n'*100 + 'a' + '2\n'*100
+ b = '1\n'*100 + 'b' + '2\n'*100
+ > assert a == b
+ E assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n'
+ E Skipping 190 identical leading characters in diff, use -v to show
+ E Skipping 191 identical trailing characters in diff, use -v to show
+ E 1
+ E 1
+ E 1
+ E 1
+ E 1
+ E - a2
+ E + b2
+ E 2
+ E 2
+ E 2
+ E 2
+
+ failure_demo.py:59: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_list ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_list(self):
+ > assert [0, 1, 2] == [0, 1, 3]
+ E assert [0, 1, 2] == [0, 1, 3]
+ E At index 2 diff: 2 != 3
+ E Use -v to get the full diff
+
+ failure_demo.py:62: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_list_long ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_list_long(self):
+ a = [0]*100 + [1] + [3]*100
+ b = [0]*100 + [2] + [3]*100
+ > assert a == b
+ E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...]
+ E At index 100 diff: 1 != 2
+ E Use -v to get the full diff
+
+ failure_demo.py:67: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_dict ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_dict(self):
+ > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
+ E assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
+ E Omitting 1 identical items, use -v to show
+ E Differing items:
+ E {'b': 1} != {'b': 2}
+ E Left contains more items:
+ E {'c': 0}
+ E Right contains more items:
+ E {'d': 0}
+ E Use -v to get the full diff
+
+ failure_demo.py:70: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_set ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_set(self):
+ > assert set([0, 10, 11, 12]) == set([0, 20, 21])
+ E assert set([0, 10, 11, 12]) == set([0, 20, 21])
+ E Extra items in the left set:
+ E 10
+ E 11
+ E 12
+ E Extra items in the right set:
+ E 20
+ E 21
+ E Use -v to get the full diff
+
+ failure_demo.py:73: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_longer_list ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_longer_list(self):
+ > assert [1,2] == [1,2,3]
+ E assert [1, 2] == [1, 2, 3]
+ E Right contains more items, first extra item: 3
+ E Use -v to get the full diff
+
+ failure_demo.py:76: AssertionError
+ _______ TestSpecialisedExplanations.test_in_list ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_in_list(self):
+ > assert 1 in [0, 2, 3, 4, 5]
+ E assert 1 in [0, 2, 3, 4, 5]
+
+ failure_demo.py:79: AssertionError
+ _______ TestSpecialisedExplanations.test_not_in_text_multiline ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_not_in_text_multiline(self):
+ text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
+ > assert 'foo' not in text
+ E assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail'
+ E 'foo' is contained here:
+ E some multiline
+ E text
+ E which
+ E includes foo
+ E ? +++
+ E and a
+ E tail
+
+ failure_demo.py:83: AssertionError
+ _______ TestSpecialisedExplanations.test_not_in_text_single ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_not_in_text_single(self):
+ text = 'single foo line'
+ > assert 'foo' not in text
+ E assert 'foo' not in 'single foo line'
+ E 'foo' is contained here:
+ E single foo line
+ E ? +++
+
+ failure_demo.py:87: AssertionError
+ _______ TestSpecialisedExplanations.test_not_in_text_single_long ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_not_in_text_single_long(self):
+ text = 'head ' * 50 + 'foo ' + 'tail ' * 20
+ > assert 'foo' not in text
+ E assert 'foo' not in 'head head head head hea...ail tail tail tail tail '
+ E 'foo' is contained here:
+ E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
+ E ? +++
+
+ failure_demo.py:91: AssertionError
+ ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_not_in_text_single_long_term(self):
+ text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
+ > assert 'f'*70 not in text
+ E assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail '
+ E 'ffffffffffffffffff...fffffffffffffffffff' is contained here:
+ E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
+ E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ failure_demo.py:95: AssertionError
+ _______ test_attribute ________
+
+ def test_attribute():
+ class Foo(object):
+ b = 1
+ i = Foo()
+ > assert i.b == 2
+ E assert 1 == 2
+ E + where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0xdeadbeef>.b
+
+ failure_demo.py:102: AssertionError
+ _______ test_attribute_instance ________
+
+ def test_attribute_instance():
+ class Foo(object):
+ b = 1
+ > assert Foo().b == 2
+ E assert 1 == 2
+ E + where 1 = <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef>.b
+ E + where <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()
+
+ failure_demo.py:108: AssertionError
+ _______ test_attribute_failure ________
+
+ def test_attribute_failure():
+ class Foo(object):
+ def _get_b(self):
+ raise Exception('Failed to get attrib')
+ b = property(_get_b)
+ i = Foo()
+ > assert i.b == 2
+
+ failure_demo.py:117:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ self = <failure_demo.test_attribute_failure.<locals>.Foo object at 0xdeadbeef>
+
+ def _get_b(self):
+ > raise Exception('Failed to get attrib')
+ E Exception: Failed to get attrib
+
+ failure_demo.py:114: Exception
+ _______ test_attribute_multiple ________
+
+ def test_attribute_multiple():
+ class Foo(object):
+ b = 1
+ class Bar(object):
+ b = 2
+ > assert Foo().b == Bar().b
+ E assert 1 == 2
+ E + where 1 = <failure_demo.test_attribute_multiple.<locals>.Foo object at 0xdeadbeef>.b
+ E + where <failure_demo.test_attribute_multiple.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Foo'>()
+ E + and 2 = <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef>.b
+ E + where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()
+
+ failure_demo.py:125: AssertionError
+ _______ TestRaises.test_raises ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_raises(self):
+ s = 'qwe'
+ > raises(TypeError, "int(s)")
+
+ failure_demo.py:134:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ > int(s)
+ E ValueError: invalid literal for int() with base 10: 'qwe'
+
+ <0-codegen $PYTHON_PREFIX/lib/python3.4/site-packages/_pytest/python.py:1302>:1: ValueError
+ _______ TestRaises.test_raises_doesnt ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_raises_doesnt(self):
+ > raises(IOError, "int('3')")
+ E Failed: DID NOT RAISE <class 'OSError'>
+
+ failure_demo.py:137: Failed
+ _______ TestRaises.test_raise ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_raise(self):
+ > raise ValueError("demo error")
+ E ValueError: demo error
+
+ failure_demo.py:140: ValueError
+ _______ TestRaises.test_tupleerror ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_tupleerror(self):
+ > a,b = [1]
+ E ValueError: need more than 1 value to unpack
+
+ failure_demo.py:143: ValueError
+ ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
+ l = [1,2,3]
+ print ("l is %r" % l)
+ > a,b = l.pop()
+ E TypeError: 'int' object is not iterable
+
+ failure_demo.py:148: TypeError
+ --------------------------- Captured stdout call ---------------------------
+ l is [1, 2, 3]
+ _______ TestRaises.test_some_error ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_some_error(self):
+ > if namenotexi:
+ E NameError: name 'namenotexi' is not defined
+
+ failure_demo.py:151: NameError
+ _______ test_dynamic_compile_shows_nicely ________
+
+ def test_dynamic_compile_shows_nicely():
+ src = 'def foo():\n assert 1 == 0\n'
+ name = 'abc-123'
+ module = py.std.imp.new_module(name)
+ code = _pytest._code.compile(src, name, 'exec')
+ py.builtin.exec_(code, module.__dict__)
+ py.std.sys.modules[name] = module
+ > module.foo()
+
+ failure_demo.py:166:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ def foo():
+ > assert 1 == 0
+ E assert 1 == 0
+
+ <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:163>:2: AssertionError
+ _______ TestMoreErrors.test_complex_error ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_complex_error(self):
+ def f():
+ return 44
+ def g():
+ return 43
+ > somefunc(f(), g())
+
+ failure_demo.py:176:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+ failure_demo.py:9: in somefunc
+ otherfunc(x,y)
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ a = 44, b = 43
+
+ def otherfunc(a,b):
+ > assert a==b
+ E assert 44 == 43
+
+ failure_demo.py:6: AssertionError
+ _______ TestMoreErrors.test_z1_unpack_error ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_z1_unpack_error(self):
+ l = []
+ > a,b = l
+ E ValueError: need more than 0 values to unpack
+
+ failure_demo.py:180: ValueError
+ _______ TestMoreErrors.test_z2_type_error ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_z2_type_error(self):
+ l = 3
+ > a,b = l
+ E TypeError: 'int' object is not iterable
+
+ failure_demo.py:184: TypeError
+ _______ TestMoreErrors.test_startswith ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_startswith(self):
+ s = "123"
+ g = "456"
+ > assert s.startswith(g)
+ E assert <built-in method startswith of str object at 0xdeadbeef>('456')
+ E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith
+
+ failure_demo.py:189: AssertionError
+ _______ TestMoreErrors.test_startswith_nested ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_startswith_nested(self):
+ def f():
+ return "123"
+ def g():
+ return "456"
+ > assert f().startswith(g())
+ E assert <built-in method startswith of str object at 0xdeadbeef>('456')
+ E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith
+ E + where '123' = <function TestMoreErrors.test_startswith_nested.<locals>.f at 0xdeadbeef>()
+ E + and '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0xdeadbeef>()
+
+ failure_demo.py:196: AssertionError
+ _______ TestMoreErrors.test_global_func ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_global_func(self):
+ > assert isinstance(globf(42), float)
+ E assert isinstance(43, float)
+ E + where 43 = globf(42)
+
+ failure_demo.py:199: AssertionError
+ _______ TestMoreErrors.test_instance ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_instance(self):
+ self.x = 6*7
+ > assert self.x != 42
+ E assert 42 != 42
+ E + where 42 = <failure_demo.TestMoreErrors object at 0xdeadbeef>.x
+
+ failure_demo.py:203: AssertionError
+ _______ TestMoreErrors.test_compare ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_compare(self):
+ > assert globf(10) < 5
+ E assert 11 < 5
+ E + where 11 = globf(10)
+
+ failure_demo.py:206: AssertionError
+ _______ TestMoreErrors.test_try_finally ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_try_finally(self):
+ x = 1
+ try:
+ > assert x == 0
+ E assert 1 == 0
+
+ failure_demo.py:211: AssertionError
+ _______ TestCustomAssertMsg.test_single_line ________
+
+ self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
+
+ def test_single_line(self):
+ class A:
+ a = 1
+ b = 2
+ > assert A.a == b, "A.a appears not to be b"
+ E AssertionError: A.a appears not to be b
+ E assert 1 == 2
+ E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_single_line.<locals>.A'>.a
+
+ failure_demo.py:222: AssertionError
+ _______ TestCustomAssertMsg.test_multiline ________
+
+ self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
+
+ def test_multiline(self):
+ class A:
+ a = 1
+ b = 2
+ > assert A.a == b, "A.a appears not to be b\n" \
+ "or does not appear to be b\none of those"
+ E AssertionError: A.a appears not to be b
+ E or does not appear to be b
+ E one of those
+ E assert 1 == 2
+ E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_multiline.<locals>.A'>.a
+
+ failure_demo.py:228: AssertionError
+ _______ TestCustomAssertMsg.test_custom_repr ________
+
+ self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
+
+ def test_custom_repr(self):
+ class JSON:
+ a = 1
+ def __repr__(self):
+ return "This is JSON\n{\n 'foo': 'bar'\n}"
+ a = JSON()
+ b = 2
+ > assert a.a == b, a
+ E AssertionError: This is JSON
+ E {
+ E 'foo': 'bar'
+ E }
+ E assert 1 == 2
+ E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a
+
+ failure_demo.py:238: AssertionError
+ ======= 42 failed in 0.12 seconds ========
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/simple.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/simple.rst
new file mode 100644
index 000000000..be12d2afe
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/simple.rst
@@ -0,0 +1,751 @@
+
+.. highlightlang:: python
+
+Basic patterns and examples
+==========================================================
+
+Pass different values to a test function, depending on command line options
+----------------------------------------------------------------------------
+
+.. regendoc:wipe
+
+Suppose we want to write a test that depends on a command line option.
+Here is a basic pattern to achieve this::
+
+ # content of test_sample.py
+ def test_answer(cmdopt):
+ if cmdopt == "type1":
+ print ("first")
+ elif cmdopt == "type2":
+ print ("second")
+ assert 0 # to see what was printed
+
+
+For this to work we need to add a command line option and
+provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`::
+
+ # content of conftest.py
+ import pytest
+
+ def pytest_addoption(parser):
+ parser.addoption("--cmdopt", action="store", default="type1",
+ help="my option: type1 or type2")
+
+ @pytest.fixture
+ def cmdopt(request):
+ return request.config.getoption("--cmdopt")
+
+Let's run this without supplying our new option::
+
+ $ py.test -q test_sample.py
+ F
+ ======= FAILURES ========
+ _______ test_answer ________
+
+ cmdopt = 'type1'
+
+ def test_answer(cmdopt):
+ if cmdopt == "type1":
+ print ("first")
+ elif cmdopt == "type2":
+ print ("second")
+ > assert 0 # to see what was printed
+ E assert 0
+
+ test_sample.py:6: AssertionError
+ --------------------------- Captured stdout call ---------------------------
+ first
+ 1 failed in 0.12 seconds
+
+And now with supplying a command line option::
+
+ $ py.test -q --cmdopt=type2
+ F
+ ======= FAILURES ========
+ _______ test_answer ________
+
+ cmdopt = 'type2'
+
+ def test_answer(cmdopt):
+ if cmdopt == "type1":
+ print ("first")
+ elif cmdopt == "type2":
+ print ("second")
+ > assert 0 # to see what was printed
+ E assert 0
+
+ test_sample.py:6: AssertionError
+ --------------------------- Captured stdout call ---------------------------
+ second
+ 1 failed in 0.12 seconds
+
+You can see that the command line option arrived in our test. This
+completes the basic pattern. However, one often wants to process
+command line options outside of the test and instead pass in different or
+more complex objects.
+
+Dynamically adding command line options
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+Through :confval:`addopts` you can statically add command line
+options for your project. You can also dynamically modify
+the command line arguments before they get processed::
+
+ # content of conftest.py
+ import sys
+ def pytest_cmdline_preparse(args):
+ if 'xdist' in sys.modules: # pytest-xdist plugin
+ import multiprocessing
+ num = max(multiprocessing.cpu_count() / 2, 1)
+ args[:] = ["-n", str(num)] + args
+
+If you have the :ref:`xdist plugin <xdist>` installed
+you will now always perform test runs using a number
+of subprocesses close to your number of CPU cores. Running in an empty
+directory with the above conftest.py::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. _`excontrolskip`:
+
+Control skipping of tests according to command line option
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+Here is a ``conftest.py`` file adding a ``--runslow`` command
+line option to control skipping of ``slow`` marked tests::
+
+ # content of conftest.py
+
+ import pytest
+ def pytest_addoption(parser):
+ parser.addoption("--runslow", action="store_true",
+ help="run slow tests")
+
+We can now write a test module like this::
+
+ # content of test_module.py
+
+ import pytest
+
+
+ slow = pytest.mark.skipif(
+ not pytest.config.getoption("--runslow"),
+ reason="need --runslow option to run"
+ )
+
+
+ def test_func_fast():
+ pass
+
+
+ @slow
+ def test_func_slow():
+ pass
+
+and when running it we will see a skipped "slow" test::
+
+ $ py.test -rs # "-rs" means report details on the little 's'
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py .s
+ ======= short test summary info ========
+ SKIP [1] test_module.py:14: need --runslow option to run
+
+ ======= 1 passed, 1 skipped in 0.12 seconds ========
+
+Or run it including the ``slow`` marked test::
+
+ $ py.test --runslow
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py ..
+
+ ======= 2 passed in 0.12 seconds ========
+
+Writing well integrated assertion helpers
+--------------------------------------------------
+
+.. regendoc:wipe
+
+If you have a test helper function called from a test you can
+call ``pytest.fail`` to fail a test with a certain message.
+The test support function will not show up in the traceback if you
+set the ``__tracebackhide__`` option somewhere in the helper function.
+Example::
+
+ # content of test_checkconfig.py
+ import pytest
+ def checkconfig(x):
+ __tracebackhide__ = True
+ if not hasattr(x, "config"):
+ pytest.fail("not configured: %s" %(x,))
+
+ def test_something():
+ checkconfig(42)
+
+The ``__tracebackhide__`` setting influences how ``pytest`` shows
+tracebacks: the ``checkconfig`` function will not be shown
+unless the ``--fulltrace`` command line option is specified.
+Let's run our little function::
+
+ $ py.test -q test_checkconfig.py
+ F
+ ======= FAILURES ========
+ _______ test_something ________
+
+ def test_something():
+ > checkconfig(42)
+ E Failed: not configured: 42
+
+ test_checkconfig.py:8: Failed
+ 1 failed in 0.12 seconds
+
+Detect if running from within a pytest run
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+Usually it is a bad idea to make application code
+behave differently if called from a test. But if you
+absolutely must find out if your application code is
+running from a test you can do something like this::
+
+ # content of conftest.py
+
+ def pytest_configure(config):
+ import sys
+ sys._called_from_test = True
+
+ def pytest_unconfigure(config):
+ del sys._called_from_test
+
+and then check for the ``sys._called_from_test`` flag::
+
+ if hasattr(sys, '_called_from_test'):
+ # called from within a test run
+ else:
+ # called "normally"
+
+accordingly in your application. It's also a good idea
+to use your own application module rather than ``sys``
+for handling the flag.
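+
+A minimal sketch of that variant (``mymodule`` is a placeholder for your own
+application package, not a real module)::
+
+    # content of conftest.py
+
+    def pytest_configure(config):
+        import mymodule
+        mymodule._called_from_test = True
+
+    def pytest_unconfigure(config):
+        import mymodule
+        del mymodule._called_from_test
+
+Application code would then check ``getattr(mymodule, "_called_from_test", False)``
+instead of poking at ``sys``.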
+
+Adding info to test report header
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+It's easy to present extra information in a ``pytest`` run::
+
+ # content of conftest.py
+
+ def pytest_report_header(config):
+ return "project deps: mylib-1.1"
+
+which will add the string to the test header accordingly::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ project deps: mylib-1.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. regendoc:wipe
+
+You can also return a list of strings which will be considered as several
+lines of information. You can of course also make the amount of reporting
+information depend on e.g. the value of ``config.option.verbose`` so that
+you present more information appropriately::
+
+ # content of conftest.py
+
+ def pytest_report_header(config):
+ if config.option.verbose > 0:
+ return ["info1: did you know that ...", "did you?"]
+
+which will add info only when run with ``-v``::
+
+ $ py.test -v
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ info1: did you know that ...
+ did you?
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+and nothing when run plainly::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+profiling test duration
+--------------------------
+
+.. regendoc:wipe
+
+.. versionadded: 2.2
+
+If you have a large, slow-running test suite you might want to find
+out which tests are the slowest. Let's make an artificial test suite::
+
+ # content of test_some_are_slow.py
+
+ import time
+
+ def test_funcfast():
+ pass
+
+ def test_funcslow1():
+ time.sleep(0.1)
+
+ def test_funcslow2():
+ time.sleep(0.2)
+
+Now we can profile which test functions execute the slowest::
+
+ $ py.test --durations=3
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 3 items
+
+ test_some_are_slow.py ...
+
+ ======= slowest 3 test durations ========
+ 0.20s call test_some_are_slow.py::test_funcslow2
+ 0.10s call test_some_are_slow.py::test_funcslow1
+ 0.00s setup test_some_are_slow.py::test_funcfast
+ ======= 3 passed in 0.12 seconds ========
+
+incremental testing - test steps
+---------------------------------------------------
+
+.. regendoc:wipe
+
+Sometimes you may have a testing situation which consists of a series
+of test steps. If one step fails it makes no sense to execute further
+steps as they are all expected to fail anyway and their tracebacks
+add no insight. Here is a simple ``conftest.py`` file which introduces
+an ``incremental`` marker which is to be used on classes::
+
+ # content of conftest.py
+
+ import pytest
+
+ def pytest_runtest_makereport(item, call):
+ if "incremental" in item.keywords:
+ if call.excinfo is not None:
+ parent = item.parent
+ parent._previousfailed = item
+
+ def pytest_runtest_setup(item):
+ if "incremental" in item.keywords:
+ previousfailed = getattr(item.parent, "_previousfailed", None)
+ if previousfailed is not None:
+ pytest.xfail("previous test failed (%s)" %previousfailed.name)
+
+These two hook implementations work together to abort incremental-marked
+tests in a class. Here is a test module example::
+
+ # content of test_step.py
+
+ import pytest
+
+ @pytest.mark.incremental
+ class TestUserHandling:
+ def test_login(self):
+ pass
+ def test_modification(self):
+ assert 0
+ def test_deletion(self):
+ pass
+
+ def test_normal():
+ pass
+
+If we run this::
+
+ $ py.test -rx
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_step.py .Fx.
+ ======= short test summary info ========
+ XFAIL test_step.py::TestUserHandling::()::test_deletion
+ reason: previous test failed (test_modification)
+
+ ======= FAILURES ========
+ _______ TestUserHandling.test_modification ________
+
+ self = <test_step.TestUserHandling object at 0xdeadbeef>
+
+ def test_modification(self):
+ > assert 0
+ E assert 0
+
+ test_step.py:9: AssertionError
+ ======= 1 failed, 2 passed, 1 xfailed in 0.12 seconds ========
+
+We'll see that ``test_deletion`` was not executed because ``test_modification``
+failed. It is reported as an "expected failure".
+
+
+Package/Directory-level fixtures (setups)
+-------------------------------------------------------
+
+If you have nested test directories, you can have per-directory fixture scopes
+by placing fixture functions in a ``conftest.py`` file in that directory.
+You can use all types of fixtures including :ref:`autouse fixtures
+<autouse fixtures>` which are the equivalent of xUnit's setup/teardown
+concept. It's however recommended to have explicit fixture references in your
+tests or test classes rather than relying on implicitly executing
+setup/teardown functions, especially if they are far away from the actual tests.
+
+Here is an example of making a ``db`` fixture available in a directory::
+
+ # content of a/conftest.py
+ import pytest
+
+ class DB:
+ pass
+
+ @pytest.fixture(scope="session")
+ def db():
+ return DB()
+
+and then a test module in that directory::
+
+ # content of a/test_db.py
+ def test_a1(db):
+ assert 0, db # to show value
+
+another test module::
+
+ # content of a/test_db2.py
+ def test_a2(db):
+ assert 0, db # to show value
+
+and then a module in a sister directory which will not see
+the ``db`` fixture::
+
+ # content of b/test_error.py
+ def test_root(db): # no db here, will error out
+ pass
+
+We can run this::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 7 items
+
+ test_step.py .Fx.
+ a/test_db.py F
+ a/test_db2.py F
+ b/test_error.py E
+
+ ======= ERRORS ========
+ _______ ERROR at setup of test_root ________
+ file $REGENDOC_TMPDIR/b/test_error.py, line 1
+ def test_root(db): # no db here, will error out
+ fixture 'db' not found
+ available fixtures: record_xml_property, recwarn, cache, capsys, pytestconfig, tmpdir_factory, capfd, monkeypatch, tmpdir
+ use 'py.test --fixtures [testpath]' for help on them.
+
+ $REGENDOC_TMPDIR/b/test_error.py:1
+ ======= FAILURES ========
+ _______ TestUserHandling.test_modification ________
+
+ self = <test_step.TestUserHandling object at 0xdeadbeef>
+
+ def test_modification(self):
+ > assert 0
+ E assert 0
+
+ test_step.py:9: AssertionError
+ _______ test_a1 ________
+
+ db = <conftest.DB object at 0xdeadbeef>
+
+ def test_a1(db):
+ > assert 0, db # to show value
+ E AssertionError: <conftest.DB object at 0xdeadbeef>
+ E assert 0
+
+ a/test_db.py:2: AssertionError
+ _______ test_a2 ________
+
+ db = <conftest.DB object at 0xdeadbeef>
+
+ def test_a2(db):
+ > assert 0, db # to show value
+ E AssertionError: <conftest.DB object at 0xdeadbeef>
+ E assert 0
+
+ a/test_db2.py:2: AssertionError
+ ======= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========
+
+The two test modules in the ``a`` directory see the same ``db`` fixture instance
+while the one test in the sister-directory ``b`` doesn't see it. We could of course
+also define a ``db`` fixture in that sister directory's ``conftest.py`` file.
+Note that each fixture is only instantiated if there is a test actually needing
+it (unless you use "autouse" fixtures, which are always executed ahead of the
+first test).
+
+
+post-process test reports / failures
+---------------------------------------
+
+If you want to postprocess test reports and need access to the executing
+environment you can implement a hook that gets called when the test
+"report" object is about to be created. Here we write out all failing
+test calls and also access a fixture (if it was used by the test) in
+case you want to query/look at it during your post processing. In our
+case we just write some information out to a ``failures`` file::
+
+ # content of conftest.py
+
+ import pytest
+ import os.path
+
+ @pytest.hookimpl(tryfirst=True, hookwrapper=True)
+ def pytest_runtest_makereport(item, call):
+ # execute all other hooks to obtain the report object
+ outcome = yield
+ rep = outcome.get_result()
+
+ # we only look at actual failing test calls, not setup/teardown
+ if rep.when == "call" and rep.failed:
+ mode = "a" if os.path.exists("failures") else "w"
+ with open("failures", mode) as f:
+ # let's also access a fixture for the fun of it
+ if "tmpdir" in item.fixturenames:
+ extra = " (%s)" % item.funcargs["tmpdir"]
+ else:
+ extra = ""
+
+ f.write(rep.nodeid + extra + "\n")
+
+
+if you then have failing tests::
+
+ # content of test_module.py
+ def test_fail1(tmpdir):
+ assert 0
+ def test_fail2():
+ assert 0
+
+and run them::
+
+ $ py.test test_module.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py FF
+
+ ======= FAILURES ========
+ _______ test_fail1 ________
+
+ tmpdir = local('PYTEST_TMPDIR/test_fail10')
+
+ def test_fail1(tmpdir):
+ > assert 0
+ E assert 0
+
+ test_module.py:2: AssertionError
+ _______ test_fail2 ________
+
+ def test_fail2():
+ > assert 0
+ E assert 0
+
+ test_module.py:4: AssertionError
+ ======= 2 failed in 0.12 seconds ========
+
+you will have a "failures" file which contains the failing test ids::
+
+ $ cat failures
+ test_module.py::test_fail1 (PYTEST_TMPDIR/test_fail10)
+ test_module.py::test_fail2
+
+Making test result information available in fixtures
+-----------------------------------------------------------
+
+.. regendoc:wipe
+
+If you want to make test result reports available in fixture finalizers,
+here is a little example implemented via a local plugin::
+
+ # content of conftest.py
+
+ import pytest
+
+ @pytest.hookimpl(tryfirst=True, hookwrapper=True)
+ def pytest_runtest_makereport(item, call):
+ # execute all other hooks to obtain the report object
+ outcome = yield
+ rep = outcome.get_result()
+
+ # set a report attribute for each phase of a call, which can
+ # be "setup", "call", "teardown"
+
+ setattr(item, "rep_" + rep.when, rep)
+
+
+ @pytest.fixture
+ def something(request):
+ def fin():
+ # request.node is an "item" because we use the default
+ # "function" scope
+ if request.node.rep_setup.failed:
+ print ("setting up a test failed!", request.node.nodeid)
+ elif request.node.rep_setup.passed:
+ if request.node.rep_call.failed:
+ print ("executing test failed", request.node.nodeid)
+ request.addfinalizer(fin)
+
+
+if you then have failing tests::
+
+ # content of test_module.py
+
+ import pytest
+
+ @pytest.fixture
+ def other():
+ assert 0
+
+ def test_setup_fails(something, other):
+ pass
+
+ def test_call_fails(something):
+ assert 0
+
+ def test_fail2():
+ assert 0
+
+and run it::
+
+ $ py.test -s test_module.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 3 items
+
+ test_module.py Esetting up a test failed! test_module.py::test_setup_fails
+ Fexecuting test failed test_module.py::test_call_fails
+ F
+
+ ======= ERRORS ========
+ _______ ERROR at setup of test_setup_fails ________
+
+ @pytest.fixture
+ def other():
+ > assert 0
+ E assert 0
+
+ test_module.py:6: AssertionError
+ ======= FAILURES ========
+ _______ test_call_fails ________
+
+ something = None
+
+ def test_call_fails(something):
+ > assert 0
+ E assert 0
+
+ test_module.py:12: AssertionError
+ _______ test_fail2 ________
+
+ def test_fail2():
+ > assert 0
+ E assert 0
+
+ test_module.py:15: AssertionError
+ ======= 2 failed, 1 error in 0.12 seconds ========
+
+You'll see that the fixture finalizers could use the precise reporting
+information.
+
+Integrating pytest runner and cx_freeze
+-----------------------------------------------------------
+
+If you freeze your application using a tool like
+`cx_freeze <http://cx-freeze.readthedocs.org>`_ in order to distribute it
+to your end-users, it is a good idea to also package your test runner and run
+your tests using the frozen application.
+
+This way packaging errors such as dependencies not being
+included into the executable can be detected early while also allowing you to
+send test files to users so they can run them on their machines, which can be
+invaluable to obtain more information about a hard-to-reproduce bug.
+
+Unfortunately ``cx_freeze`` can't discover pytest's own internal modules and
+plugins automatically because of ``pytest``'s use of dynamic module loading, so you
+must declare them explicitly by using ``pytest.freeze_includes()``::
+
+ # contents of setup.py
+ from cx_Freeze import setup, Executable
+ import pytest
+
+ setup(
+ name="app_main",
+ executables=[Executable("app_main.py")],
+ options={"build_exe":
+ {
+ 'includes': pytest.freeze_includes()}
+ },
+ # ... other options
+ )
+
+If you don't want to ship a different executable just in order to run your tests,
+you can make your program check for a certain flag and pass control
+over to ``pytest`` instead. For example::
+
+ # contents of app_main.py
+ import sys
+
+ if len(sys.argv) > 1 and sys.argv[1] == '--pytest':
+ import pytest
+ sys.exit(pytest.main(sys.argv[2:]))
+ else:
+ # normal application execution: at this point argv can be parsed
+ # by your argument-parsing library of choice as usual
+ ...
+
+This makes it convenient to execute your tests from within your frozen
+application, using standard ``py.test`` command-line options::
+
+ ./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/special.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/special.rst
new file mode 100644
index 000000000..58e66d44e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/special.rst
@@ -0,0 +1,72 @@
+A session-fixture which can look at all collected tests
+----------------------------------------------------------------
+
+A session-scoped fixture effectively has access to all
+collected test items. Here is an example of a fixture
+function which walks all collected tests and checks
+if their test class defines a ``callme`` method and
+calls it::
+
+ # content of conftest.py
+
+ import pytest
+
+ @pytest.fixture(scope="session", autouse=True)
+ def callattr_ahead_of_alltests(request):
+ print ("callattr_ahead_of_alltests called")
+ seen = set([None])
+ session = request.node
+ for item in session.items:
+ cls = item.getparent(pytest.Class)
+ if cls not in seen:
+ if hasattr(cls.obj, "callme"):
+ cls.obj.callme()
+ seen.add(cls)
+
+test classes may now define a ``callme`` method which
+will be called ahead of running any tests::
+
+ # content of test_module.py
+
+ class TestHello:
+ @classmethod
+ def callme(cls):
+ print ("callme called!")
+
+ def test_method1(self):
+ print ("test_method1 called")
+
+ def test_method2(self):
+ print ("test_method1 called")
+
+ class TestOther:
+ @classmethod
+ def callme(cls):
+ print ("callme other called")
+ def test_other(self):
+ print ("test other")
+
+ # works with unittest as well ...
+ import unittest
+
+ class SomeTest(unittest.TestCase):
+ @classmethod
+ def callme(self):
+ print ("SomeTest callme called")
+
+ def test_unit1(self):
+ print ("test_unit1 method called")
+
+If you run this without output capturing::
+
+ $ py.test -q -s test_module.py
+ callattr_ahead_of_alltests called
+ callme called!
+ callme other called
+ SomeTest callme called
+ test_method1 called
+ .test_method1 called
+ .test other
+ .test_unit1 method called
+ .
+ 4 passed in 0.12 seconds
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/xfail_demo.py b/testing/web-platform/tests/tools/pytest/doc/en/example/xfail_demo.py
new file mode 100644
index 000000000..5648575e8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/xfail_demo.py
@@ -0,0 +1,30 @@
+import pytest
+xfail = pytest.mark.xfail
+
+@xfail
+def test_hello():
+ assert 0
+
+@xfail(run=False)
+def test_hello2():
+ assert 0
+
+@xfail("hasattr(os, 'sep')")
+def test_hello3():
+ assert 0
+
+@xfail(reason="bug 110")
+def test_hello4():
+ assert 0
+
+@xfail('pytest.__version__[0] != "17"')
+def test_hello5():
+ assert 0
+
+def test_hello6():
+ pytest.xfail("reason")
+
+@xfail(raises=IndexError)
+def test_hello7():
+ x = []
+ x[1] = 1
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/faq.rst b/testing/web-platform/tests/tools/pytest/doc/en/faq.rst
new file mode 100644
index 000000000..fd7ca35e9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/faq.rst
@@ -0,0 +1,165 @@
+Some Issues and Questions
+==================================
+
+.. note::
+
+ This FAQ is here mostly for historic reasons. Check out
+ `pytest Q&A at Stackoverflow <http://stackoverflow.com/search?q=pytest>`_
+ for many questions and answers related to pytest and/or use
+ :ref:`contact channels` to get help.
+
+On naming, nosetests, licensing and magic
+------------------------------------------------
+
+How does pytest relate to nose and unittest?
++++++++++++++++++++++++++++++++++++++++++++++++++
+
+``pytest`` and nose_ share the same basic philosophy when it comes
+to running and writing Python tests. In fact, you can run many tests
+written for nose with ``pytest``. nose_ was originally created
+as a clone of ``pytest`` when ``pytest`` was in the ``0.8`` release
+cycle. Note that starting with pytest-2.0, support for running unittest
+test suites has improved considerably.
+
+how does pytest relate to twisted's trial?
+++++++++++++++++++++++++++++++++++++++++++++++
+
+``pytest`` has had builtin support for running tests written using
+trial for some time. It does not itself start a reactor, however,
+and does not handle Deferreds returned from a test in pytest style.
+If you are using trial's unittest.TestCase chances are that you can
+just run your tests even if you return Deferreds. In addition,
+there also is a dedicated `pytest-twisted
+<http://pypi.python.org/pypi/pytest-twisted>`_ plugin which allows you to
+return deferreds from pytest-style tests, allowing the use of
+:ref:`fixtures` and other features.
+
+how does pytest work with Django?
+++++++++++++++++++++++++++++++++++++++++++++++
+
+In 2012, work went into the `pytest-django plugin <http://pypi.python.org/pypi/pytest-django>`_. It substitutes the usage of Django's
+``manage.py test`` and allows the use of all pytest features_, most of which
+are not available from Django directly.
+
+.. _features: features.html
+
+
+What's this "magic" with pytest? (historic notes)
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Around 2007 (version ``0.8``) some people thought that ``pytest``
+was using too much "magic". It had been part of the `pylib`_ which
+contains a lot of unrelated python library code. Around 2010 there
+was a major cleanup refactoring, which removed unused or deprecated code
+and resulted in the new ``pytest`` PyPI package which strictly contains
+only test-related code. This release also brought a complete pluginification
+such that the core is around 300 lines of code and everything else is
+implemented in plugins. Thus ``pytest`` today is a small, universally runnable
+and customizable testing framework for Python. Note, however, that
+``pytest`` uses metaprogramming techniques and reading its source is
+thus likely not something for Python beginners.
+
+A second "magic" issue was the assert statement debugging feature.
+Nowadays, ``pytest`` explicitly rewrites assert statements in test modules
+in order to provide more useful :ref:`assert feedback <assertfeedback>`.
+This completely avoids previous issues of confusing assertion-reporting.
+It also means that you can use Python's ``-O`` optimization without losing
+assertions in test modules.
+
+``pytest`` contains a second, mostly obsolete, assert debugging technique
+invoked via ``--assert=reinterpret``: When an ``assert`` statement fails, ``pytest`` re-interprets
+the expression part to show intermediate values. This technique suffers
+from a caveat that the rewriting does not: If your expression has side
+effects (better to avoid them anyway!) the intermediate values may not
+be the same, confusing the reinterpreter and obfuscating the initial
+error (this is also explained at the command line if it happens).
+
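+As a small, contrived illustration of such a side effect (not taken from the
+demo above): with reinterpretation the failing expression is evaluated a
+second time, so the extra ``pop()`` call hits an already-empty list::
+
+    def test_pop():
+        l = [1]
+        # l.pop() succeeds once; re-evaluating it raises IndexError
+        # and obscures the original comparison failure
+        assert l.pop() == 2
+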
+You can also turn off all assertion interaction using the
+``--assert=plain`` option.
+
+.. _`py namespaces`: index.html
+.. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py
+
+
+Why a ``py.test`` instead of a ``pytest`` command?
+++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Some of the reasons are historic, others are practical. ``pytest``
+used to be part of the ``py`` package which provided several developer
+utilities, all starting with ``py.<TAB>``, thus providing nice
+TAB-completion. If
+you ``pip install pycmd`` you get these tools from a separate
+package. These days the command line tool could be called ``pytest``
+but since many people have gotten used to the old name and there
+is another tool named "pytest" we just decided to stick with
+``py.test`` for now.
+
+pytest fixtures, parametrized tests
+-------------------------------------------------------
+
+.. _funcargs: funcargs.html
+
+Is using pytest fixtures versus xUnit setup a style question?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+For simple applications and for people experienced with nose_ or
+unittest-style test setup, using `xUnit style setup`_ probably
+feels natural. For larger test suites, parametrized testing
+or setup of complex test resources using fixtures_ may feel more natural.
+Moreover, fixtures are ideal for writing advanced test support
+code (like e.g. the monkeypatch_, the tmpdir_ or capture_ fixtures)
+because the support code can register setup/teardown functions
+in a managed class/module/function scope.
+
+.. _monkeypatch: monkeypatch.html
+.. _tmpdir: tmpdir.html
+.. _capture: capture.html
+.. _fixtures: fixture.html
+
+.. _`why pytest_pyfuncarg__ methods?`:
+
+.. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration
+
+Can I yield multiple values from a fixture function?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+There are two conceptual reasons why yielding from a factory function
+is not possible:
+
+* If multiple factories yielded values there would
+ be no natural place to determine the combination
+ policy - in real-world examples some combinations
+ often should not run.
+
+* Calling factories for obtaining test function arguments
+ is part of setting up and running a test. At that
+ point it is not possible to add new test calls to
+ the test collection anymore.
+
+However, with pytest-2.3 you can use the :ref:`@pytest.fixture` decorator
+and specify ``params`` so that all tests depending on the factory-created
+resource will run multiple times with different parameters.
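+
+A minimal sketch of such a parametrized fixture (the fixture and parameter
+values are illustrative only)::
+
+    import pytest
+
+    @pytest.fixture(params=["sqlite", "postgres"])
+    def backend(request):
+        # request.param holds one entry of ``params`` per test run
+        return request.param
+
+    def test_backend(backend):
+        # runs once for "sqlite" and once for "postgres"
+        assert backend in ("sqlite", "postgres")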
+
+You can also use the ``pytest_generate_tests`` hook to
+implement the `parametrization scheme of your choice`_. See also
+:ref:`paramexamples` for more examples.
+
+.. _`parametrization scheme of your choice`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/
+
+pytest interaction with other packages
+---------------------------------------------------
+
+Issues with pytest, multiprocess and setuptools?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+On Windows the multiprocess package will instantiate sub processes
+by pickling and thus implicitly re-import a lot of local modules.
+Unfortunately, setuptools-0.6.11 does not protect its generated command
+line script with an ``if __name__=='__main__'`` guard. This leads to
+infinite recursion when running a test that instantiates Processes.
+As of mid-2013, there shouldn't be a problem anymore when you
+use the standard setuptools (note that distribute has been merged
+back into setuptools which is now shipped directly with virtualenv).
+
+.. include:: links.inc
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/feedback.rst b/testing/web-platform/tests/tools/pytest/doc/en/feedback.rst
new file mode 100644
index 000000000..9c63b7640
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/feedback.rst
@@ -0,0 +1,8 @@
+
+What users say:
+
+ `py.test is pretty much the best thing ever`_ (Alex Gaynor)
+
+
+.. _`py.test is pretty much the best thing ever`:
+   http://twitter.com/#!/alex_gaynor/status/22389410366
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/fixture.rst b/testing/web-platform/tests/tools/pytest/doc/en/fixture.rst
new file mode 100644
index 000000000..f48607ae2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/fixture.rst
@@ -0,0 +1,987 @@
+.. _fixture:
+.. _fixtures:
+.. _`fixture functions`:
+
+pytest fixtures: explicit, modular, scalable
+========================================================
+
+.. currentmodule:: _pytest.python
+
+.. versionadded:: 2.0/2.3/2.4
+
+.. _`xUnit`: http://en.wikipedia.org/wiki/XUnit
+.. _`purpose of test fixtures`: http://en.wikipedia.org/wiki/Test_fixture#Software
+.. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection#Definition
+
+The `purpose of test fixtures`_ is to provide a fixed baseline
+upon which tests can reliably and repeatedly execute. pytest fixtures
+offer dramatic improvements over the classic xUnit style of setup/teardown
+functions:
+
+* fixtures have explicit names and are activated by declaring their use
+ from test functions, modules, classes or whole projects.
+
+* fixtures are implemented in a modular manner, as each fixture name
+ triggers a *fixture function* which can itself use other fixtures.
+
+* fixture management scales from simple unit to complex
+ functional testing, allowing you to parametrize fixtures and tests according
+ to configuration and component options, or to re-use fixtures
+ across class, module or whole test session scopes.
+
+In addition, pytest continues to support :ref:`xunitsetup`. You can mix
+both styles, moving incrementally from classic to new style, as you
+prefer. You can also start out from existing :ref:`unittest.TestCase
+style <unittest.TestCase>` or :ref:`nose based <nosestyle>` projects.
+
+.. note::
+
+ pytest-2.4 introduced an additional experimental
+ :ref:`yield fixture mechanism <yieldfixture>` for easier context manager
+ integration and more linear writing of teardown code.
+
+.. _`funcargs`:
+.. _`funcarg mechanism`:
+.. _`fixture function`:
+.. _`@pytest.fixture`:
+.. _`pytest.fixture`:
+
+Fixtures as Function arguments
+-----------------------------------------
+
+Test functions can receive fixture objects by naming them as an input
+argument. For each argument name, a fixture function with that name provides
+the fixture object. Fixture functions are registered by marking them with
+:py:func:`@pytest.fixture <_pytest.python.fixture>`. Let's look at a simple
+self-contained test module containing a fixture and a test function
+using it::
+
+ # content of ./test_smtpsimple.py
+ import pytest
+
+ @pytest.fixture
+ def smtp():
+ import smtplib
+ return smtplib.SMTP("smtp.gmail.com")
+
+ def test_ehlo(smtp):
+ response, msg = smtp.ehlo()
+ assert response == 250
+ assert 0 # for demo purposes
+
+Here, the ``test_ehlo`` needs the ``smtp`` fixture value. pytest
+will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>`
+marked ``smtp`` fixture function. Running the test looks like this::
+
+ $ py.test test_smtpsimple.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_smtpsimple.py F
+
+ ======= FAILURES ========
+ _______ test_ehlo ________
+
+ smtp = <smtplib.SMTP object at 0xdeadbeef>
+
+ def test_ehlo(smtp):
+ response, msg = smtp.ehlo()
+ assert response == 250
+ > assert 0 # for demo purposes
+ E assert 0
+
+ test_smtpsimple.py:11: AssertionError
+ ======= 1 failed in 0.12 seconds ========
+
+In the failure traceback we see that the test function was called with a
+``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture
+function. The test function fails on our deliberate ``assert 0``. Here is
+the exact protocol used by ``pytest`` to call the test function this way:
+
+1. pytest :ref:`finds <test discovery>` the ``test_ehlo`` because
+ of the ``test_`` prefix. The test function needs a function argument
+ named ``smtp``. A matching fixture function is discovered by
+ looking for a fixture-marked function named ``smtp``.
+
+2. ``smtp()`` is called to create an instance.
+
+3. ``test_ehlo(<SMTP instance>)`` is called and fails in the last
+ line of the test function.
+
+Note that if you misspell a function argument or want
+to use one that isn't available, you'll see an error
+with a list of available function arguments.
+
+.. Note::
+
+ You can always issue::
+
+ py.test --fixtures test_simplefactory.py
+
+ to see available fixtures.
+
+ In versions prior to 2.3 there was no ``@pytest.fixture`` marker
+ and you had to use a magic ``pytest_funcarg__NAME`` prefix
+ for the fixture factory. This remains and will remain supported
+   but is no longer advertised as the primary means of declaring fixture
+ functions.
+
+"Funcargs" a prime example of dependency injection
+---------------------------------------------------
+
+For injecting fixtures into test functions, pytest-2.0 introduced the
+term "funcargs" or "funcarg mechanism", which is still present
+in the docs today. It now refers to the specific case of injecting
+fixture values as arguments to test functions. With pytest-2.3 there are
+more possibilities to use fixtures but "funcargs" remain the main way,
+as they allow you to directly state the dependencies of a test function.
+
+As the following examples show in more detail, funcargs allow test
+functions to easily receive and work against specific pre-initialized
+application objects without having to care about import/setup/cleanup
+details. It's a prime example of `dependency injection`_ where fixture
+functions take the role of the *injector* and test functions are the
+*consumers* of fixture objects.
+
+.. _smtpshared:
+
+Sharing a fixture across tests in a module (or class/session)
+-----------------------------------------------------------------
+
+.. regendoc:wipe
+
+Fixtures requiring network access depend on connectivity and are
+usually time-expensive to create. Extending the previous example, we
+can add a ``scope='module'`` parameter to the
+:py:func:`@pytest.fixture <_pytest.python.fixture>` invocation
+to cause the decorated ``smtp`` fixture function to only be invoked once
+per test module. Multiple test functions in a test module will thus
+each receive the same ``smtp`` fixture instance. The next example puts
+the fixture function into a separate ``conftest.py`` file so
+that tests from multiple test modules in the directory can
+access the fixture function::
+
+ # content of conftest.py
+ import pytest
+ import smtplib
+
+ @pytest.fixture(scope="module")
+ def smtp():
+ return smtplib.SMTP("smtp.gmail.com")
+
+The name of the fixture again is ``smtp`` and you can access its result by
+listing the name ``smtp`` as an input parameter in any test or fixture
+function (in or below the directory where ``conftest.py`` is located)::
+
+ # content of test_module.py
+
+ def test_ehlo(smtp):
+ response, msg = smtp.ehlo()
+ assert response == 250
+ assert b"smtp.gmail.com" in msg
+ assert 0 # for demo purposes
+
+ def test_noop(smtp):
+ response, msg = smtp.noop()
+ assert response == 250
+ assert 0 # for demo purposes
+
+We deliberately insert failing ``assert 0`` statements in order to
+inspect what is going on and can now run the tests::
+
+ $ py.test test_module.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py FF
+
+ ======= FAILURES ========
+ _______ test_ehlo ________
+
+ smtp = <smtplib.SMTP object at 0xdeadbeef>
+
+ def test_ehlo(smtp):
+ response, msg = smtp.ehlo()
+ assert response == 250
+ assert b"smtp.gmail.com" in msg
+ > assert 0 # for demo purposes
+ E assert 0
+
+ test_module.py:6: AssertionError
+ _______ test_noop ________
+
+ smtp = <smtplib.SMTP object at 0xdeadbeef>
+
+ def test_noop(smtp):
+ response, msg = smtp.noop()
+ assert response == 250
+ > assert 0 # for demo purposes
+ E assert 0
+
+ test_module.py:11: AssertionError
+ ======= 2 failed in 0.12 seconds ========
+
+You see the two ``assert 0`` failing and more importantly you can also see
+that the same (module-scoped) ``smtp`` object was passed into the two
+test functions because pytest shows the incoming argument values in the
+traceback. As a result, the two test functions using ``smtp`` run as
+quickly as a single one because they reuse the same instance.
+
+If you decide that you rather want to have a session-scoped ``smtp``
+instance, you can simply declare it:
+
+.. code-block:: python
+
+ @pytest.fixture(scope="session")
+ def smtp(...):
+ # the returned fixture value will be shared for
+ # all tests needing it
+
+.. _`finalization`:
+
+Fixture finalization / executing teardown code
+-------------------------------------------------------------
+
+pytest supports execution of fixture specific finalization code
+when the fixture goes out of scope. By accepting a ``request`` object
+into your fixture function you can call its ``request.addfinalizer`` one
+or multiple times::
+
+ # content of conftest.py
+
+ import smtplib
+ import pytest
+
+ @pytest.fixture(scope="module")
+ def smtp(request):
+ smtp = smtplib.SMTP("smtp.gmail.com")
+ def fin():
+ print ("teardown smtp")
+ smtp.close()
+ request.addfinalizer(fin)
+ return smtp # provide the fixture value
+
+The ``fin`` function will execute when the last test using
+the fixture in the module has finished execution.
+
+Let's execute it::
+
+ $ py.test -s -q --tb=no
+ FFteardown smtp
+
+ 2 failed in 0.12 seconds
+
+We see that the ``smtp`` instance is finalized after the two
+tests finished execution. Note that if we decorated our fixture
+function with ``scope='function'`` then fixture setup and cleanup would
+occur around each single test. In either case the test
+module itself does not need to change or know about these details
+of fixture setup.
+
+
+.. _`request-context`:
+
+Fixtures can introspect the requesting test context
+-------------------------------------------------------------
+
+Fixture functions can accept the :py:class:`request <FixtureRequest>` object
+to introspect the "requesting" test function, class or module context.
+Further extending the previous ``smtp`` fixture example, let's
+read an optional server URL from the test module which uses our fixture::
+
+ # content of conftest.py
+ import pytest
+ import smtplib
+
+ @pytest.fixture(scope="module")
+ def smtp(request):
+ server = getattr(request.module, "smtpserver", "smtp.gmail.com")
+ smtp = smtplib.SMTP(server)
+
+ def fin():
+ print ("finalizing %s (%s)" % (smtp, server))
+ smtp.close()
+ request.addfinalizer(fin)
+ return smtp
+
+We use the ``request.module`` attribute to optionally obtain an
+``smtpserver`` attribute from the test module. If we just execute
+again, nothing much has changed::
+
+ $ py.test -s -q --tb=no
+ FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)
+
+ 2 failed in 0.12 seconds
+
+Let's quickly create another test module that actually sets the
+server URL in its module namespace::
+
+ # content of test_anothersmtp.py
+
+ smtpserver = "mail.python.org" # will be read by smtp fixture
+
+ def test_showhelo(smtp):
+ assert 0, smtp.helo()
+
+Running it::
+
+ $ py.test -qq --tb=short test_anothersmtp.py
+ F
+ ======= FAILURES ========
+ _______ test_showhelo ________
+ test_anothersmtp.py:5: in test_showhelo
+ assert 0, smtp.helo()
+ E AssertionError: (250, b'mail.python.org')
+ E assert 0
+
+Voila! The ``smtp`` fixture function picked up our mail server name
+from the module namespace.
+
+.. _`fixture-parametrize`:
+
+Parametrizing a fixture
+-----------------------------------------------------------------
+
+Fixture functions can be parametrized, in which case they will be called
+multiple times, each time executing the set of dependent tests, i.e. the
+tests that depend on this fixture. Test functions usually do not need
+to be aware of their re-running. Fixture parametrization helps to
+write exhaustive functional tests for components which themselves can be
+configured in multiple ways.
+
+Extending the previous example, we can flag the fixture to create two
+``smtp`` fixture instances which will cause all tests using the fixture
+to run twice. The fixture function gets access to each parameter
+through the special :py:class:`request <FixtureRequest>` object::
+
+ # content of conftest.py
+ import pytest
+ import smtplib
+
+ @pytest.fixture(scope="module",
+ params=["smtp.gmail.com", "mail.python.org"])
+ def smtp(request):
+ smtp = smtplib.SMTP(request.param)
+ def fin():
+ print ("finalizing %s" % smtp)
+ smtp.close()
+ request.addfinalizer(fin)
+ return smtp
+
+The main change is the declaration of ``params`` with
+:py:func:`@pytest.fixture <_pytest.python.fixture>`, a list of values
+for each of which the fixture function will execute and can access
+a value via ``request.param``. No test function code needs to change.
+So let's just do another run::
+
+ $ py.test -q test_module.py
+ FFFF
+ ======= FAILURES ========
+ _______ test_ehlo[smtp.gmail.com] ________
+
+ smtp = <smtplib.SMTP object at 0xdeadbeef>
+
+ def test_ehlo(smtp):
+ response, msg = smtp.ehlo()
+ assert response == 250
+ assert b"smtp.gmail.com" in msg
+ > assert 0 # for demo purposes
+ E assert 0
+
+ test_module.py:6: AssertionError
+ _______ test_noop[smtp.gmail.com] ________
+
+ smtp = <smtplib.SMTP object at 0xdeadbeef>
+
+ def test_noop(smtp):
+ response, msg = smtp.noop()
+ assert response == 250
+ > assert 0 # for demo purposes
+ E assert 0
+
+ test_module.py:11: AssertionError
+ _______ test_ehlo[mail.python.org] ________
+
+ smtp = <smtplib.SMTP object at 0xdeadbeef>
+
+ def test_ehlo(smtp):
+ response, msg = smtp.ehlo()
+ assert response == 250
+ > assert b"smtp.gmail.com" in msg
+ E assert b'smtp.gmail.com' in b'mail.python.org\nSIZE 51200000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8'
+
+ test_module.py:5: AssertionError
+ -------------------------- Captured stdout setup ---------------------------
+ finalizing <smtplib.SMTP object at 0xdeadbeef>
+ _______ test_noop[mail.python.org] ________
+
+ smtp = <smtplib.SMTP object at 0xdeadbeef>
+
+ def test_noop(smtp):
+ response, msg = smtp.noop()
+ assert response == 250
+ > assert 0 # for demo purposes
+ E assert 0
+
+ test_module.py:11: AssertionError
+ 4 failed in 0.12 seconds
+
+We see that our two test functions each ran twice, against the different
+``smtp`` instances. Note also, that with the ``mail.python.org``
+connection the second test fails in ``test_ehlo`` because a
+different server string is expected than what arrived.
+
+pytest will build a string that is the test ID for each fixture value
+in a parametrized fixture, e.g. ``test_ehlo[smtp.gmail.com]`` and
+``test_ehlo[mail.python.org]`` in the above examples. These IDs can
+be used with ``-k`` to select specific cases to run, and they will
+also identify the specific case when one is failing. Running pytest
+with ``--collect-only`` will show the generated IDs.
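+
+For example, to run only the cases whose generated ID contains ``gmail``
+(an illustrative use of the ``-k`` selection mentioned above), you could
+issue::
+
+    py.test -k gmail test_module.py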
+
+Numbers, strings, booleans and None will have their usual string
+representation used in the test ID. For other objects, pytest will
+make a string based on the argument name. It is possible to customise
+the string used in a test ID for a certain fixture value by using the
+``ids`` keyword argument::
+
+ # content of test_ids.py
+ import pytest
+
+ @pytest.fixture(params=[0, 1], ids=["spam", "ham"])
+ def a(request):
+ return request.param
+
+ def test_a(a):
+ pass
+
+ def idfn(fixture_value):
+ if fixture_value == 0:
+ return "eggs"
+ else:
+ return None
+
+ @pytest.fixture(params=[0, 1], ids=idfn)
+ def b(request):
+ return request.param
+
+ def test_b(b):
+ pass
+
+The above shows how ``ids`` can be either a list of strings to use or
+a function which will be called with the fixture value and then
+has to return a string to use. In the latter case, if the function
+returns ``None``, then pytest's auto-generated ID will be used.
+
+Running the above tests results in the following test IDs being used::
+
+ $ py.test --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 10 items
+ <Module 'test_anothersmtp.py'>
+ <Function 'test_showhelo[smtp.gmail.com]'>
+ <Function 'test_showhelo[mail.python.org]'>
+ <Module 'test_ids.py'>
+ <Function 'test_a[spam]'>
+ <Function 'test_a[ham]'>
+ <Function 'test_b[eggs]'>
+ <Function 'test_b[1]'>
+ <Module 'test_module.py'>
+ <Function 'test_ehlo[smtp.gmail.com]'>
+ <Function 'test_noop[smtp.gmail.com]'>
+ <Function 'test_ehlo[mail.python.org]'>
+ <Function 'test_noop[mail.python.org]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. _`interdependent fixtures`:
+
+Modularity: using fixtures from a fixture function
+----------------------------------------------------------
+
+You can not only use fixtures in test functions but fixture functions
+can use other fixtures themselves. This contributes to a modular design
+of your fixtures and allows re-use of framework-specific fixtures across
+many projects. As a simple example, we can extend the previous example
+and instantiate an ``app`` object into which we stick the already
+defined ``smtp`` resource::
+
+ # content of test_appsetup.py
+
+ import pytest
+
+ class App:
+ def __init__(self, smtp):
+ self.smtp = smtp
+
+ @pytest.fixture(scope="module")
+ def app(smtp):
+ return App(smtp)
+
+ def test_smtp_exists(app):
+ assert app.smtp
+
+Here we declare an ``app`` fixture which receives the previously defined
+``smtp`` fixture and instantiates an ``App`` object with it. Let's run it::
+
+ $ py.test -v test_appsetup.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 2 items
+
+ test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED
+ test_appsetup.py::test_smtp_exists[mail.python.org] PASSED
+
+ ======= 2 passed in 0.12 seconds ========
+
+Due to the parametrization of ``smtp`` the test will run twice with two
+different ``App`` instances and respective smtp servers. There is no
+need for the ``app`` fixture to be aware of the ``smtp`` parametrization
+as pytest will fully analyse the fixture dependency graph.
+
+Note, that the ``app`` fixture has a scope of ``module`` and uses a
+module-scoped ``smtp`` fixture. The example would still work if ``smtp``
+was cached on a ``session`` scope: it is fine for fixtures to use
+"broader" scoped fixtures but not the other way round:
+A session-scoped fixture could not use a module-scoped one in a
+meaningful way.
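+
+As an illustrative sketch (the fixture names below are made up and not part
+of the example above), a module-scoped fixture may depend on a broader,
+session-scoped one::
+
+    import pytest
+
+    @pytest.fixture(scope="session")
+    def config():
+        # hypothetical session-wide configuration
+        return {"server": "smtp.gmail.com"}
+
+    @pytest.fixture(scope="module")
+    def server(config):
+        # the narrower-scoped fixture may use the broader-scoped "config"
+        return config["server"]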
+
+
+.. _`automatic per-resource grouping`:
+
+Automatic grouping of tests by fixture instances
+----------------------------------------------------------
+
+.. regendoc: wipe
+
+pytest minimizes the number of active fixtures during test runs.
+If you have a parametrized fixture, then all the tests using it will
+first execute with one instance and then finalizers are called
+before the next fixture instance is created. Among other things,
+this eases testing of applications which create and use global state.
+
+The following example uses two parametrized funcargs, one of which is
+scoped on a per-module basis, and all the functions perform ``print`` calls
+to show the setup/teardown flow::
+
+ # content of test_module.py
+ import pytest
+
+ @pytest.fixture(scope="module", params=["mod1", "mod2"])
+ def modarg(request):
+ param = request.param
+ print ("create", param)
+        def fin():
+            print ("fin %s" % param)
+        request.addfinalizer(fin)  # register the finalizer so it actually runs
+        return param
+
+ @pytest.fixture(scope="function", params=[1,2])
+ def otherarg(request):
+ return request.param
+
+ def test_0(otherarg):
+ print (" test0", otherarg)
+ def test_1(modarg):
+ print (" test1", modarg)
+ def test_2(otherarg, modarg):
+ print (" test2", otherarg, modarg)
+
+Let's run the tests in verbose mode and with looking at the print-output::
+
+ $ py.test -v -s test_module.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 8 items
+
+ test_module.py::test_0[1] test0 1
+ PASSED
+ test_module.py::test_0[2] test0 2
+ PASSED
+ test_module.py::test_1[mod1] create mod1
+ test1 mod1
+ PASSED
+ test_module.py::test_2[1-mod1] test2 1 mod1
+ PASSED
+ test_module.py::test_2[2-mod1] test2 2 mod1
+ PASSED
+ test_module.py::test_1[mod2] create mod2
+ test1 mod2
+ PASSED
+ test_module.py::test_2[1-mod2] test2 1 mod2
+ PASSED
+ test_module.py::test_2[2-mod2] test2 2 mod2
+ PASSED
+
+ ======= 8 passed in 0.12 seconds ========
+
+You can see that the parametrized module-scoped ``modarg`` resource caused
+an ordering of test execution that led to the fewest possible "active"
+resources. The finalizer for the ``mod1`` parametrized resource was executed
+before the ``mod2`` resource was set up.
+
+
+.. _`usefixtures`:
+
+Using fixtures from classes, modules or projects
+----------------------------------------------------------------------
+
+.. regendoc:wipe
+
+Sometimes test functions do not directly need access to a fixture object.
+For example, tests may need to operate with an empty directory as the
+current working directory but otherwise do not care about the concrete
+directory. Here is how you can use the standard `tempfile
+<http://docs.python.org/library/tempfile.html>`_ and pytest fixtures to
+achieve it. We separate the creation of the fixture into a conftest.py
+file::
+
+ # content of conftest.py
+
+ import pytest
+ import tempfile
+ import os
+
+ @pytest.fixture()
+ def cleandir():
+ newpath = tempfile.mkdtemp()
+ os.chdir(newpath)
+
+and declare its use in a test module via a ``usefixtures`` marker::
+
+ # content of test_setenv.py
+ import os
+ import pytest
+
+ @pytest.mark.usefixtures("cleandir")
+ class TestDirectoryInit:
+ def test_cwd_starts_empty(self):
+ assert os.listdir(os.getcwd()) == []
+ with open("myfile", "w") as f:
+ f.write("hello")
+
+ def test_cwd_again_starts_empty(self):
+ assert os.listdir(os.getcwd()) == []
+
+Due to the ``usefixtures`` marker, the ``cleandir`` fixture
+will be required for the execution of each test method, just as if
+you specified a "cleandir" function argument to each of them. Let's run it
+to verify our fixture is activated and the tests pass::
+
+ $ py.test -q
+ ..
+ 2 passed in 0.12 seconds
+
+You can specify multiple fixtures like this:
+
+.. code-block:: python
+
+ @pytest.mark.usefixtures("cleandir", "anotherfixture")
+
+and you may specify fixture usage at the test module level, using
+a generic feature of the mark mechanism:
+
+.. code-block:: python
+
+ pytestmark = pytest.mark.usefixtures("cleandir")
+
+Note that the assigned variable *must* be called ``pytestmark``; assigning e.g.
+``foomark`` will not activate the fixtures.
+
+Lastly you can put fixtures required by all tests in your project
+into an ini-file:
+
+.. code-block:: ini
+
+ # content of pytest.ini
+ [pytest]
+ usefixtures = cleandir
+
+
+.. _`autouse`:
+.. _`autouse fixtures`:
+
+Autouse fixtures (xUnit setup on steroids)
+----------------------------------------------------------------------
+
+.. regendoc:wipe
+
+Occasionally, you may want to have fixtures get invoked automatically
+without a `usefixtures`_ or `funcargs`_ reference. As a practical
+example, suppose we have a database fixture which has a
+begin/rollback/commit architecture and we want to automatically surround
+each test method by a transaction and a rollback. Here is a dummy
+self-contained implementation of this idea::
+
+ # content of test_db_transact.py
+
+ import pytest
+
+ class DB:
+ def __init__(self):
+ self.intransaction = []
+ def begin(self, name):
+ self.intransaction.append(name)
+ def rollback(self):
+ self.intransaction.pop()
+
+ @pytest.fixture(scope="module")
+ def db():
+ return DB()
+
+ class TestClass:
+ @pytest.fixture(autouse=True)
+ def transact(self, request, db):
+ db.begin(request.function.__name__)
+ request.addfinalizer(db.rollback)
+
+ def test_method1(self, db):
+ assert db.intransaction == ["test_method1"]
+
+ def test_method2(self, db):
+ assert db.intransaction == ["test_method2"]
+
+The class-level ``transact`` fixture is marked with ``autouse=True``,
+which implies that all test methods in the class will use this fixture
+without a need to state it in the test function signature or with a
+class-level ``usefixtures`` decorator.
+
+If we run it, we get two passing tests::
+
+ $ py.test -q
+ ..
+ 2 passed in 0.12 seconds
+
+Here is how autouse fixtures work in other scopes:
+
+- if an autouse fixture is defined in a test module, all its test
+ functions automatically use it.
+
+- if an autouse fixture is defined in a conftest.py file then all tests in
+ all test modules below its directory will invoke the fixture.
+
+- lastly, and **please use that with care**: if you define an autouse
+ fixture in a plugin, it will be invoked for all tests in all projects
+  where the plugin is installed. This can be useful if a fixture only
+  works in the presence of certain settings, e.g. in the ini-file. Such
+ a global fixture should always quickly determine if it should do
+ any work and avoid otherwise expensive imports or computation.
+
+Note that the above ``transact`` fixture may very well be a fixture that
+you want to make available in your project without having it generally
+active. The canonical way to do that is to put the transact definition
+into a conftest.py file **without** using ``autouse``::
+
+    # content of conftest.py
+    import pytest
+
+    @pytest.fixture()
+    def transact(request, db):
+        db.begin(request.function.__name__)
+        request.addfinalizer(db.rollback)
+
+and then e.g. have a TestClass using it by declaring the need::
+
+ @pytest.mark.usefixtures("transact")
+ class TestClass:
+ def test_method1(self):
+ ...
+
+All test methods in this TestClass will use the transaction fixture while
+other test classes or functions in the module will not use it unless
+they also add a ``transact`` reference.
+
+Shifting (visibility of) fixture functions
+----------------------------------------------------
+
+If during implementing your tests you realize that you
+want to use a fixture function from multiple test files you can move it
+to a :ref:`conftest.py <conftest.py>` file or even separately installable
+:ref:`plugins <plugins>` without changing test code. The discovery of
+fixtures functions starts at test classes, then test modules, then
+``conftest.py`` files and finally builtin and third party plugins.
+
+Overriding fixtures on various levels
+-------------------------------------
+
+In a relatively large test suite, you most likely need to ``override`` a ``global`` or ``root`` fixture with a ``locally``
+defined one, keeping the test code readable and maintainable.
+
+Override a fixture on a folder (conftest) level
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given the tests file structure is:
+
+::
+
+ tests/
+ __init__.py
+
+ conftest.py
+ # content of tests/conftest.py
+ import pytest
+
+ @pytest.fixture
+ def username():
+ return 'username'
+
+ test_something.py
+ # content of tests/test_something.py
+ def test_username(username):
+ assert username == 'username'
+
+ subfolder/
+ __init__.py
+
+ conftest.py
+ # content of tests/subfolder/conftest.py
+ import pytest
+
+ @pytest.fixture
+ def username(username):
+ return 'overridden-' + username
+
+ test_something.py
+ # content of tests/subfolder/test_something.py
+ def test_username(username):
+ assert username == 'overridden-username'
+
+As you can see, a fixture with the same name can be overridden at a certain test folder level.
+Note that the ``base`` or ``super`` fixture can easily be accessed from the ``overriding``
+fixture, as done in the example above.
+
+Override a fixture on a test module level
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given the tests file structure is:
+
+::
+
+ tests/
+ __init__.py
+
+ conftest.py
+ # content of tests/conftest.py
+ @pytest.fixture
+ def username():
+ return 'username'
+
+ test_something.py
+ # content of tests/test_something.py
+ import pytest
+
+ @pytest.fixture
+ def username(username):
+ return 'overridden-' + username
+
+ def test_username(username):
+ assert username == 'overridden-username'
+
+ test_something_else.py
+ # content of tests/test_something_else.py
+ import pytest
+
+ @pytest.fixture
+ def username(username):
+ return 'overridden-else-' + username
+
+ def test_username(username):
+ assert username == 'overridden-else-username'
+
+In the example above, a fixture with the same name can be overridden for a certain test module.
+
+
+Override a fixture with direct test parametrization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given the tests file structure is:
+
+::
+
+ tests/
+ __init__.py
+
+ conftest.py
+ # content of tests/conftest.py
+ import pytest
+
+ @pytest.fixture
+ def username():
+ return 'username'
+
+ @pytest.fixture
+ def other_username(username):
+ return 'other-' + username
+
+ test_something.py
+ # content of tests/test_something.py
+ import pytest
+
+ @pytest.mark.parametrize('username', ['directly-overridden-username'])
+ def test_username(username):
+ assert username == 'directly-overridden-username'
+
+ @pytest.mark.parametrize('username', ['directly-overridden-username-other'])
+ def test_username_other(other_username):
+        assert other_username == 'other-directly-overridden-username-other'
+
+In the example above, a fixture value is overridden by the test parameter value. Note that the value of the fixture
+can be overridden this way even if the test doesn't use it directly (doesn't mention it in the function prototype).
+
+
+Override a parametrized fixture with non-parametrized one and vice versa
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given the tests file structure is:
+
+::
+
+ tests/
+ __init__.py
+
+ conftest.py
+ # content of tests/conftest.py
+ import pytest
+
+ @pytest.fixture(params=['one', 'two', 'three'])
+ def parametrized_username(request):
+ return request.param
+
+ @pytest.fixture
+ def non_parametrized_username(request):
+ return 'username'
+
+ test_something.py
+ # content of tests/test_something.py
+ import pytest
+
+ @pytest.fixture
+ def parametrized_username():
+ return 'overridden-username'
+
+ @pytest.fixture(params=['one', 'two', 'three'])
+ def non_parametrized_username(request):
+ return request.param
+
+ def test_username(parametrized_username):
+ assert parametrized_username == 'overridden-username'
+
+ def test_parametrized_username(non_parametrized_username):
+ assert non_parametrized_username in ['one', 'two', 'three']
+
+ test_something_else.py
+ # content of tests/test_something_else.py
+ def test_username(parametrized_username):
+ assert parametrized_username in ['one', 'two', 'three']
+
+    def test_username_other(non_parametrized_username):
+ assert non_parametrized_username == 'username'
+
+In the example above, a parametrized fixture is overridden with a non-parametrized version, and
+a non-parametrized fixture is overridden with a parametrized version for a certain test module.
+The same obviously applies at the test folder level.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/funcarg_compare.rst b/testing/web-platform/tests/tools/pytest/doc/en/funcarg_compare.rst
new file mode 100644
index 000000000..832922e18
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/funcarg_compare.rst
@@ -0,0 +1,217 @@
+
+.. _`funcargcompare`:
+
+pytest-2.3: reasoning for fixture/funcarg evolution
+=============================================================
+
+**Target audience**: Reading this document requires basic knowledge of
+python testing, xUnit setup methods and the (previous) basic pytest
+funcarg mechanism, see http://pytest.org/2.2.4/funcargs.html
+If you are new to pytest, then you can simply ignore this
+section and read the other sections.
+
+.. currentmodule:: _pytest
+
+Shortcomings of the previous ``pytest_funcarg__`` mechanism
+--------------------------------------------------------------
+
+The pre pytest-2.3 funcarg mechanism calls a factory each time a
+funcarg for a test function is required. If a factory wanted to
+re-use a resource across different scopes, it often used
+the ``request.cached_setup()`` helper to manage caching of
+resources. Here is a basic example how we could implement
+a per-session Database object::
+
+ # content of conftest.py
+ class Database:
+ def __init__(self):
+ print ("database instance created")
+ def destroy(self):
+ print ("database instance destroyed")
+
+ def pytest_funcarg__db(request):
+        return request.cached_setup(setup=Database,
+                                    teardown=lambda db: db.destroy(),
+ scope="session")
+
+There are several limitations and difficulties with this approach:
+
+1. Scoping funcarg resource creation is not straightforward; instead one must
+ understand the intricate cached_setup() method mechanics.
+
+2. parametrizing the "db" resource is not straight forward:
+ you need to apply a "parametrize" decorator or implement a
+ :py:func:`~hookspec.pytest_generate_tests` hook
+ calling :py:func:`~python.Metafunc.parametrize` which
+ performs parametrization at the places where the resource
+ is used. Moreover, you need to modify the factory to use an
+ ``extrakey`` parameter containing ``request.param`` to the
+ :py:func:`~python.Request.cached_setup` call.
+
+3. Multiple parametrized session-scoped resources will be active
+ at the same time, making it hard for them to affect global state
+ of the application under test.
+
+4. There is no way to make use of funcarg factories
+ in xUnit setup methods.
+
+5. A non-parametrized fixture function cannot use a parametrized
+ funcarg resource if it isn't stated in the test function signature.
+
+All of these limitations are addressed with pytest-2.3 and its
+improved :ref:`fixture mechanism <fixture>`.
+
+
+Direct scoping of fixture/funcarg factories
+--------------------------------------------------------
+
+Instead of calling cached_setup() with a cache scope, you can use the
+:ref:`@pytest.fixture <pytest.fixture>` decorator and directly state
+the scope::
+
+ @pytest.fixture(scope="session")
+ def db(request):
+        # factory will only be invoked once per session
+        db = Database()
+ request.addfinalizer(db.destroy) # destroy when session is finished
+ return db
+
+This factory implementation does not need to call ``cached_setup()`` anymore
+because it will only be invoked once per session. Moreover, the
+``request.addfinalizer()`` registers a finalizer according to the specified
+resource scope on which the factory function is operating.
+
+
+Direct parametrization of funcarg resource factories
+----------------------------------------------------------
+
+Previously, funcarg factories could not directly cause parametrization.
+You needed to specify a ``@parametrize`` decorator on your test function
+or implement a ``pytest_generate_tests`` hook to perform
+parametrization, i.e. calling a test multiple times with different value
+sets. pytest-2.3 introduces a decorator for use on the factory itself::
+
+ @pytest.fixture(params=["mysql", "pg"])
+ def db(request):
+ ... # use request.param
+
+Here the factory will be invoked twice (with the respective "mysql"
+and "pg" values set as ``request.param`` attributes) and and all of
+the tests requiring "db" will run twice as well. The "mysql" and
+"pg" values will also be used for reporting the test-invocation variants.
+
+This new way of parametrizing funcarg factories should in many cases
+allow you to re-use already written factories because effectively
+``request.param`` was already used when test functions/classes were
+parametrized via
+:py:func:`~_pytest.python.Metafunc.parametrize(indirect=True)` calls.
+
+Of course it's perfectly fine to combine parametrization and scoping::
+
+ @pytest.fixture(scope="session", params=["mysql", "pg"])
+ def db(request):
+ if request.param == "mysql":
+ db = MySQL()
+ elif request.param == "pg":
+ db = PG()
+ request.addfinalizer(db.destroy) # destroy when session is finished
+ return db
+
+This would execute all tests requiring the per-session "db" resource twice,
+receiving the values created by the two respective invocations to the
+factory function.
+
+
+No ``pytest_funcarg__`` prefix when using @fixture decorator
+-------------------------------------------------------------------
+
+When using the ``@fixture`` decorator the name of the function
+denotes the name under which the resource can be accessed as a function
+argument::
+
+ @pytest.fixture()
+ def db(request):
+ ...
+
+The name under which the funcarg resource can be requested is ``db``.
+
+You can still use the "old" non-decorator way of specifying funcarg factories
+aka::
+
+ def pytest_funcarg__db(request):
+ ...
+
+
+But it is then not possible to define scoping and parametrization.
+It is thus recommended to use the factory decorator.
+
+
+Solving per-session setup / autouse fixtures
+--------------------------------------------------------------
+
+pytest for a long time offered a pytest_configure and a pytest_sessionstart
+hook which are often used to set up global resources. This suffers from
+several problems:
+
+1. In distributed testing the master process would set up test resources
+   that are never needed because it only co-ordinates the test run
+   activities of the slave processes.
+
+2. If you only perform a collection (with "--collect-only")
+   resource-setup will still be executed.
+
+3. If a pytest_sessionstart is contained in some subdirectories
+ conftest.py file, it will not be called. This stems from the
+ fact that this hook is actually used for reporting, in particular
+ the test-header with platform/custom information.
+
+Moreover, it was not easy to define a scoped setup from plugins or
+conftest files other than to implement a ``pytest_runtest_setup()`` hook
+and care for scoping/caching yourself. And it's virtually impossible
+to do this with parametrization as ``pytest_runtest_setup()`` is called
+during test execution and parametrization happens at collection time.
+
+It follows that pytest_configure/session/runtest_setup are often not
+appropriate for implementing common fixture needs. Therefore,
+pytest-2.3 introduces :ref:`autouse fixtures` which fully
+integrate with the generic :ref:`fixture mechanism <fixture>`
+and obsolete many prior uses of pytest hooks.
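+
+For illustration, here is a minimal sketch (the resource and names are made
+up) of a session-scoped autouse fixture in a ``conftest.py`` that covers the
+typical "global setup" use case of the hooks above::
+
+    # content of conftest.py (illustrative sketch)
+    import pytest
+
+    @pytest.fixture(scope="session", autouse=True)
+    def global_resource(request):
+        resource = {"configured": True}    # hypothetical global resource
+        def fin():
+            resource.clear()               # teardown at end of the session
+        request.addfinalizer(fin)
+        return resource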
+
+funcargs/fixture discovery now happens at collection time
+---------------------------------------------------------------------
+
+pytest-2.3 takes care to discover fixture/funcarg factories
+at collection time. This is more efficient especially for large test suites.
+Moreover, a call to "py.test --collect-only" should be able to in the future
+show a lot of setup-information and thus presents a nice method to get an
+overview of fixture management in your project.
+
+.. _`compatibility notes`:
+
+.. _`funcargscompat`:
+
+Conclusion and compatibility notes
+---------------------------------------------------------
+
+**funcargs** were originally introduced in pytest-2.0. In pytest-2.3
+the mechanism was extended and refined and is now described as
+fixtures:
+
+* Previously, funcarg factories were specified with a special
+ ``pytest_funcarg__NAME`` prefix instead of using the
+ ``@pytest.fixture`` decorator.
+
+* Factories received a ``request`` object which managed caching through
+ ``request.cached_setup()`` calls and allowed using other funcargs via
+ ``request.getfuncargvalue()`` calls. These intricate APIs made it hard
+ to do proper parametrization and implement resource caching. The
+  new :py:func:`pytest.fixture` decorator allows you to declare the scope
+  and lets pytest figure things out for you.
+
+* If you used parametrization and funcarg factories which made use of
+  ``request.cached_setup()`` it is recommended to invest a few minutes
+  and simplify your fixture function code to use the :ref:`@pytest.fixture`
+  decorator instead. This will also allow you to take advantage of
+ the automatic per-resource grouping of tests.
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/funcargs.rst b/testing/web-platform/tests/tools/pytest/doc/en/funcargs.rst
new file mode 100644
index 000000000..bc2c04302
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/funcargs.rst
@@ -0,0 +1,14 @@
+
+=======================================================
+funcargs: resource injection and parametrization
+=======================================================
+
+pytest-2.3 introduces major refinements to fixture management
+of which the funcarg mechanism introduced with pytest-2.0 remains
+a core part. The documentation has been refactored as well
+and you can read on here:
+
+- :ref:`fixtures`
+- :ref:`parametrize`
+- :ref:`funcargcompare`
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/genapi.py b/testing/web-platform/tests/tools/pytest/doc/en/genapi.py
new file mode 100644
index 000000000..f8cdda6cf
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/genapi.py
@@ -0,0 +1,41 @@
+import textwrap
+import inspect
+
+class Writer:
+ def __init__(self, clsname):
+ self.clsname = clsname
+
+ def __enter__(self):
+ self.file = open("%s.api" % self.clsname, "w")
+ return self
+
+ def __exit__(self, *args):
+ self.file.close()
+ print "wrote", self.file.name
+
+ def line(self, line):
+ self.file.write(line+"\n")
+
+ def docmethod(self, method):
+ doc = " ".join(method.__doc__.split())
+ indent = " "
+ w = textwrap.TextWrapper(initial_indent=indent,
+ subsequent_indent=indent)
+
+ spec = inspect.getargspec(method)
+ del spec.args[0]
+ self.line(".. py:method:: " + method.__name__ +
+ inspect.formatargspec(*spec))
+ self.line("")
+ self.line(w.fill(doc))
+ self.line("")
+
+def pytest_funcarg__a(request):
+ with Writer("request") as writer:
+ writer.docmethod(request.getfuncargvalue)
+ writer.docmethod(request.cached_setup)
+ writer.docmethod(request.addfinalizer)
+ writer.docmethod(request.applymarker)
+
+def test_hello(a):
+ pass
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/getting-started.rst b/testing/web-platform/tests/tools/pytest/doc/en/getting-started.rst
new file mode 100644
index 000000000..4a5b75aea
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/getting-started.rst
@@ -0,0 +1,237 @@
+Installation and Getting Started
+===================================
+
+**Pythons**: Python 2.6, 2.7, 3.3, 3.4, 3.5, Jython, PyPy-2.3
+
+**Platforms**: Unix/Posix and Windows
+
+**PyPI package name**: `pytest <http://pypi.python.org/pypi/pytest>`_
+
+**dependencies**: `py <http://pypi.python.org/pypi/py>`_,
+`colorama (Windows) <http://pypi.python.org/pypi/colorama>`_,
+`argparse (py26) <http://pypi.python.org/pypi/argparse>`_.
+
+**documentation as PDF**: `download latest <http://pytest.org/latest/pytest.pdf>`_
+
+.. _`getstarted`:
+.. _installation:
+
+Installation
+----------------------------------------
+
+Installation options::
+
+ pip install -U pytest # or
+ easy_install -U pytest
+
+To check that you have installed the correct version::
+
+ $ py.test --version
+ This is pytest version 2.9.1, imported from $PYTHON_PREFIX/lib/python3.4/site-packages/pytest.py
+
+If you get an error, check out :ref:`installation issues`.
+
+.. _`simpletest`:
+
+Our first test run
+----------------------------------------------------------
+
+Let's create a first test file with a simple test function::
+
+ # content of test_sample.py
+ def func(x):
+ return x + 1
+
+ def test_answer():
+ assert func(3) == 5
+
+That's it. You can execute the test function now::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_sample.py F
+
+ ======= FAILURES ========
+ _______ test_answer ________
+
+ def test_answer():
+ > assert func(3) == 5
+ E assert 4 == 5
+ E + where 4 = func(3)
+
+ test_sample.py:5: AssertionError
+ ======= 1 failed in 0.12 seconds ========
+
+We got a failure report because our little ``func(3)`` call did not return ``5``.
+
+.. note::
+
+ You can simply use the ``assert`` statement for asserting test
+ expectations. pytest's :ref:`assert introspection` will intelligently
+ report intermediate values of the assert expression freeing
+ you from the need to learn the many names of `JUnit legacy methods`_.
+
+.. _`JUnit legacy methods`: http://docs.python.org/library/unittest.html#test-cases
+
+.. _`assert statement`: http://docs.python.org/reference/simple_stmts.html#the-assert-statement
+
+Running multiple tests
+----------------------------------------------------------
+
+``pytest`` will run all files in the current directory and its subdirectories of the form test_*.py or \*_test.py. More generally, it follows :ref:`standard test discovery rules <test discovery>`.
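+
+For example, assuming a ``tests/`` directory containing such files (the
+directory name is only an illustration), all of them are collected with a
+single invocation::
+
+    py.test tests/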
+
+
+Asserting that a certain exception is raised
+--------------------------------------------------------------
+
+If you want to assert that some code raises an exception you can
+use the ``raises`` helper::
+
+ # content of test_sysexit.py
+ import pytest
+ def f():
+ raise SystemExit(1)
+
+ def test_mytest():
+ with pytest.raises(SystemExit):
+ f()
+
+Running it, this time in "quiet" reporting mode::
+
+ $ py.test -q test_sysexit.py
+ .
+ 1 passed in 0.12 seconds
+
+Grouping multiple tests in a class
+--------------------------------------------------------------
+
+Once you start to have more than a few tests it often makes sense
+to group tests logically, in classes and modules. Let's write a class
+containing two tests::
+
+ # content of test_class.py
+ class TestClass:
+ def test_one(self):
+ x = "this"
+ assert 'h' in x
+
+ def test_two(self):
+ x = "hello"
+ assert hasattr(x, 'check')
+
+The two tests are found because of the standard :ref:`test discovery`.
+There is no need to subclass anything. We can simply
+run the module by passing its filename::
+
+ $ py.test -q test_class.py
+ .F
+ ======= FAILURES ========
+ _______ TestClass.test_two ________
+
+ self = <test_class.TestClass object at 0xdeadbeef>
+
+ def test_two(self):
+ x = "hello"
+ > assert hasattr(x, 'check')
+ E assert hasattr('hello', 'check')
+
+ test_class.py:8: AssertionError
+ 1 failed, 1 passed in 0.12 seconds
+
+The first test passed, the second failed. Again we can easily see
+the intermediate values used in the assertion, helping us to
+understand the reason for the failure.
+
+Going functional: requesting a unique temporary directory
+--------------------------------------------------------------
+
+For functional tests one often needs to create some files
+and pass them to application objects. pytest provides
+:ref:`builtinfixtures` which allow you to request arbitrary
+resources, for example a unique temporary directory::
+
+ # content of test_tmpdir.py
+ def test_needsfiles(tmpdir):
+ print (tmpdir)
+ assert 0
+
+We list the name ``tmpdir`` in the test function signature and
+``pytest`` will look up and call a fixture factory to create the resource
+before performing the test function call. Let's just run it::
+
+ $ py.test -q test_tmpdir.py
+ F
+ ======= FAILURES ========
+ _______ test_needsfiles ________
+
+ tmpdir = local('PYTEST_TMPDIR/test_needsfiles0')
+
+ def test_needsfiles(tmpdir):
+ print (tmpdir)
+ > assert 0
+ E assert 0
+
+ test_tmpdir.py:3: AssertionError
+ --------------------------- Captured stdout call ---------------------------
+ PYTEST_TMPDIR/test_needsfiles0
+ 1 failed in 0.12 seconds
+
+Before the test runs, a unique-per-test-invocation temporary directory
+was created. More info at :ref:`tmpdir handling`.
+
+You can find out what kind of builtin :ref:`fixtures` exist by typing::
+
+ py.test --fixtures # shows builtin and custom fixtures
+
+Where to go next
+-------------------------------------
+
+Here are a few suggestions where to go next:
+
+* :ref:`cmdline` for command line invocation examples
+* :ref:`good practices <goodpractices>` for virtualenv, test layout, genscript support
+* :ref:`fixtures` for providing a functional baseline to your tests
+* :ref:`apiref` for documentation and examples on using ``pytest``
+* :ref:`plugins` managing and writing plugins
+
+.. _`installation issues`:
+
+Known Installation issues
+------------------------------
+
+easy_install or pip not found?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. _`install pip`: http://www.pip-installer.org/en/latest/index.html
+
+`Install pip`_ for a state of the art python package installer.
+
+Install `setuptools`_ to get ``easy_install``, which allows you to install
+``.egg`` binary format packages in addition to source-based ones.
+
+py.test not found on Windows despite installation?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. _`Python for Windows`: http://www.imladris.com/Scripts/PythonForWindows.html
+
+- **Windows**: If "easy_install" or "py.test" are not found
+ you need to add the Python script path to your ``PATH``, see here:
+ `Python for Windows`_. You may alternatively use an `ActivePython install`_
+ which does this for you automatically.
+
+.. _`ActivePython install`: http://www.activestate.com/activepython/downloads
+
+.. _`Jython does not create command line launchers`: http://bugs.jython.org/issue1491
+
+- **Jython2.5.1 on Windows XP**: `Jython does not create command line launchers`_
+ so ``py.test`` will not work correctly. You may install py.test on
+ CPython and type ``py.test --genscript=mytest`` and then use
+ ``jython mytest`` to run your tests with Jython using ``pytest``.
+
+See :ref:`examples` for more complex examples.
+
+.. include:: links.inc
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/goodpractices.rst b/testing/web-platform/tests/tools/pytest/doc/en/goodpractices.rst
new file mode 100644
index 000000000..2d8050bd9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/goodpractices.rst
@@ -0,0 +1,278 @@
+.. highlightlang:: python
+.. _`goodpractices`:
+
+Good Integration Practices
+=================================================
+
+
+.. _`test discovery`:
+.. _`Python test discovery`:
+
+Conventions for Python test discovery
+-------------------------------------------------
+
+``pytest`` implements the following standard test discovery:
+
+* If no arguments are specified then collection starts from :confval:`testpaths`
+ (if configured) or the current directory. Alternatively, command line arguments
+ can be used in any combination of directories, file names or node ids.
+* recurse into directories, unless they match :confval:`norecursedirs`
+* ``test_*.py`` or ``*_test.py`` files, imported by their `test package name`_.
+* ``Test`` prefixed test classes (without an ``__init__`` method)
+* ``test_`` prefixed test functions or methods are test items
+
+For examples of how to customize your test discovery, see :doc:`example/pythoncollection`.
+
+Within Python modules, ``pytest`` also discovers tests using the standard
+:ref:`unittest.TestCase <unittest.TestCase>` subclassing technique.
+
+
+Choosing a test layout / import rules
+------------------------------------------
+
+``pytest`` supports two common test layouts:
+
+* putting tests into an extra directory outside your actual application
+ code, useful if you have many functional tests or for other reasons
+ want to keep tests separate from actual application code (often a good
+ idea)::
+
+ setup.py # your setuptools Python package metadata
+ mypkg/
+ __init__.py
+ appmodule.py
+ tests/
+ test_app.py
+ ...
+
+
+* inlining test directories into your application package, useful if you
+ have direct relation between (unit-)test and application modules and
+ want to distribute your tests along with your application::
+
+ setup.py # your setuptools Python package metadata
+ mypkg/
+ __init__.py
+ appmodule.py
+ ...
+ test/
+ test_app.py
+ ...
+
+Important notes relating to both schemes:
+
+- **make sure that "mypkg" is importable**, for example by typing once::
+
+ pip install -e . # install package using setup.py in editable mode
+
+- **avoid "__init__.py" files in your test directories**.
+ This way your tests can run easily against an installed version
+  of ``mypkg``, independently of whether the installed package
+  contains the tests or not.
+
+- With inlined tests you might put ``__init__.py`` into test
+ directories and make them installable as part of your application.
+ Using the ``py.test --pyargs mypkg`` invocation pytest will
+ discover where mypkg is installed and collect tests from there.
+ With the "external" test you can still distribute tests but they
+ will not be installed or become importable.
+
+Typically you can run tests by pointing to test directories or modules::
+
+ py.test tests/test_app.py # for external test dirs
+ py.test mypkg/test/test_app.py # for inlined test dirs
+ py.test mypkg # run tests in all below test directories
+ py.test # run all tests below current dir
+ ...
+
+Because of the above ``editable install`` mode you can change your
+source code (both tests and the app) and rerun tests at will.
+Once you are done with your work, you can `use tox`_ to make sure
+that the package is really correct and tests pass in all
+required configurations.
+
+.. note::
+
+ You can use Python3 namespace packages (PEP420) for your application
+ but pytest will still perform `test package name`_ discovery based on the
+ presence of ``__init__.py`` files. If you use one of the
+ two recommended file system layouts above but leave away the ``__init__.py``
+ files from your directories it should just work on Python3.3 and above. From
+ "inlined tests", however, you will need to use absolute imports for
+ getting at your application code.
+
+.. _`test package name`:
+
+.. note::
+
+ If ``pytest`` finds a "a/b/test_module.py" test file while
+ recursing into the filesystem it determines the import name
+ as follows:
+
+ * determine ``basedir``: this is the first "upward" (towards the root)
+ directory not containing an ``__init__.py``. If e.g. both ``a``
+ and ``b`` contain an ``__init__.py`` file then the parent directory
+ of ``a`` will become the ``basedir``.
+
+ * perform ``sys.path.insert(0, basedir)`` to make the test module
+ importable under the fully qualified import name.
+
+ * ``import a.b.test_module`` where the path is determined
+ by converting path separators ``/`` into "." characters. This means
+ you must follow the convention of having directory and file
+ names map directly to the import names.
+
+ The reason for this somewhat evolved importing technique is
+ that in larger projects multiple test modules might import
+ from each other and thus deriving a canonical import name helps
+   to avoid surprises such as a test module getting imported twice.
+
+
+.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
+.. _`buildout`: http://www.buildout.org/
+.. _pip: http://pypi.python.org/pypi/pip
+
+.. _`use tox`:
+
+Tox
+------
+
+For development, we recommend using virtualenv_ environments and pip_
+for installing your application and any dependencies
+as well as the ``pytest`` package itself. This ensures your code and
+dependencies are isolated from the system Python installation.
+
+If you frequently release code and want to make sure that your actual
+package passes all tests you may want to look into `tox`_, the
+virtualenv test automation tool and its `pytest support
+<http://testrun.org/tox/latest/example/pytest.html>`_.
+Tox helps you to set up virtualenv environments with pre-defined
+dependencies and then execute a pre-configured test command with
+options. It will run tests against the installed package and not
+against your source code checkout, helping to detect packaging
+glitches.
+
+Continuous integration services such as Jenkins_ can make use of the
+``--junitxml=PATH`` option to create a JUnitXML file and generate reports.
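+
+For example (the results file name below is just an illustration)::
+
+    py.test --junitxml=results.xml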
+
+
+Integrating with setuptools / ``python setup.py test`` / ``pytest-runner``
+--------------------------------------------------------------------------
+
+You can integrate test runs into your setuptools based project
+with the `pytest-runner <https://pypi.python.org/pypi/pytest-runner>`_ plugin.
+
+Add this to ``setup.py`` file:
+
+.. code-block:: python
+
+ from setuptools import setup
+
+ setup(
+ #...,
+ setup_requires=['pytest-runner', ...],
+ tests_require=['pytest', ...],
+ #...,
+ )
+
+
+And create an alias into ``setup.cfg`` file:
+
+
+.. code-block:: ini
+
+ [aliases]
+ test=pytest
+
+If you now type::
+
+ python setup.py test
+
+this will execute your tests using ``pytest-runner``. As this is a
+standalone version of ``pytest``, no prior installation whatsoever is
+required for calling the test command. You can also pass additional
+arguments to py.test such as your test directory or other
+options using ``--addopts``.
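+
+For example, to pass extra options through to ``py.test`` (a sketch; the
+options shown are illustrative)::
+
+    python setup.py pytest --addopts "--durations=5"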
+
+
+Manual Integration
+^^^^^^^^^^^^^^^^^^
+
+If for some reason you don't want/can't use ``pytest-runner``, you can write
+your own setuptools Test command for invoking pytest.
+
+.. code-block:: python
+
+    import sys
+
+    from setuptools import setup
+    from setuptools.command.test import test as TestCommand
+
+
+ class PyTest(TestCommand):
+ user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
+
+ def initialize_options(self):
+ TestCommand.initialize_options(self)
+ self.pytest_args = []
+
+ def run_tests(self):
+        # import here, because outside the eggs aren't loaded
+ import pytest
+ errno = pytest.main(self.pytest_args)
+ sys.exit(errno)
+
+
+ setup(
+ #...,
+ tests_require=['pytest'],
+ cmdclass = {'test': PyTest},
+ )
+
+Now if you run::
+
+ python setup.py test
+
+this will download ``pytest`` if needed and then run your tests
+as you would expect it to. You can pass a single string of arguments
+using the ``--pytest-args`` or ``-a`` command-line option. For example::
+
+ python setup.py test -a "--durations=5"
+
+is equivalent to running ``py.test --durations=5``.
+
+
+.. _standalone:
+.. _`genscript method`:
+
+(deprecated) Create a pytest standalone script
+-----------------------------------------------
+
+.. deprecated:: 2.8
+
+.. note::
+
+ ``genscript`` has been deprecated because:
+
+ * It cannot support plugins, rendering its usefulness extremely limited;
+ * Tooling has become much better since ``genscript`` was introduced;
+ * It is possible to build a zipped ``pytest`` application without the
+ shortcomings above.
+
+    At the time of this writing there is no planned version in which this
+    command will be removed, but its use is discouraged for new
+    applications.
+
+If you are a maintainer or application developer and want people
+who don't deal with Python much to easily run tests, you may generate
+a standalone ``pytest`` script::
+
+ py.test --genscript=runtests.py
+
+This generates a ``runtests.py`` script which is a fully functional basic
+``pytest`` script, running unchanged under Python 2 and Python 3.
+You can tell people to download the script and then e.g. run it like this::
+
+ python runtests.py
+
+
+.. include:: links.inc
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/cramer2.png b/testing/web-platform/tests/tools/pytest/doc/en/img/cramer2.png
new file mode 100644
index 000000000..6bf0e92e2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/cramer2.png
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/freiburg2.jpg b/testing/web-platform/tests/tools/pytest/doc/en/img/freiburg2.jpg
new file mode 100644
index 000000000..3383d3023
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/freiburg2.jpg
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/gaynor3.png b/testing/web-platform/tests/tools/pytest/doc/en/img/gaynor3.png
new file mode 100644
index 000000000..a577c168b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/gaynor3.png
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/keleshev.png b/testing/web-platform/tests/tools/pytest/doc/en/img/keleshev.png
new file mode 100644
index 000000000..0d5e571e2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/keleshev.png
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/pullrequest.png b/testing/web-platform/tests/tools/pytest/doc/en/img/pullrequest.png
new file mode 100644
index 000000000..4af293b21
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/pullrequest.png
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/pylib.png b/testing/web-platform/tests/tools/pytest/doc/en/img/pylib.png
new file mode 100644
index 000000000..2e10d4388
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/pylib.png
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/pytest1.png b/testing/web-platform/tests/tools/pytest/doc/en/img/pytest1.png
new file mode 100644
index 000000000..e8064a694
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/pytest1.png
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/pytest1favi.ico b/testing/web-platform/tests/tools/pytest/doc/en/img/pytest1favi.ico
new file mode 100644
index 000000000..6a34fe5c9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/pytest1favi.ico
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/img/theuni.png b/testing/web-platform/tests/tools/pytest/doc/en/img/theuni.png
new file mode 100644
index 000000000..abeb737e7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/img/theuni.png
Binary files differ
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/index.rst b/testing/web-platform/tests/tools/pytest/doc/en/index.rst
new file mode 100644
index 000000000..04b4512da
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/index.rst
@@ -0,0 +1,61 @@
+
+.. _features:
+
+pytest: helps you write better programs
+=============================================
+
+**a mature full-featured Python testing tool**
+
+ - runs on POSIX/Windows, Python 2.6-3.5, PyPy and (possibly still) Jython-2.5.1
+ - free and open source software, distributed under the terms of the :ref:`MIT license <license>`
+ - **well tested** with more than a thousand tests against itself
+ - **strict backward compatibility policy** for safe pytest upgrades
+ - :ref:`comprehensive online <toc>` and `PDF documentation <pytest.pdf>`_
+ - many :ref:`third party plugins <extplugins>` and :ref:`builtin helpers <pytest helpers>`
+ - used in :ref:`many small and large projects and organisations <projects>`
+ - comes with many :ref:`tested examples <examples>`
+
+**provides easy no-boilerplate testing**
+
+ - makes it :ref:`easy to get started <getstarted>` and
+   has many :ref:`usage options <usage>`
+ - :ref:`assert with the assert statement`
+ - helpful :ref:`traceback and failing assertion reporting <tbreportdemo>`
+ - :ref:`print debugging <printdebugging>` and :ref:`the
+ capturing of standard output during test execution <captures>`
+
+**scales from simple unit to complex functional testing**
+
+ - :ref:`modular parametrizable fixtures <fixture>` (new in 2.3,
+   continuously improved)
+ - :ref:`parametrized test functions <parametrized test functions>`
+ - :ref:`mark`
+ - :ref:`skipping` (improved in 2.4)
+ - :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
+ - :ref:`continuously re-run failing tests <looponfailing>`
+ - :doc:`cache`
+ - flexible :ref:`Python test discovery`
+
+**integrates with other testing methods and tools**:
+
+ - multi-paradigm: pytest can run ``nose``, ``unittest`` and
+   ``doctest`` style test suites, including running test cases made for
+   Django and trial
+ - supports :ref:`good integration practices <goodpractices>`
+ - supports extended :ref:`xUnit style setup <xunitsetup>`
+ - supports domain-specific :ref:`non-python tests`
+ - supports generating `test coverage reports
+ <https://pypi.python.org/pypi/pytest-cov>`_
+ - supports :pep:`8` compliant coding styles in tests
+
+**extensive plugin and customization system**:
+
+ - all collection, reporting, running aspects are delegated to hook functions
+ - customizations can be per-directory, per-project or per PyPI released plugin
+ - it is easy to add command line options or customize existing behaviour
+ - :ref:`easy to write your own plugins <writing-plugins>`
+
+
+.. _`easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/license.rst b/testing/web-platform/tests/tools/pytest/doc/en/license.rst
new file mode 100644
index 000000000..3fc1dad52
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/license.rst
@@ -0,0 +1,32 @@
+.. _license:
+
+License
+-------
+
+Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
+
+::
+
+ The MIT License (MIT)
+
+ Copyright (c) 2004-2016 Holger Krekel and others
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+.. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/links.inc b/testing/web-platform/tests/tools/pytest/doc/en/links.inc
new file mode 100644
index 000000000..3d7863751
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/links.inc
@@ -0,0 +1,21 @@
+
+.. _`skipping plugin`: plugin/skipping.html
+.. _`funcargs mechanism`: funcargs.html
+.. _`doctest.py`: http://docs.python.org/library/doctest.html
+.. _`xUnit style setup`: xunit_setup.html
+.. _`pytest_nose`: plugin/nose.html
+.. _`reStructured Text`: http://docutils.sourceforge.net
+.. _`Python debugger`: http://docs.python.org/lib/module-pdb.html
+.. _nose: https://nose.readthedocs.org/en/latest/
+.. _pytest: http://pypi.python.org/pypi/pytest
+.. _mercurial: http://mercurial.selenic.com/wiki/
+.. _`setuptools`: http://pypi.python.org/pypi/setuptools
+.. _`easy_install`:
+.. _`distribute docs`:
+.. _`distribute`: http://pypi.python.org/pypi/distribute
+.. _`pip`: http://pypi.python.org/pypi/pip
+.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
+.. _hudson: http://hudson-ci.org/
+.. _jenkins: http://jenkins-ci.org/
+.. _tox: http://testrun.org/tox
+.. _pylib: http://py.readthedocs.org/en/latest/
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/mark.rst b/testing/web-platform/tests/tools/pytest/doc/en/mark.rst
new file mode 100644
index 000000000..ab9546d31
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/mark.rst
@@ -0,0 +1,40 @@
+
+.. _mark:
+
+Marking test functions with attributes
+=================================================================
+
+.. currentmodule:: _pytest.mark
+
+By using the ``pytest.mark`` helper you can easily set
+metadata on your test functions. There are
+some builtin markers, for example:
+
+* :ref:`skipif <skipif>` - skip a test function if a certain condition is met
+* :ref:`xfail <xfail>` - produce an "expected failure" outcome if a certain
+ condition is met
+* :ref:`parametrize <parametrizemark>` to perform multiple calls
+ to the same test function.
+
+It's easy to create custom markers or to apply markers
+to whole test classes or modules. See :ref:`mark examples` for examples
+which also serve as documentation.
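+
+For instance, a custom marker (the ``webtest`` name here is purely
+illustrative) is applied just like a builtin one::
+
+    import pytest
+
+    @pytest.mark.webtest
+    def test_send_http():
+        ...  # perform some webtest-flavoured test for your app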
+
+.. note::
+
+ Marks can only be applied to tests, having no effect on
+ :ref:`fixtures <fixtures>`.
+
+
+API reference for mark related objects
+------------------------------------------------
+
+.. autoclass:: MarkGenerator
+ :members:
+
+.. autoclass:: MarkDecorator
+ :members:
+
+.. autoclass:: MarkInfo
+ :members:
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/monkeypatch.rst b/testing/web-platform/tests/tools/pytest/doc/en/monkeypatch.rst
new file mode 100644
index 000000000..4155a3a34
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/monkeypatch.rst
@@ -0,0 +1,82 @@
+
+Monkeypatching/mocking modules and environments
+================================================================
+
+.. currentmodule:: _pytest.monkeypatch
+
+Sometimes tests need to invoke functionality which depends
+on global settings or which invokes code which cannot be easily
+tested, such as network access. The ``monkeypatch`` function argument
+helps you safely set/delete an attribute, dictionary item or
+environment variable, or modify ``sys.path`` for importing.
+See the `monkeypatch blog post`_ for some introduction material
+and a discussion of its motivation.
+
+.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
+
+Simple example: monkeypatching functions
+---------------------------------------------------
+
+If you want to pretend that ``os.path.expanduser`` returns a certain
+directory, you can use the :py:meth:`monkeypatch.setattr` method to
+patch this function before calling into a function which uses it::
+
+ # content of test_module.py
+ import os.path
+ def getssh(): # pseudo application code
+ return os.path.join(os.path.expanduser("~admin"), '.ssh')
+
+ def test_mytest(monkeypatch):
+ def mockreturn(path):
+ return '/abc'
+ monkeypatch.setattr(os.path, 'expanduser', mockreturn)
+ x = getssh()
+ assert x == '/abc/.ssh'
+
+Here our test function monkeypatches ``os.path.expanduser`` and
+then calls into a function that calls it. After the test function
+finishes, the ``os.path.expanduser`` modification will be undone.
+
+example: preventing "requests" from remote operations
+------------------------------------------------------
+
+If you want to prevent the "requests" library from performing http
+requests in all your tests, you can do::
+
+ # content of conftest.py
+ import pytest
+ @pytest.fixture(autouse=True)
+ def no_requests(monkeypatch):
+ monkeypatch.delattr("requests.sessions.Session.request")
+
+This autouse fixture will be executed for each test function and it
+will delete the method ``requests.sessions.Session.request``
+so that any attempts within tests to create http requests will fail.
+
+example: setting an attribute on some class
+------------------------------------------------------
+
+If you need to patch out ``os.getcwd()`` to return an artificial
+value::
+
+ def test_some_interaction(monkeypatch):
+ monkeypatch.setattr("os.getcwd", lambda: "/")
+
+which is equivalent to the long form::
+
+ def test_some_interaction(monkeypatch):
+ import os
+ monkeypatch.setattr(os, "getcwd", lambda: "/")
+
+
+
+Method reference of the monkeypatch function argument
+-----------------------------------------------------
+
+.. autoclass:: monkeypatch
+ :members: setattr, replace, delattr, setitem, delitem, setenv, delenv, syspath_prepend, chdir, undo
+
+``monkeypatch.setattr/delattr/delitem/delenv()`` all
+by default raise an Exception if the target does not exist.
+Pass ``raising=False`` if you want to skip this check.
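+
+For example (a minimal sketch; the environment variable name is purely
+illustrative)::
+
+    def test_cleanup(monkeypatch):
+        # does not fail even if "SOME_ENV_VAR" was never set
+        monkeypatch.delenv("SOME_ENV_VAR", raising=False)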
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/naming20.rst b/testing/web-platform/tests/tools/pytest/doc/en/naming20.rst
new file mode 100644
index 000000000..5a81df269
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/naming20.rst
@@ -0,0 +1,20 @@
+
+.. _naming20:
+
+New pytest names in 2.0 (flat is better than nested)
+----------------------------------------------------
+
+If you used an older version of the ``py`` distribution (which
+included the py.test command line tool and Python namespace)
+you accessed helpers and possibly collection classes through
+the ``py.test`` Python namespaces. The new ``pytest``
+Python module flatly provides the same objects, following
+these renaming rules::
+
+ py.test.XYZ -> pytest.XYZ
+ py.test.collect.XYZ -> pytest.XYZ
+ py.test.cmdline.main -> pytest.main
+
+The old ``py.test.*`` ways to access functionality remain
+valid but you are encouraged to do global renaming according
+to the above rules in your test code.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/nose.rst b/testing/web-platform/tests/tools/pytest/doc/en/nose.rst
new file mode 100644
index 000000000..3b92e04cf
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/nose.rst
@@ -0,0 +1,55 @@
+Running tests written for nose
+=======================================
+
+.. include:: links.inc
+
+``pytest`` has basic support for running tests written for nose_.
+
+.. _nosestyle:
+
+Usage
+-------------
+
+After :ref:`installation` type::
+
+ python setup.py develop # make sure tests can import our package
+ py.test # instead of 'nosetests'
+
+and you should be able to run your nose style tests and
+make use of pytest's capabilities.
+
+Supported nose Idioms
+----------------------
+
+* setup and teardown at module/class/method level
+* SkipTest exceptions and markers
+* setup/teardown decorators
+* yield-based tests and their setup
+* ``__test__`` attribute on modules/classes/functions
+* general usage of nose utilities
+
+Unsupported idioms / known issues
+----------------------------------
+
+- unittest-style ``setUp, tearDown, setUpClass, tearDownClass``
+ are recognized only on ``unittest.TestCase`` classes but not
+ on plain classes. ``nose`` supports these methods also on plain
+ classes but pytest deliberately does not. As nose and pytest already
+ both support ``setup_class, teardown_class, setup_method, teardown_method``
+ it doesn't seem useful to duplicate the unittest-API like nose does.
+  If you nevertheless think pytest should support the unittest spelling on
+  plain classes, please comment on `this issue
+  <https://github.com/pytest-dev/pytest/issues/377/>`_.
+
+- nose imports test modules with the same import path (e.g.
+ ``tests.test_mod``) but different file system paths
+  (e.g. ``tests/test_mod.py`` and ``other/tests/test_mod.py``)
+  by extending sys.path/import semantics. pytest does not do that
+  but there is discussion in `issue268 <https://github.com/pytest-dev/pytest/issues/268>`_ for adding some support. Note that
+  `nose2 chose to avoid this sys.path/import hackery <https://nose2.readthedocs.org/en/latest/differences.html#test-discovery-and-loading>`_.
+
+- nose-style doctests are not collected and executed correctly;
+  doctest fixtures also don't work.
+
+- nose configuration files are not recognized
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/overview.rst b/testing/web-platform/tests/tools/pytest/doc/en/overview.rst
new file mode 100644
index 000000000..eb2619775
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/overview.rst
@@ -0,0 +1,13 @@
+==================================================
+Getting started basics
+==================================================
+
+.. toctree::
+ :maxdepth: 2
+
+ getting-started
+ usage
+ goodpractices
+ projects
+ faq
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/parametrize.rst b/testing/web-platform/tests/tools/pytest/doc/en/parametrize.rst
new file mode 100644
index 000000000..919ac93d2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/parametrize.rst
@@ -0,0 +1,219 @@
+
+.. _`test generators`:
+.. _`parametrizing-tests`:
+.. _`parametrized test functions`:
+.. _`parametrize`:
+
+.. _`parametrize-basics`:
+
+Parametrizing fixtures and test functions
+==========================================================================
+
+pytest supports test parametrization in several well-integrated ways:
+
+* :py:func:`pytest.fixture` lets you define :ref:`parametrization
+  at the level of fixture functions <fixture-parametrize>`.
+
+* `@pytest.mark.parametrize`_ lets you define parametrization at the
+  function or class level, providing multiple argument/fixture sets
+  for a particular test function or class.
+
+* `pytest_generate_tests`_ enables implementing your own custom
+  dynamic parametrization scheme or extensions.
+
+.. _parametrizemark:
+.. _`@pytest.mark.parametrize`:
+
+
+``@pytest.mark.parametrize``: parametrizing test functions
+---------------------------------------------------------------------
+
+.. regendoc: wipe
+
+.. versionadded:: 2.2
+.. versionchanged:: 2.4
+ Several improvements.
+
+The builtin ``pytest.mark.parametrize`` decorator enables
+parametrization of arguments for a test function. Here is a typical example
+of a test function that implements checking that a certain input leads
+to an expected output::
+
+ # content of test_expectation.py
+ import pytest
+ @pytest.mark.parametrize("test_input,expected", [
+ ("3+5", 8),
+ ("2+4", 6),
+ ("6*9", 42),
+ ])
+ def test_eval(test_input, expected):
+ assert eval(test_input) == expected
+
+Here, the ``@parametrize`` decorator defines three different ``(test_input,expected)``
+tuples so that the ``test_eval`` function will run three times using
+them in turn::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 3 items
+
+ test_expectation.py ..F
+
+ ======= FAILURES ========
+ _______ test_eval[6*9-42] ________
+
+ test_input = '6*9', expected = 42
+
+ @pytest.mark.parametrize("test_input,expected", [
+ ("3+5", 8),
+ ("2+4", 6),
+ ("6*9", 42),
+ ])
+ def test_eval(test_input, expected):
+ > assert eval(test_input) == expected
+ E assert 54 == 42
+ E + where 54 = eval('6*9')
+
+ test_expectation.py:8: AssertionError
+ ======= 1 failed, 2 passed in 0.12 seconds ========
+
+As designed in this example, only one pair of input/output values fails
+the simple test function. And as usual with test function arguments,
+you can see the ``test_input`` and ``expected`` values in the traceback.
+
+Note that you could also use the parametrize marker on a class or a module
+(see :ref:`mark`) which would invoke several functions with the argument sets.
+
+It is also possible to mark individual test instances within parametrize,
+for example with the builtin ``mark.xfail``::
+
+ # content of test_expectation.py
+ import pytest
+ @pytest.mark.parametrize("test_input,expected", [
+ ("3+5", 8),
+ ("2+4", 6),
+ pytest.mark.xfail(("6*9", 42)),
+ ])
+ def test_eval(test_input, expected):
+ assert eval(test_input) == expected
+
+Let's run this::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 3 items
+
+ test_expectation.py ..x
+
+ ======= 2 passed, 1 xfailed in 0.12 seconds ========
+
+The one parameter set which caused a failure previously now
+shows up as an "xfailed (expected to fail)" test.
+
+To get all combinations of multiple parametrized arguments you can stack
+``parametrize`` decorators::
+
+ import pytest
+ @pytest.mark.parametrize("x", [0, 1])
+ @pytest.mark.parametrize("y", [2, 3])
+ def test_foo(x, y):
+ pass
+
+This will run the test with the arguments set to x=0/y=2, x=0/y=3, x=1/y=2 and
+x=1/y=3.
+
+.. note::
+
+ In versions prior to 2.4 one needed to specify the argument
+ names as a tuple. This remains valid but the simpler ``"name1,name2,..."``
+ comma-separated-string syntax is now advertised first because
+ it's easier to write and produces less line noise.
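+
+   For comparison, the older tuple spelling looked like this (shown here
+   purely as an illustration)::
+
+       @pytest.mark.parametrize(("test_input", "expected"), [("3+5", 8)])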
+
+.. _`pytest_generate_tests`:
+
+Basic ``pytest_generate_tests`` example
+---------------------------------------------
+
+Sometimes you may want to implement your own parametrization scheme
+or implement some dynamism for determining the parameters or scope
+of a fixture. For this, you can use the ``pytest_generate_tests`` hook
+which is called when collecting a test function. Through the passed in
+``metafunc`` object you can inspect the requesting test context and, most
+importantly, you can call ``metafunc.parametrize()`` to cause
+parametrization.
+
+For example, let's say we want to run a test taking string inputs which
+we want to set via a new ``pytest`` command line option. Let's first write
+a simple test accepting a ``stringinput`` fixture function argument::
+
+ # content of test_strings.py
+
+ def test_valid_string(stringinput):
+ assert stringinput.isalpha()
+
+Now we add a ``conftest.py`` file containing the addition of a
+command line option and the parametrization of our test function::
+
+ # content of conftest.py
+
+ def pytest_addoption(parser):
+ parser.addoption("--stringinput", action="append", default=[],
+ help="list of stringinputs to pass to test functions")
+
+ def pytest_generate_tests(metafunc):
+ if 'stringinput' in metafunc.fixturenames:
+ metafunc.parametrize("stringinput",
+ metafunc.config.option.stringinput)
+
+If we now pass two stringinput values, our test will run twice::
+
+ $ py.test -q --stringinput="hello" --stringinput="world" test_strings.py
+ ..
+ 2 passed in 0.12 seconds
+
+Let's also run with a stringinput that will lead to a failing test::
+
+ $ py.test -q --stringinput="!" test_strings.py
+ F
+ ======= FAILURES ========
+ _______ test_valid_string[!] ________
+
+ stringinput = '!'
+
+ def test_valid_string(stringinput):
+ > assert stringinput.isalpha()
+ E assert <built-in method isalpha of str object at 0xdeadbeef>()
+ E + where <built-in method isalpha of str object at 0xdeadbeef> = '!'.isalpha
+
+ test_strings.py:3: AssertionError
+ 1 failed in 0.12 seconds
+
+As expected, our test function fails.
+
+If you don't specify a stringinput, the test will be skipped because
+``metafunc.parametrize()`` will be called with an empty parameter
+list::
+
+ $ py.test -q -rs test_strings.py
+ s
+ ======= short test summary info ========
+ SKIP [1] $PYTHON_PREFIX/lib/python3.4/site-packages/_pytest/python.py:1419: got empty parameter set, function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1
+ 1 skipped in 0.12 seconds
+
+For further examples, you might want to look at :ref:`more
+parametrization examples <paramexamples>`.
+
+.. _`metafunc object`:
+
+The **metafunc** object
+-------------------------------------------
+
+.. currentmodule:: _pytest.python
+.. autoclass:: Metafunc
+
+ .. automethod:: Metafunc.parametrize
+ .. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists)
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/plugins.rst b/testing/web-platform/tests/tools/pytest/doc/en/plugins.rst
new file mode 100644
index 000000000..e9b3f460c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/plugins.rst
@@ -0,0 +1,159 @@
+.. _`external plugins`:
+.. _`extplugins`:
+.. _`using plugins`:
+
+Installing and Using plugins
+============================
+
+This section talks about installing and using third party plugins.
+For writing your own plugins, please refer to :ref:`writing-plugins`.
+
+Installing a third party plugin can be easily done with ``pip``::
+
+ pip install pytest-NAME
+ pip uninstall pytest-NAME
+
+If a plugin is installed, ``pytest`` automatically finds and integrates it;
+there is no need to activate it.
+
+Here is a little annotated list of some popular plugins:
+
+.. _`django`: https://www.djangoproject.com/
+
+* `pytest-django <http://pypi.python.org/pypi/pytest-django>`_: write tests
+ for `django`_ apps, using pytest integration.
+
+* `pytest-twisted <http://pypi.python.org/pypi/pytest-twisted>`_: write tests
+ for `twisted <http://twistedmatrix.com>`_ apps, starting a reactor and
+ processing deferreds from test functions.
+
+* `pytest-catchlog <http://pypi.python.org/pypi/pytest-catchlog>`_:
+ to capture and assert about messages from the logging module
+
+* `pytest-cov <http://pypi.python.org/pypi/pytest-cov>`_:
+ coverage reporting, compatible with distributed testing
+
+* `pytest-xdist <http://pypi.python.org/pypi/pytest-xdist>`_:
+  to distribute tests to CPUs and remote hosts, to run in boxed
+  mode which allows surviving segmentation faults, and to run in
+  looponfailing mode, automatically re-running failing tests
+  on file changes; see also :ref:`xdist`
+
+* `pytest-instafail <http://pypi.python.org/pypi/pytest-instafail>`_:
+ to report failures while the test run is happening.
+
+* `pytest-bdd <http://pypi.python.org/pypi/pytest-bdd>`_ and
+ `pytest-konira <http://pypi.python.org/pypi/pytest-konira>`_
+ to write tests using behaviour-driven testing.
+
+* `pytest-timeout <http://pypi.python.org/pypi/pytest-timeout>`_:
+ to timeout tests based on function marks or global definitions.
+
+* `pytest-pep8 <http://pypi.python.org/pypi/pytest-pep8>`_:
+ a ``--pep8`` option to enable PEP8 compliance checking.
+
+* `pytest-flakes <https://pypi.python.org/pypi/pytest-flakes>`_:
+ check source code with pyflakes.
+
+* `oejskit <http://pypi.python.org/pypi/oejskit>`_:
+  a plugin to run JavaScript unit tests in live browsers.
+
+To see a complete list of all plugins with their latest testing
+status against different py.test and Python versions, please visit
+`plugincompat <http://plugincompat.herokuapp.com/>`_.
+
+You may also discover more plugins through a `pytest- pypi.python.org search`_.
+
+.. _`available installable plugins`:
+.. _`pytest- pypi.python.org search`: http://pypi.python.org/pypi?%3Aaction=search&term=pytest-&submit=search
+
+
+Requiring/Loading plugins in a test module or conftest file
+-----------------------------------------------------------
+
+You can require plugins in a test module or a conftest file like this::
+
+    pytest_plugins = "myapp.testsupport.myplugin",
+
+When the test module or conftest plugin is loaded the specified plugins
+will be loaded as well. You can also use a plain string instead of the
+one-element tuple::
+
+    pytest_plugins = "myapp.testsupport.myplugin"
+
+which will import the specified module as a ``pytest`` plugin.
+
+.. _`findpluginname`:
+
+Finding out which plugins are active
+------------------------------------
+
+If you want to find out which plugins are active in your
+environment you can type::
+
+ py.test --traceconfig
+
+and you will get an extended test header which shows activated plugins
+and their names. It will also print local plugins, i.e.
+:ref:`conftest.py <conftest>` files, when they are loaded.
+
+.. _`cmdunregister`:
+
+Deactivating / unregistering a plugin by name
+---------------------------------------------
+
+You can prevent plugins from loading or unregister them::
+
+ py.test -p no:NAME
+
+This means that any subsequent attempt to activate/load the named
+plugin will not work.
+
+If you want to unconditionally disable a plugin for a project, you can add
+this option to your ``pytest.ini`` file:
+
+.. code-block:: ini
+
+ [pytest]
+ addopts = -p no:NAME
+
+Alternatively, to disable it only in certain environments (for example on a
+CI server), you can set the ``PYTEST_ADDOPTS`` environment variable to
+``-p no:name``.
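+
+For example (assuming a POSIX shell, with ``name`` being the plugin name as
+above)::
+
+    export PYTEST_ADDOPTS="-p no:name"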
+
+See :ref:`findpluginname` for how to obtain the name of a plugin.
+
+.. _`builtin plugins`:
+
+Pytest default plugin reference
+-------------------------------
+
+
+You can find the source code for the following plugins
+in the `pytest repository <https://github.com/pytest-dev/pytest>`_.
+
+.. autosummary::
+
+ _pytest.assertion
+ _pytest.cacheprovider
+ _pytest.capture
+ _pytest.config
+ _pytest.doctest
+ _pytest.genscript
+ _pytest.helpconfig
+ _pytest.junitxml
+ _pytest.mark
+ _pytest.monkeypatch
+ _pytest.nose
+ _pytest.pastebin
+ _pytest.pdb
+ _pytest.pytester
+ _pytest.python
+ _pytest.recwarn
+ _pytest.resultlog
+ _pytest.runner
+ _pytest.main
+ _pytest.skipping
+ _pytest.terminal
+ _pytest.tmpdir
+ _pytest.unittest
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/projects.rst b/testing/web-platform/tests/tools/pytest/doc/en/projects.rst
new file mode 100644
index 000000000..76d004916
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/projects.rst
@@ -0,0 +1,85 @@
+.. _projects:
+
+.. image:: img/gaynor3.png
+ :width: 400px
+ :align: right
+
+.. image:: img/theuni.png
+ :width: 400px
+ :align: right
+
+.. image:: img/cramer2.png
+ :width: 400px
+ :align: right
+
+.. image:: img/keleshev.png
+ :width: 400px
+ :align: right
+
+
+Project examples
+==========================
+
+Here are some examples of projects using ``pytest`` (please send notes via :ref:`contact`):
+
+* `PyPy <http://pypy.org>`_, Python with a JIT compiler, running over
+ `21000 tests <http://buildbot.pypy.org/summary?branch=%3Ctrunk%3E>`_
+* the `MoinMoin <http://moinmo.in>`_ Wiki Engine
+* `sentry <https://getsentry.com/welcome/>`_, realtime app-maintenance and exception tracking
+* `Astropy <http://www.astropy.org/>`_ and `affiliated packages <http://www.astropy.org/affiliated/index.html>`_
+* `tox <http://testrun.org/tox>`_, virtualenv/Hudson integration tool
+* `PIDA <http://pida.co.uk>`_ framework for integrated development
+* `PyPM <http://code.activestate.com/pypm/>`_ ActiveState's package manager
+* `Fom <http://packages.python.org/Fom/>`_ a fluid object mapper for FluidDB
+* `applib <https://github.com/ActiveState/applib>`_ cross-platform utilities
+* `six <http://pypi.python.org/pypi/six/>`_ Python 2 and 3 compatibility utilities
+* `pediapress <http://code.pediapress.com/wiki/wiki>`_ MediaWiki articles
+* `mwlib <http://pypi.python.org/pypi/mwlib>`_ mediawiki parser and utility library
+* `The Translate Toolkit <http://translate.sourceforge.net/wiki/toolkit/index>`_ for localization and conversion
+* `execnet <http://codespeak.net/execnet>`_ rapid multi-Python deployment
+* `pylib <http://py.rtfd.org>`_ cross-platform path, IO, dynamic code library
+* `Pacha <http://pacha.cafepais.com/>`_ configuration management in five minutes
+* `bbfreeze <http://pypi.python.org/pypi/bbfreeze>`_ create standalone executables from Python scripts
+* `pdb++ <http://bitbucket.org/antocuni/pdb>`_ a fancier version of PDB
+* `py-s3fuse <http://code.google.com/p/py-s3fuse/>`_ Amazon S3 FUSE based filesystem
+* `waskr <http://code.google.com/p/waskr/>`_ WSGI Stats Middleware
+* `guachi <http://code.google.com/p/guachi/>`_ global persistent configs for Python modules
+* `Circuits <http://pypi.python.org/pypi/circuits>`_ lightweight Event Driven Framework
+* `pygtk-helpers <http://bitbucket.org/aafshar/pygtkhelpers-main/>`_ easy interaction with PyGTK
+* `QuantumCore <http://quantumcore.org/>`_ statusmessage and repoze openid plugin
+* `pydataportability <http://pydataportability.net/>`_ libraries for managing the open web
+* `XIST <http://www.livinglogic.de/Python/xist/>`_ extensible HTML/XML generator
+* `tiddlyweb <http://pypi.python.org/pypi/tiddlyweb>`_ optionally headless, extensible RESTful datastore
+* `fancycompleter <http://bitbucket.org/antocuni/fancycompleter/src>`_ for colorful tab-completion
+* `Paludis <http://paludis.exherbo.org/>`_ tools for Gentoo Paludis package manager
+* `Gerald <http://halfcooked.com/code/gerald/>`_ schema comparison tool
+* `abjad <http://code.google.com/p/abjad/>`_ Python API for Formalized Score control
+* `bu <http://packages.python.org/bu/>`_ a microscopic build system
+* `katcp <https://bitbucket.org/hodgestar/katcp>`_ Telescope communication protocol over Twisted
+* `kss plugin timer <http://pypi.python.org/pypi/kss.plugin.timer>`_
+* `pyudev <http://pyudev.readthedocs.org/en/latest/tests/plugins.html>`_ a pure Python binding to the Linux library libudev
+* `pytest-localserver <https://bitbucket.org/basti/pytest-localserver/>`_ a plugin for pytest that provides a httpserver and smtpserver
+* `pytest-monkeyplus <http://pypi.python.org/pypi/pytest-monkeyplus/>`_ a plugin that extends monkeypatch
+
+These projects help integrate ``pytest`` into other Python frameworks:
+
+* `pytest-django <http://pypi.python.org/pypi/pytest-django/>`_ for Django
+* `zope.pytest <http://packages.python.org/zope.pytest/>`_ for Zope and Grok
+* `pytest_gae <http://pypi.python.org/pypi/pytest_gae/0.2.1>`_ for Google App Engine
+* There is `some work <https://github.com/Kotti/Kotti/blob/master/kotti/testing.py>`_ underway for Kotti, a CMS built in Pyramid/Pylons
+
+
+Some organisations using pytest
+-----------------------------------
+
+* `Square Kilometre Array, Cape Town <http://ska.ac.za/>`_
+* `Some Mozilla QA people <http://www.theautomatedtester.co.uk/blog/2011/pytest_and_xdist_plugin.html>`_ use pytest to distribute their Selenium tests
+* `Tandberg <http://www.tandberg.com/>`_
+* `Shootq <http://web.shootq.com/>`_
+* `Stups department of Heinrich Heine University Duesseldorf <http://www.stups.uni-duesseldorf.de/projects.php>`_
+* `cellzome <http://www.cellzome.com/>`_
+* `Open End, Gothenborg <http://www.openend.se>`_
+* `Laboratory of Bioinformatics, Warsaw <http://genesilico.pl/>`_
+* `merlinux, Germany <http://merlinux.eu>`_
+* `ESSS, Brazil <http://www.esss.com.br>`_
+* many more ... (please be so kind to send a note via :ref:`contact`)
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/pytest.ini b/testing/web-platform/tests/tools/pytest/doc/en/pytest.ini
new file mode 100644
index 000000000..760436056
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+# just defined to prevent the root level tox.ini from kicking in
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/recwarn.rst b/testing/web-platform/tests/tools/pytest/doc/en/recwarn.rst
new file mode 100644
index 000000000..3c42bfaaf
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/recwarn.rst
@@ -0,0 +1,130 @@
+
+Asserting Warnings
+=====================================================
+
+.. _warns:
+
+Asserting warnings with the warns function
+-----------------------------------------------
+
+.. versionadded:: 2.8
+
+You can check that code raises a particular warning using ``pytest.warns``,
+which works in a similar manner to :ref:`raises <assertraises>`::
+
+ import warnings
+ import pytest
+
+ def test_warning():
+ with pytest.warns(UserWarning):
+ warnings.warn("my warning", UserWarning)
+
+The test will fail if the warning in question is not raised.
+
+You can also call ``pytest.warns`` on a function or code string::
+
+ pytest.warns(expected_warning, func, *args, **kwargs)
+ pytest.warns(expected_warning, "func(*args, **kwargs)")
+
+The function also returns a list of all raised warnings (as
+``warnings.WarningMessage`` objects), which you can query for
+additional information::
+
+ with pytest.warns(RuntimeWarning) as record:
+ warnings.warn("another warning", RuntimeWarning)
+
+ # check that only one warning was raised
+ assert len(record) == 1
+ # check that the message matches
+ assert record[0].message.args[0] == "another warning"
+
+Alternatively, you can examine raised warnings in detail using the
+:ref:`recwarn <recwarn>` fixture (see below).
+
+.. note::
+ ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
+ differently; see :ref:`ensuring_function_triggers`.
+
+.. _recwarn:
+
+Recording warnings
+------------------------
+
+You can record raised warnings either using ``pytest.warns`` or with
+the ``recwarn`` fixture.
+
+To record with ``pytest.warns`` without asserting anything about the warnings,
+pass ``None`` as the expected warning type::
+
+ with pytest.warns(None) as record:
+ warnings.warn("user", UserWarning)
+ warnings.warn("runtime", RuntimeWarning)
+
+ assert len(record) == 2
+ assert str(record[0].message) == "user"
+ assert str(record[1].message) == "runtime"
+
+The ``recwarn`` fixture will record warnings for the whole function::
+
+ import warnings
+
+ def test_hello(recwarn):
+ warnings.warn("hello", UserWarning)
+ assert len(recwarn) == 1
+ w = recwarn.pop(UserWarning)
+ assert issubclass(w.category, UserWarning)
+ assert str(w.message) == "hello"
+ assert w.filename
+ assert w.lineno
+
+Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded
+warnings: a WarningsRecorder instance. To view the recorded warnings, you can
+iterate over this instance, call ``len`` on it to get the number of recorded
+warnings, or index into it to get a particular recorded warning. It also
+provides these methods:
+
+.. autoclass:: _pytest.recwarn.WarningsRecorder()
+ :members:
+
+Each recorded warning has the attributes ``message``, ``category``,
+``filename``, ``lineno``, ``file``, and ``line``. The ``category`` is the
+class of the warning. The ``message`` is the warning itself; calling
+``str(message)`` will return the actual message of the warning.
+
+.. note::
+ ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
+ differently; see :ref:`ensuring_function_triggers`.
+
+.. _ensuring_function_triggers:
+
+Ensuring a function triggers a deprecation warning
+-------------------------------------------------------
+
+You can also call a global helper for checking
+that a certain function call triggers a ``DeprecationWarning`` or
+``PendingDeprecationWarning``::
+
+ import pytest
+
+ def test_global():
+ pytest.deprecated_call(myfunction, 17)
+
+By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not
+be caught when using ``pytest.warns`` or ``recwarn`` because the default
+Python warnings filters hide them. If you wish to record them in your own
+code, call ``warnings.simplefilter('always')``::
+
+ import warnings
+ import pytest
+
+ def test_deprecation(recwarn):
+ warnings.simplefilter('always')
+ warnings.warn("deprecated", DeprecationWarning)
+ assert len(recwarn) == 1
+ assert recwarn.pop(DeprecationWarning)
+
+You can also use it as a context manager::
+
+ def test_global():
+ with pytest.deprecated_call():
+ myobject.deprecated_method()
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/setup.rst b/testing/web-platform/tests/tools/pytest/doc/en/setup.rst
new file mode 100644
index 000000000..fe2353465
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/setup.rst
@@ -0,0 +1,10 @@
+
+setup: is now an "autouse fixture"
+========================================================
+
+During development prior to the pytest-2.3 release the name
+``pytest.setup`` was used, but before the release it was renamed
+and moved to become part of the general fixture mechanism,
+namely :ref:`autouse fixtures`.
+
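+A minimal sketch of the replacement idiom (the fixture name and its body are
+purely illustrative)::
+
+    import pytest
+
+    @pytest.fixture(autouse=True)
+    def change_to_tmpdir(tmpdir, monkeypatch):
+        # runs automatically before every test that can see this fixture
+        monkeypatch.chdir(tmpdir)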
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/skipping.rst b/testing/web-platform/tests/tools/pytest/doc/en/skipping.rst
new file mode 100644
index 000000000..4282afb77
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/skipping.rst
@@ -0,0 +1,373 @@
+.. _`skip and xfail`:
+
+.. _skipping:
+
+Skip and xfail: dealing with tests that can not succeed
+=====================================================================
+
+If you have test functions that cannot be run on certain platforms
+or that you expect to fail, you can mark them accordingly or you
+may call helper functions during execution of setup or test functions.
+
+A *skip* means that you expect your test to pass unless the environment
+(e.g. wrong Python interpreter, missing dependency) prevents it from running.
+An *xfail* means that your test can run but you expect it to fail
+because there is an implementation problem.
+
+``pytest`` counts and lists *skip* and *xfail* tests separately. Detailed
+information about skipped/xfailed tests is not shown by default to avoid
+cluttering the output. You can use the ``-r`` option to see details
+corresponding to the "short" letters shown in the test progress::
+
+ py.test -rxs # show extra info on skips and xfails
+
+(See :ref:`how to change command line options defaults`)
+
+.. _skipif:
+.. _`condition booleans`:
+
+Marking a test function to be skipped
+-------------------------------------------
+
+.. versionadded:: 2.9
+
+The simplest way to skip a test function is to mark it with the ``skip`` decorator
+which may be passed an optional ``reason``:
+
+.. code-block:: python
+
+ @pytest.mark.skip(reason="no way of currently testing this")
+ def test_the_unknown():
+ ...
+
+``skipif``
+~~~~~~~~~~
+
+.. versionadded:: 2.0, 2.4
+
+If you wish to skip something conditionally then you can use ``skipif`` instead.
+Here is an example of marking a test function to be skipped
+when run on an interpreter older than Python 3.3::
+
+ import sys
+ @pytest.mark.skipif(sys.version_info < (3,3),
+ reason="requires python3.3")
+ def test_function():
+ ...
+
+During test function setup the condition (``sys.version_info < (3,3)``) is
+checked. If it evaluates to ``True``, the test function will be skipped
+with the specified reason. Note that pytest enforces specifying a reason
+in order to report meaningful "skip reasons" (e.g. when using ``-rs``).
+If the condition is a string, it will be evaluated as a Python expression.
+
+You can share skipif markers between modules. Consider this test module::
+
+ # content of test_mymodule.py
+
+    import pytest
+    import mymodule
+ minversion = pytest.mark.skipif(mymodule.__versioninfo__ < (1,1),
+ reason="at least mymodule-1.1 required")
+ @minversion
+ def test_function():
+ ...
+
+You can import it from another test module::
+
+ # test_myothermodule.py
+ from test_mymodule import minversion
+
+ @minversion
+ def test_anotherfunction():
+ ...
+
+For larger test suites it's usually a good idea to have one file
+where you define the markers which you then consistently apply
+throughout your test suite.
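+
+For example, such a shared module (the ``markers.py`` file name and the
+marker below are purely illustrative) could look like::
+
+    # content of markers.py
+    import sys
+    import pytest
+
+    win32only = pytest.mark.skipif(sys.platform != "win32",
+                                   reason="requires Windows")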
+
+Alternatively, the pre pytest-2.4 way to specify :ref:`condition strings
+<string conditions>` instead of booleans will remain fully supported in future
+versions of pytest. It couldn't be easily used for importing markers
+between test modules so it's no longer advertised as the primary method.
+
+
+Skip all test functions of a class or module
+---------------------------------------------
+
+You can use the ``skipif`` decorator (and any other marker) on classes::
+
+ @pytest.mark.skipif(sys.platform == 'win32',
+ reason="does not run on windows")
+ class TestPosixCalls:
+
+ def test_function(self):
+ "will not be setup or run under 'win32' platform"
+
+If the condition is true, this marker will produce a skip result for
+each of the test methods.
+
+If you want to skip all test functions of a module, you must use
+the ``pytestmark`` name on the global level:
+
+.. code-block:: python
+
+ # test_module.py
+ pytestmark = pytest.mark.skipif(...)
+
+If multiple "skipif" decorators are applied to a test function, it
+will be skipped if any of the skip conditions is true.
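+
+For example (a sketch; both conditions are purely illustrative)::
+
+    import sys
+    @pytest.mark.skipif(sys.platform == 'win32', reason="posix only")
+    @pytest.mark.skipif(sys.version_info < (3,3), reason="requires python3.3")
+    def test_function():
+        ...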
+
+.. _`whole class- or module level`: mark.html#scoped-marking
+
+.. _xfail:
+
+Mark a test function as expected to fail
+-------------------------------------------------------
+
+You can use the ``xfail`` marker to indicate that you
+expect a test to fail::
+
+ @pytest.mark.xfail
+ def test_function():
+ ...
+
+This test will be run but no traceback will be reported
+when it fails. Instead, terminal reporting will list it in the
+"expected to fail" (``XFAIL``) or "unexpectedly passing" (``XPASS``) sections.
+
+``strict`` parameter
+~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 2.9
+
+Neither ``XFAIL`` nor ``XPASS`` fails the test suite, unless the ``strict`` keyword-only
+parameter is passed as ``True``:
+
+.. code-block:: python
+
+ @pytest.mark.xfail(strict=True)
+ def test_function():
+ ...
+
+
+This will make ``XPASS`` ("unexpectedly passing") results from this test fail the test suite.
+
+You can change the default value of the ``strict`` parameter using the
+``xfail_strict`` ini option:
+
+.. code-block:: ini
+
+ [pytest]
+ xfail_strict=true
+
+
+``reason`` parameter
+~~~~~~~~~~~~~~~~~~~~
+
+As with skipif_ you can also mark your expectation of a failure
+on a particular platform::
+
+ @pytest.mark.xfail(sys.version_info >= (3,3),
+ reason="python3.3 api changes")
+ def test_function():
+ ...
+
+
+``raises`` parameter
+~~~~~~~~~~~~~~~~~~~~
+
+If you want to be more specific as to why the test is failing, you can specify
+a single exception, or a list of exceptions, in the ``raises`` argument.
+
+.. code-block:: python
+
+ @pytest.mark.xfail(raises=RuntimeError)
+ def test_function():
+ ...
+
+Then the test will be reported as a regular failure if it fails with an
+exception not mentioned in ``raises``.
+
+``run`` parameter
+~~~~~~~~~~~~~~~~~
+
+If a test should be marked as xfail and reported as such but should not
+even be executed, set the ``run`` parameter to ``False``:
+
+.. code-block:: python
+
+ @pytest.mark.xfail(run=False)
+ def test_function():
+ ...
+
+This is especially useful for marking crashing tests for later inspection.
+
+
+Ignoring xfail marks
+~~~~~~~~~~~~~~~~~~~~
+
+By specifying on the commandline::
+
+ pytest --runxfail
+
+you can force the running and reporting of an ``xfail`` marked test
+as if it weren't marked at all.
+
+Examples
+~~~~~~~~
+
+Here is a simple test file with several usages:
+
+.. literalinclude:: example/xfail_demo.py
+
+Running it with the report-on-xfail option gives this output::
+
+ example $ py.test -rx xfail_demo.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR/example, inifile:
+ collected 7 items
+
+ xfail_demo.py xxxxxxx
+ ======= short test summary info ========
+ XFAIL xfail_demo.py::test_hello
+ XFAIL xfail_demo.py::test_hello2
+ reason: [NOTRUN]
+ XFAIL xfail_demo.py::test_hello3
+ condition: hasattr(os, 'sep')
+ XFAIL xfail_demo.py::test_hello4
+ bug 110
+ XFAIL xfail_demo.py::test_hello5
+ condition: pytest.__version__[0] != "17"
+ XFAIL xfail_demo.py::test_hello6
+ reason: reason
+ XFAIL xfail_demo.py::test_hello7
+
+ ======= 7 xfailed in 0.12 seconds ========
+
+xfail signature summary
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Here's the signature of the ``xfail`` marker, using Python 3 keyword-only
+arguments syntax:
+
+.. code-block:: python
+
+ def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False):
+
+
+
+.. _`skip/xfail with parametrize`:
+
+Skip/xfail with parametrize
+---------------------------
+
+It is possible to apply markers like skip and xfail to individual
+test instances when using parametrize::
+
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail((1, 0)),
+ pytest.mark.xfail(reason="some bug")((1, 3)),
+ (2, 3),
+ (3, 4),
+ (4, 5),
+ pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+
+
+Imperative xfail from within a test or setup function
+------------------------------------------------------
+
+If you cannot declare xfail or skipif conditions at import
+time, you can imperatively produce the corresponding outcome
+in test or setup code::
+
+ def test_function():
+ if not valid_config():
+ pytest.xfail("failing configuration (but should work)")
+ # or
+ pytest.skip("unsupported configuration")
+
+
+Skipping on a missing import dependency
+--------------------------------------------------
+
+You can use the following import helper at module level
+or within a test or test setup function::
+
+ docutils = pytest.importorskip("docutils")
+
+If ``docutils`` cannot be imported here, this will lead to a
+skip outcome of the test. You can also skip based on the
+version number of a library::
+
+ docutils = pytest.importorskip("docutils", minversion="0.3")
+
+The version will be read from the specified
+module's ``__version__`` attribute.
+
+
+.. _string conditions:
+
+specifying conditions as strings versus booleans
+----------------------------------------------------------
+
+Prior to pytest-2.4 the only way to specify skipif/xfail conditions was
+to use strings::
+
+ import sys
+ @pytest.mark.skipif("sys.version_info >= (3,3)")
+ def test_function():
+ ...
+
+During test function setup the skipif condition is evaluated by calling
+``eval('sys.version_info >= (3,3)', namespace)``. The namespace contains
+all the module globals, and ``os`` and ``sys`` as a minimum.
+
+Since pytest-2.4 `condition booleans`_ are considered preferable
+because markers can then be freely imported between test modules.
+With strings you need to import not only the marker but also all
+variables used by the marker, which violates encapsulation.
+
+The reason for specifying the condition as a string was that ``pytest`` can
+report a summary of skip conditions based purely on the condition string.
+With conditions as booleans you are required to specify a ``reason`` string.
+
+Note that string conditions will remain fully supported and you are free
+to use them if you have no need for cross-importing markers.
+
+The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)``
+or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace
+dictionary which is constructed as follows:
+
+* the namespace is initialized by putting the ``sys`` and ``os`` modules
+ and the pytest ``config`` object into it.
+
+* updated with the module globals of the test function for which the
+ expression is applied.
+
+The pytest ``config`` object allows you to skip based on a test
+configuration value which you might have added::
+
+ @pytest.mark.skipif("not config.getvalue('db')")
+ def test_function(...):
+ ...
+
+The equivalent with "boolean conditions" is::
+
+ @pytest.mark.skipif(not pytest.config.getvalue("db"),
+ reason="--db was not specified")
+ def test_function(...):
+ pass
+
+.. note::
+
+ You cannot use ``pytest.config.getvalue()`` in code
+ imported before py.test's argument parsing takes place. For example,
+ ``conftest.py`` files are imported before command line parsing and thus
+ ``config.getvalue()`` will not execute correctly.
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/status.rst b/testing/web-platform/tests/tools/pytest/doc/en/status.rst
new file mode 100644
index 000000000..3c7bf70ea
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/status.rst
@@ -0,0 +1,5 @@
+pytest development status
+================================
+
+https://travis-ci.org/pytest-dev/pytest
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/talks.rst b/testing/web-platform/tests/tools/pytest/doc/en/talks.rst
new file mode 100644
index 000000000..7a5221845
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/talks.rst
@@ -0,0 +1,116 @@
+
+Talks and Tutorials
+==========================
+
+.. sidebar:: Next Open Trainings
+
+ `professional testing with pytest and tox <http://www.python-academy.com/courses/specialtopics/python_course_testing.html>`_, 27-29th June 2016, Freiburg, Germany
+
+.. _`funcargs`: funcargs.html
+
+Talks and blog postings
+---------------------------------------------
+
+.. _`tutorial1 repository`: http://bitbucket.org/pytest-dev/pytest-tutorial1/
+.. _`pycon 2010 tutorial PDF`: http://bitbucket.org/pytest-dev/pytest-tutorial1/raw/tip/pytest-basic.pdf
+
+- `pytest - Rapid Simple Testing, Florian Bruhin, Swiss Python Summit 2016
+ <https://www.youtube.com/watch?v=rCBHkQ_LVIs>`_.
+
+- `Improve your testing with Pytest and Mock, Gabe Hollombe, PyCon SG 2015
+ <https://www.youtube.com/watch?v=RcN26hznmk4>`_.
+
+- `Introduction to pytest, Andreas Pelme, EuroPython 2014
+ <https://www.youtube.com/watch?v=LdVJj65ikRY>`_.
+
+- `Advanced Uses of py.test Fixtures, Floris Bruynooghe, EuroPython
+ 2014 <https://www.youtube.com/watch?v=IBC_dxr-4ps>`_.
+
+- `Why i use py.test and maybe you should too, Andy Todd, Pycon AU 2013
+ <https://www.youtube.com/watch?v=P-AhpukDIik>`_
+
+- `3-part blog series about pytest from @pydanny alias Daniel Greenfeld (January
+ 2014) <http://pydanny.com/pytest-no-boilerplate-testing.html>`_
+
+- `pytest: helps you write better Django apps, Andreas Pelme, DjangoCon
+ Europe 2014 <https://www.youtube.com/watch?v=aaArYVh6XSM>`_.
+
+- :ref:`fixtures`
+
+- `Testing Django Applications with pytest, Andreas Pelme, EuroPython
+ 2013 <https://www.youtube.com/watch?v=aUf8Fkb7TaY>`_.
+
+- `Testes pythonics com py.test, Vinicius Belchior Assef Neto, Plone
+ Conf 2013, Brazil <https://www.youtube.com/watch?v=QUKoq2K7bis>`_.
+
+- `Introduction to py.test fixtures, FOSDEM 2013, Floris Bruynooghe
+ <https://www.youtube.com/watch?v=bJhRW4eZMco>`_.
+
+- `pytest feature and release highlights, Holger Krekel (GERMAN, October 2013)
+ <http://pyvideo.org/video/2429/pytest-feature-and-new-release-highlights>`_
+
+- `pytest introduction from Brian Okken (January 2013)
+ <http://pythontesting.net/framework/pytest-introduction/>`_
+
+- `monkey patching done right`_ (blog post, consult `monkeypatch
+ plugin`_ for up-to-date API)
+
+Test parametrization:
+
+- `generating parametrized tests with funcargs`_ (uses deprecated ``addcall()`` API).
+- `test generators and cached setup`_
+- `parametrizing tests, generalized`_ (blog post)
+- `putting test-hooks into local or global plugins`_ (blog post)
+
+Assertion introspection:
+
+- `(07/2011) Behind the scenes of pytest's new assertion rewriting
+ <http://pybites.blogspot.com/2011/07/behind-scenes-of-pytests-new-assertion.html>`_
+
+Distributed testing:
+
+- `simultaneously test your code on all platforms`_ (blog entry)
+
+Plugin specific examples:
+
+- `skipping slow tests by default in pytest`_ (blog entry)
+
+- `many examples in the docs for plugins`_
+
+.. _`skipping slow tests by default in pytest`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
+.. _`many examples in the docs for plugins`: plugin/index.html
+.. _`monkeypatch plugin`: plugin/monkeypatch.html
+.. _`application setup in test functions with funcargs`: funcargs.html#appsetup
+.. _`simultaneously test your code on all platforms`: http://tetamap.wordpress.com/2009/03/23/new-simultanously-test-your-code-on-all-platforms/
+.. _`monkey patching done right`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
+.. _`putting test-hooks into local or global plugins`: http://tetamap.wordpress.com/2009/05/14/putting-test-hooks-into-local-and-global-plugins/
+.. _`parametrizing tests, generalized`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/
+.. _`generating parametrized tests with funcargs`: funcargs.html#test-generators
+.. _`test generators and cached setup`: http://bruynooghe.blogspot.com/2010/06/pytest-test-generators-and-cached-setup.html
+
+Older conference talks and tutorials
+----------------------------------------
+
+- `pycon australia 2012 pytest talk from Brianna Laugher
+ <http://2012.pycon-au.org/schedule/52/view_talk?day=sunday>`_ (`video <http://www.youtube.com/watch?v=DTNejE9EraI>`_, `slides <http://www.slideshare.net/pfctdayelise/funcargs-other-fun-with-pytest>`_, `code <https://gist.github.com/3386951>`_)
+- `pycon 2012 US talk video from Holger Krekel <http://www.youtube.com/watch?v=9LVqBQcFmyw>`_
+- `pycon 2010 tutorial PDF`_ and `tutorial1 repository`_
+
+- `ep2009-rapidtesting.pdf`_ tutorial slides (July 2009):
+
+ - testing terminology
+ - basic pytest usage, file system layout
+ - test function arguments (funcargs_) and test fixtures
+ - existing plugins
+ - distributed testing
+
+- `ep2009-pytest.pdf`_ 60 minute pytest talk, highlighting unique features and a roadmap (July 2009)
+
+- `pycon2009-pytest-introduction.zip`_ slides and files, an extended version of the basic pytest introduction; discusses more options and also introduces old-style xUnit setup, looponfailing and other features.
+
+- `pycon2009-pytest-advanced.pdf`_ contains a slightly older version of funcargs and distributed testing, compared to the EuroPython 2009 slides.
+
+.. _`ep2009-rapidtesting.pdf`: http://codespeak.net/download/py/ep2009-rapidtesting.pdf
+.. _`ep2009-pytest.pdf`: http://codespeak.net/download/py/ep2009-pytest.pdf
+.. _`pycon2009-pytest-introduction.zip`: http://codespeak.net/download/py/pycon2009-pytest-introduction.zip
+.. _`pycon2009-pytest-advanced.pdf`: http://codespeak.net/download/py/pycon2009-pytest-advanced.pdf
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/attic.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/attic.rst
new file mode 100644
index 000000000..6408c7225
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/attic.rst
@@ -0,0 +1,117 @@
+===============================================
+ATTIC documentation
+===============================================
+
+XXX REVIEW and remove the below XXX
+
+Customizing the testing process
+===============================
+
+writing conftest.py files
+-----------------------------------
+
+You may put conftest.py files containing project-specific
+configuration in your project's root directory; it is usually
+best to put them at the same directory level as your
+topmost ``__init__.py``. ``pytest`` performs
+an "upwards" search starting from the directory that you specify
+to be tested and looks up configuration values right-to-left.
+You may have options that reside e.g. in your home directory,
+but note that project-specific settings are considered
+first. There is a flag that helps you debug your
+conftest.py configurations::
+
+ py.test --traceconfig
+
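+A minimal project-level ``conftest.py`` might, for example, register a
+project-specific command line option (a sketch; the option name
+``--runslow`` is made up for illustration)::
+
+    # content of conftest.py at the project root
+    def pytest_addoption(parser):
+        # register an extra command line option for this project
+        parser.addoption("--runslow", action="store_true",
+                         help="also run tests marked as slow")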
+
+customizing the collecting and running process
+-----------------------------------------------
+
+To introduce different test items you can create
+one or more ``conftest.py`` files in your project.
+When the collection process traverses directories
+and modules the default collectors will produce
+custom Collectors and Items if they are found
+in a local ``conftest.py`` file.
+
+
+Customizing the collection process in a module
+----------------------------------------------
+
+If you have a module where you want to take responsibility for
+collecting your own test Items and possibly even for executing
+a test, then you can provide `generative tests`_ that yield
+callables and possibly arguments as a tuple. This is especially
+useful for calling application test machinery with different
+parameter sets while counting each of the calls as a separate
+test.
+
+.. _`generative tests`: features.html#generative-tests
+
+The other extension possibility is to specify
+a custom test ``Item`` class which
+is responsible for setting up and executing an underlying
+test. Alternatively, you can extend the collection process for a whole
+directory tree by putting Items in a ``conftest.py`` configuration file.
+The collection process dynamically consults the *chain of conftest.py*
+modules to determine collectors and items at ``Directory``, ``Module``,
+``Class``, ``Function`` or ``Generator`` level respectively.
+
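+As a rough sketch, a ``conftest.py`` providing its own collector and
+``Item`` class could look like this (class and file names are made up
+for illustration; exact constructor signatures differ between pytest
+versions)::
+
+    # content of conftest.py -- illustrative sketch only
+    import pytest
+
+    def pytest_collect_file(path, parent):
+        # treat files named check_*.txt as custom test files
+        if path.basename.startswith("check_") and path.ext == ".txt":
+            return SimpleFile(path, parent)
+
+    class SimpleFile(pytest.File):
+        def collect(self):
+            # produce one Item per non-empty line of the file
+            with self.fspath.open() as f:
+                for lineno, line in enumerate(f):
+                    line = line.strip()
+                    if line:
+                        yield SimpleItem("%s[%d]" % (self.name, lineno), self, line)
+
+    class SimpleItem(pytest.Item):
+        def __init__(self, name, parent, line):
+            super(SimpleItem, self).__init__(name, parent)
+            self.line = line
+
+        def runtest(self):
+            # the "test" passes if the line does not contain the word "fail"
+            assert "fail" not in self.line
+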
+Customizing execution of Items and Functions
+----------------------------------------------------
+
+- ``pytest.Function`` test items control execution
+ of a test function through its ``function.runtest()`` method.
+ This method is responsible for performing setup and teardown
+ ("Test Fixtures") for a test Function.
+
+- ``Function.execute(target, *args)`` methods are invoked by
+ the default ``Function.run()`` to actually execute a python
+ function with the given (usually empty set of) arguments.
+
+.. _`py-dev mailing list`: http://codespeak.net/mailman/listinfo/py-dev
+
+
+.. _`test generators`: funcargs.html#test-generators
+
+.. _`generative tests`:
+
+generative tests: yielding parametrized tests
+====================================================
+
+Deprecated since 1.0 in favour of `test generators`_.
+
+*Generative tests* are test methods that are *generator functions* which
+``yield`` callables and their arguments. This is useful for running a
+test function multiple times against different parameters. Example::
+
+ def test_generative():
+ for x in (42,17,49):
+ yield check, x
+
+ def check(arg):
+        assert arg % 7 == 0   # the second generated test fails!
+
+Note that ``test_generative()`` will cause three tests
+to get run, namely ``check(42)``, ``check(17)`` and ``check(49)``
+of which the middle one will obviously fail.
+
+To make it easier to distinguish the generated tests it is possible to specify an explicit name for them, like for example::
+
+ def test_generative():
+ for x in (42,17,49):
+ yield "case %d" % x, check, x
+
+
+disabling a test class
+----------------------
+
+If you want to disable a complete test class you
+can set the class-level attribute ``disabled``.
+For example, in order to avoid running some tests on Win32::
+
+    import sys
+
+    class TestPosixOnly:
+        disabled = sys.platform == 'win32'
+
+ def test_xxx(self):
+ ...
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/config.html b/testing/web-platform/tests/tools/pytest/doc/en/test/config.html
new file mode 100644
index 000000000..cba5a46f9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/config.html
@@ -0,0 +1,18 @@
+<html>
+ <head>
+ <meta http-equiv="refresh" content=" 1 ; URL=customize.html" />
+ </head>
+
+ <body>
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-7597274-3");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+</body>
+</html>
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/dist.html b/testing/web-platform/tests/tools/pytest/doc/en/test/dist.html
new file mode 100644
index 000000000..e328550a8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/dist.html
@@ -0,0 +1,18 @@
+<html>
+ <head>
+ <meta http-equiv="refresh" content=" 1 ; URL=plugin/xdist.html" />
+ </head>
+
+ <body>
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-7597274-3");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+</body>
+</html>
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/extend.html b/testing/web-platform/tests/tools/pytest/doc/en/test/extend.html
new file mode 100644
index 000000000..cba5a46f9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/extend.html
@@ -0,0 +1,18 @@
+<html>
+ <head>
+ <meta http-equiv="refresh" content=" 1 ; URL=customize.html" />
+ </head>
+
+ <body>
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-7597274-3");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+</body>
+</html>
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/index.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/index.rst
new file mode 100644
index 000000000..1a3b5a54d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/index.rst
@@ -0,0 +1,35 @@
+=======================================
+pytest documentation index
+=======================================
+
+
+features_: overview and discussion of features.
+
+quickstart_: getting started with writing a simple test.
+
+`talks, tutorials, examples`_: tutorial examples, slides
+
+funcargs_: powerful parametrized test function setup
+
+`plugins`_: list of available plugins with usage examples and feature details.
+
+customize_: configuration, customization, extensions
+
+changelog_: history of changes covering last releases
+
+**Continuous Integration of pytest's own tests and plugins with Hudson**:
+
+ `http://hudson.testrun.org/view/pytest`_
+
+.. _`http://hudson.testrun.org/view/pytest`: http://hudson.testrun.org/view/pytest/
+
+
+.. _changelog: ../changelog.html
+.. _`plugins`: plugin/index.html
+.. _`talks, tutorials, examples`: talks.html
+.. _quickstart: quickstart.html
+.. _features: features.html
+.. _funcargs: funcargs.html
+.. _customize: customize.html
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/mission.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/mission.rst
new file mode 100644
index 000000000..cda8d9a72
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/mission.rst
@@ -0,0 +1,13 @@
+
+Mission
+====================================
+
+``pytest`` strives to make testing a fun and no-boilerplate effort.
+
+The tool is distributed as a `pytest` package. Its project-independent
+``py.test`` command line tool helps you to:
+
+* rapidly collect and run tests
+* run unit- or doctests, functional or integration tests
+* distribute tests to multiple environments
+* use local or global plugins for custom test types and setup
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/cov.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/cov.rst
new file mode 100644
index 000000000..355093f25
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/cov.rst
@@ -0,0 +1,230 @@
+
+produce code coverage reports using the 'coverage' package, including support for distributed testing.
+======================================================================================================
+
+
+.. contents::
+ :local:
+
+This plugin produces coverage reports. It supports centralised testing and distributed testing in
+both load and each modes. It also supports coverage of subprocesses.
+
+All features offered by the coverage package should be available, either through pytest-cov or
+through coverage's config file.
+
+
+Installation
+------------
+
+The `pytest-cov`_ package may be installed with pip or easy_install::
+
+ pip install pytest-cov
+ easy_install pytest-cov
+
+.. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/
+
+
+Uninstallation
+--------------
+
+Uninstalling packages is supported by pip::
+
+ pip uninstall pytest-cov
+
+However easy_install does not provide an uninstall facility.
+
+.. IMPORTANT::
+
+ Ensure that you manually delete the init_covmain.pth file in your
+ site-packages directory.
+
+ This file starts coverage collection of subprocesses if appropriate during
+ site initialization at python startup.
+
+
+Usage
+-----
+
+Centralised Testing
+~~~~~~~~~~~~~~~~~~~
+
+Centralised testing will report on the combined coverage of the main process and all of its
+subprocesses.
+
+Running centralised testing::
+
+ py.test --cov myproj tests/
+
+Shows a terminal report::
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+Distributed Testing: Load
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Distributed testing with dist mode set to load will report on the combined coverage of all slaves.
+The slaves may be spread out over any number of hosts and each slave may be located anywhere on the
+file system. Each slave will have its subprocesses measured.
+
+Running distributed testing with dist mode set to load::
+
+ py.test --cov myproj -n 2 tests/
+
+Shows a terminal report::
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+Again but spread over different hosts and different directories::
+
+ py.test --cov myproj --dist load
+ --tx ssh=memedough@host1//chdir=testenv1
+ --tx ssh=memedough@host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python
+ --rsyncdir myproj --rsyncdir tests --rsync examples
+ tests/
+
+Shows a terminal report::
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+Distributed Testing: Each
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Distributed testing with dist mode set to each will report on the combined coverage of all slaves.
+Since each slave is running all tests this allows generating a combined coverage report for multiple
+environments.
+
+Running distributed testing with dist mode set to each::
+
+ py.test --cov myproj --dist each
+ --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python
+ --tx ssh=memedough@host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python
+ --rsyncdir myproj --rsyncdir tests --rsync examples
+ tests/
+
+Shows a terminal report::
+
+ ---------------------------------------- coverage ----------------------------------------
+ platform linux2, python 2.6.5-final-0
+ platform linux2, python 2.7.0-final-0
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+Reporting
+---------
+
+It is possible to generate any combination of the reports for a single test run.
+
+The available reports are terminal (with or without missing line numbers shown), HTML, XML and
+annotated source code.
+
+The terminal report without line numbers (default)::
+
+ py.test --cov-report term --cov myproj tests/
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+The terminal report with line numbers::
+
+ py.test --cov-report term-missing --cov myproj tests/
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover Missing
+ --------------------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94% 24-26, 99, 149, 233-236, 297-298, 369-370
+ myproj/feature4286 94 7 92% 183-188, 197
+ --------------------------------------------------
+ TOTAL 353 20 94%
+
+
+The remaining three reports output to files without showing anything on the terminal (useful
+when the output is going to a continuous integration server)::
+
+ py.test --cov-report html --cov-report xml --cov-report annotate --cov myproj tests/
+
+
+Coverage Data File
+------------------
+
+The data file is erased at the beginning of testing to ensure clean data for each test run.
+
+The data file is left at the end of testing so that it is possible to use normal coverage tools to
+examine it.
+
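+For example, assuming the ``coverage`` command line tool is available, the
+left-over data file can be inspected with::
+
+    coverage report -m   # terminal report including missing line numbers
+    coverage html        # write an HTML report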
+
+Limitations
+-----------
+
+For distributed testing the slaves must have the pytest-cov package installed. This is needed since
+the plugin must be registered through setuptools / distribute for pytest to start the plugin on the
+slave.
+
+For subprocess measurement environment variables must make it from the main process to the
+subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must
+do normal site initialization so that the environment variables can be detected and coverage
+started.
+
+
+Acknowledgments
+----------------
+
+Holger Krekel for pytest with its distributed testing support.
+
+Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs.
+
+Whilst this plugin has been built fresh from the ground up to support distributed testing it has
+been influenced by the work done on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and
+nose-cover (Jason Pellerin) which are other coverage plugins for pytest and nose respectively.
+
+No doubt others have contributed to these tools as well.
+
+command line options
+--------------------
+
+
+``--cov=path``
+ measure coverage for filesystem path (multi-allowed)
+``--cov-report=type``
+ type of report to generate: term, term-missing, annotate, html, xml (multi-allowed)
+``--cov-config=path``
+ config file for coverage, default: .coveragerc
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/coverage.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/coverage.rst
new file mode 100644
index 000000000..965b4a4ee
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/coverage.rst
@@ -0,0 +1,51 @@
+
+Write and report coverage data with the 'coverage' package.
+===========================================================
+
+
+.. contents::
+ :local:
+
+Note: Original code by Ross Lawley.
+
+Install
+--------------
+
+Use pip to (un)install::
+
+ pip install pytest-coverage
+ pip uninstall pytest-coverage
+
+or alternatively use easy_install to install::
+
+ easy_install pytest-coverage
+
+
+Usage
+-------------
+
+To get full test coverage reports for a particular package type::
+
+ py.test --cover-report=report
+
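+A more complete invocation might restrict coverage to one package and write
+an HTML report to a custom directory (package and directory names are
+illustrative)::
+
+    py.test --cover=myproj --cover-report=html --cover-directory=htmlcov
+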
+command line options
+--------------------
+
+
+``--cover=COVERPACKAGES``
+ (multi allowed) only include info from specified package.
+``--cover-report=REPORT_TYPE``
+ html: Directory for html output.
+ report: Output a text report.
+ annotate: Annotate your source code for which lines were executed and which were not.
+ xml: Output an xml report compatible with the cobertura plugin for hudson.
+``--cover-directory=DIRECTORY``
+ Directory for the reports (html / annotate results) defaults to ./coverage
+``--cover-xml-file=XML_FILE``
+ File for the xml report defaults to ./coverage.xml
+``--cover-show-missing``
+ Show missing files
+``--cover-ignore-errors=IGNORE_ERRORS``
+ Ignore errors of finding source files for code.
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/django.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/django.rst
new file mode 100644
index 000000000..061497b38
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/django.rst
@@ -0,0 +1,7 @@
+pytest_django plugin (EXTERNAL)
+==========================================
+
+pytest_django is a plugin for ``pytest`` that provides a set of useful tools for testing Django applications; check out Ben Firshman's `pytest_django github page`_.
+
+.. _`pytest_django github page`: http://github.com/bfirsh/pytest_django/tree/master
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/figleaf.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/figleaf.rst
new file mode 100644
index 000000000..86e0da65b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/figleaf.rst
@@ -0,0 +1,44 @@
+
+report test coverage using the 'figleaf' package.
+=================================================
+
+
+.. contents::
+ :local:
+
+Install
+---------------
+
+To install the plugin issue::
+
+ easy_install pytest-figleaf # or
+ pip install pytest-figleaf
+
+and if you are using pip you can also uninstall::
+
+ pip uninstall pytest-figleaf
+
+
+Usage
+---------------
+
+After installation you can simply type::
+
+ py.test --figleaf [...]
+
+to enable figleaf coverage in your test run. A default ".figleaf" data file
+and "html" directory will be created. You can use command line options
+to control where data and html files are created.
+
+command line options
+--------------------
+
+
+``--figleaf``
+ trace python coverage with figleaf and write HTML for files below the current working dir
+``--fig-data=dir``
+ set tracing file, default: ".figleaf".
+``--fig-html=dir``
+ set html reporting dir, default "html".
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/genscript.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/genscript.rst
new file mode 100644
index 000000000..ee80f233f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/genscript.rst
@@ -0,0 +1,28 @@
+
+(deprecated) generate standalone test script to be distributed along with an application.
+==========================================================================================
+
+
+.. contents::
+ :local:
+
+
+
+command line options
+--------------------
+
+
+``--genscript=path``
+  create standalone ``pytest`` script at given target path (see the example below).
+
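+For example, to generate a standalone runner script next to your project and
+execute it later (the file name is arbitrary), something like this should
+work::
+
+    py.test --genscript=runtests.py
+    python runtests.py
+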
+Start improving this plugin in 30 seconds
+=========================================
+
+
+1. Download `pytest_genscript.py`_ plugin source code
+2. Put it somewhere as ``pytest_genscript.py`` into your import path
+3. A subsequent ``pytest`` run will use your local version
+
+Check out customize_, other plugins_ or `get in contact`_.
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/helpconfig.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/helpconfig.rst
new file mode 100644
index 000000000..9b5b8cddd
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/helpconfig.rst
@@ -0,0 +1,38 @@
+
+provide version info, conftest/environment config names.
+========================================================
+
+
+.. contents::
+ :local:
+
+
+
+command line options
+--------------------
+
+
+``--version``
+ display py lib version and import information.
+``-p name``
+ early-load given plugin (multi-allowed).
+``--traceconfig``
+ trace considerations of conftest.py files.
+``--nomagic``
+ don't reinterpret asserts, no traceback cutting.
+``--debug``
+ generate and show internal debugging information.
+``--help-config``
+ show available conftest.py and ENV-variable names.
+
+Start improving this plugin in 30 seconds
+=========================================
+
+
+1. Download `pytest_helpconfig.py`_ plugin source code
+2. Put it somewhere as ``pytest_helpconfig.py`` into your import path
+3. A subsequent ``pytest`` run will use your local version
+
+Check out customize_, other plugins_ or `get in contact`_.
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/index.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/index.rst
new file mode 100644
index 000000000..853a4dce6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/index.rst
@@ -0,0 +1,68 @@
+
+advanced python testing
+=======================
+
+skipping_ advanced skipping for python test functions, classes or modules.
+
+mark_ generic mechanism for marking python functions.
+
+pdb_ interactive debugging with the Python Debugger.
+
+figleaf_ (external) report test coverage using the 'figleaf' package.
+
+monkeypatch_ safely patch object attributes, dicts and environment variables.
+
+coverage_ (external) Write and report coverage data with the 'coverage' package.
+
+cov_ (external) produce code coverage reports using the 'coverage' package, including support for distributed testing.
+
+capture_ configurable per-test stdout/stderr capturing mechanisms.
+
+capturelog_ (external) capture output of logging module.
+
+recwarn_ helpers for asserting deprecation and other warnings.
+
+tmpdir_ provide temporary directories to test functions.
+
+
+distributed testing, CI and deployment
+======================================
+
+xdist_ (external) loop on failing tests, distribute test runs to CPUs and hosts.
+
+pastebin_ submit failure or test session information to a pastebin service.
+
+junitxml_ logging of test results in JUnit-XML format, for use with Hudson
+
+resultlog_ non-xml machine-readable logging of test results.
+
+genscript_ generate standalone test script to be distributed along with an application.
+
+
+testing domains, conventions and code checkers
+===============================================
+
+oejskit_ (external) run JavaScript tests in real-life browsers
+
+django_ (external) for testing django applications
+
+unittest_ automatically discover and run traditional "unittest.py" style tests.
+
+nose_ nose-compatibility plugin: allows running nose test suites natively.
+
+doctest_ collect and execute doctests from modules and test files.
+
+restdoc_ perform ReST syntax, local and remote reference tests on .rst/.txt files.
+
+
+internal, debugging, help functionality
+=======================================
+
+helpconfig_ provide version info, conftest/environment config names.
+
+terminal_ Implements terminal reporting of the full testing process.
+
+hooklog_ log invocations of extension hooks to a file.
+
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/links.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/links.rst
new file mode 100644
index 000000000..aa965e730
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/links.rst
@@ -0,0 +1,47 @@
+.. _`helpconfig`: helpconfig.html
+.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_recwarn.py
+.. _`unittest`: unittest.html
+.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_monkeypatch.py
+.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_genscript.py
+.. _`pastebin`: pastebin.html
+.. _`skipping`: skipping.html
+.. _`genscript`: genscript.html
+.. _`plugins`: index.html
+.. _`mark`: mark.html
+.. _`tmpdir`: tmpdir.html
+.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_doctest.py
+.. _`capture`: capture.html
+.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_nose.py
+.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_restdoc.py
+.. _`restdoc`: restdoc.html
+.. _`xdist`: xdist.html
+.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pastebin.py
+.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_tmpdir.py
+.. _`terminal`: terminal.html
+.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_hooklog.py
+.. _`capturelog`: capturelog.html
+.. _`junitxml`: junitxml.html
+.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_skipping.py
+.. _`checkout the pytest development version`: ../../install.html#checkout
+.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_helpconfig.py
+.. _`oejskit`: oejskit.html
+.. _`doctest`: doctest.html
+.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_mark.py
+.. _`get in contact`: ../../contact.html
+.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_capture.py
+.. _`figleaf`: figleaf.html
+.. _`customize`: ../customize.html
+.. _`hooklog`: hooklog.html
+.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_terminal.py
+.. _`recwarn`: recwarn.html
+.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pdb.py
+.. _`monkeypatch`: monkeypatch.html
+.. _`coverage`: coverage.html
+.. _`resultlog`: resultlog.html
+.. _`cov`: cov.html
+.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_junitxml.py
+.. _`django`: django.html
+.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_unittest.py
+.. _`nose`: nose.html
+.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_resultlog.py
+.. _`pdb`: pdb.html
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/nose.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/nose.rst
new file mode 100644
index 000000000..f3aa7d705
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/nose.rst
@@ -0,0 +1,56 @@
+
+nose-compatibility plugin: allows running nose test suites natively.
+=====================================================================
+
+
+.. contents::
+ :local:
+
+This is an experimental plugin that allows running tests written
+in 'nosetests' style with ``pytest``.
+
+Usage
+-------------
+
+type::
+
+ py.test # instead of 'nosetests'
+
+and you should be able to run nose-style tests while at the same
+time making full use of pytest's capabilities.
+
+Supported nose Idioms
+----------------------
+
+* setup and teardown at module/class/method level (see the example below)
+* SkipTest exceptions and markers
+* setup/teardown decorators
+* yield-based tests and their setup
+* general usage of nose utilities
+
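+For example, a module using nose-style ``setup``/``teardown`` functions
+should run unchanged under ``pytest`` (a minimal sketch)::
+
+    # content of test_nosestyle.py
+    values = []
+
+    def setup():
+        # module-level setup, nose style
+        values.append(1)
+
+    def teardown():
+        # module-level teardown, nose style
+        del values[:]
+
+    def test_values():
+        assert values == [1]
+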
+Unsupported idioms / issues
+----------------------------------
+
+- nose-style doctests are not collected and executed correctly,
+  and fixtures do not work for them.
+
+- no nose-configuration is recognized
+
+If you find other issues or have suggestions please run::
+
+ py.test --pastebin=all
+
+and send the resulting URL to a ``pytest`` contact channel,
+preferably the mailing list.
+
+Start improving this plugin in 30 seconds
+=========================================
+
+
+1. Download `pytest_nose.py`_ plugin source code
+2. Put it somewhere as ``pytest_nose.py`` into your import path
+3. A subsequent ``pytest`` run will use your local version
+
+Check out customize_, other plugins_ or `get in contact`_.
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/oejskit.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/oejskit.rst
new file mode 100644
index 000000000..4995aa17c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/oejskit.rst
@@ -0,0 +1,12 @@
+pytest_oejskit plugin (EXTERNAL)
+==========================================
+
+The `oejskit`_ package offers a ``pytest`` plugin for running JavaScript tests in live browsers. Running inside the browsers comes with some speed cost; on the other hand it means the code is tested against real-world DOM implementations.
+The approach makes it possible to write integration tests in which the JavaScript code is tested against server-side Python code, mocked as necessary. Any server-side framework that can already be exposed through WSGI (or for which a subset of WSGI can be written to accommodate jskit's own needs) can play along.
+
+For more info and download please visit the `oejskit PyPI`_ page.
+
+.. _`oejskit`:
+.. _`oejskit PyPI`: http://pypi.python.org/pypi/oejskit
+
+.. source link 'http://bitbucket.org/pedronis/js-infrastructure/src/tip/pytest_jstests.py',
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/terminal.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/terminal.rst
new file mode 100644
index 000000000..214c24dfc
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/terminal.rst
@@ -0,0 +1,40 @@
+
+Implements terminal reporting of the full testing process.
+==========================================================
+
+
+.. contents::
+ :local:
+
+This is a good source for looking at the various reporting hooks.
+
+command line options
+--------------------
+
+
+``-v, --verbose``
+ increase verbosity.
+``-r chars``
+  show extra test summary info as specified by chars (f)ailed, (s)kipped, (x)failed, (X)passed; see the example below.
+``-l, --showlocals``
+ show locals in tracebacks (disabled by default).
+``--report=opts``
+ (deprecated, use -r)
+``--tb=style``
+ traceback print mode (long/short/line/no).
+``--fulltrace``
+ don't cut any tracebacks (default is to cut).
+``--fixtures``
+ show available function arguments, sorted by plugin
+
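+For example, to get a short summary of skipped and xfailed tests at the end
+of a run, an invocation could look like::
+
+    py.test -rsx
+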
+Start improving this plugin in 30 seconds
+=========================================
+
+
+1. Download `pytest_terminal.py`_ plugin source code
+2. Put it somewhere as ``pytest_terminal.py`` into your import path
+3. A subsequent ``pytest`` run will use your local version
+
+Check out customize_, other plugins_ or `get in contact`_.
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/xdist.rst b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/xdist.rst
new file mode 100644
index 000000000..7ab6cdc8b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/plugin/xdist.rst
@@ -0,0 +1,172 @@
+
+loop on failing tests, distribute test runs to CPUs and hosts.
+==============================================================
+
+
+.. contents::
+ :local:
+
+The `pytest-xdist`_ plugin extends ``pytest`` with some unique
+test execution modes:
+
+* Looponfail: run your tests repeatedly in a subprocess. After each run
+ ``pytest`` waits until a file in your project changes and then re-runs the
+ previously failing tests. This is repeated until all tests pass after which
+ again a full run is performed.
+
+* Load-balancing: if you have multiple CPUs or hosts you can use
+  those for a combined test run. This allows you to speed up
+  development or to use special resources of remote machines.
+
+* Multi-Platform coverage: you can specify different Python interpreters
+ or different platforms and run tests in parallel on all of them.
+
+Before running tests remotely, ``pytest`` efficiently synchronizes your
+program source code to the remote place. All test results
+are reported back and displayed to your local test session.
+You may specify different Python versions and interpreters.
+
+.. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist
+
+Usage examples
+---------------------
+
+Speed up test runs by sending tests to multiple CPUs
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+To send tests to multiple CPUs, type::
+
+ py.test -n NUM
+
+Especially for longer running tests or tests requiring
+a lot of IO this can lead to considerable speed ups.
+
+
+Running tests in a Python subprocess
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+To instantiate a python2.4 subprocess and send tests to it, you may type::
+
+ py.test -d --tx popen//python=python2.4
+
+This will start a subprocess which is run with the "python2.4"
+Python interpreter, found in your system binary lookup path.
+
+If you prefix the --tx option value like this::
+
+ --tx 3*popen//python=python2.4
+
+then three subprocesses will be created and tests
+will be load-balanced across these three processes.
+
+
+Sending tests to remote SSH accounts
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Suppose you have a package ``mypkg`` which contains some
+tests that you can successfully run locally, and you
+have an ssh-reachable machine ``myhost``. Then
+you can ad-hoc distribute your tests by typing::
+
+    py.test -d --tx ssh=myhost --rsyncdir mypkg mypkg
+
+This will synchronize your ``mypkg`` package directory
+to a remote ssh account and then locally collect tests
+and send them to remote places for execution.
+
+You can specify multiple ``--rsyncdir`` directories
+to be sent to the remote side.
+
+**NOTE:** For ``pytest`` to collect and send tests correctly
+you not only need to make sure all code and tests
+directories are rsynced, but that any test (sub) directory
+also has an ``__init__.py`` file because internally
+``pytest`` references tests using their fully qualified python
+module path. **You will otherwise get strange errors**
+during setup of the remote side.
+
+Sending tests to remote Socket Servers
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Download the single-module `socketserver.py`_ Python program
+and run it like this::
+
+ python socketserver.py
+
+It will tell you that it starts listening on the default
+port. From your home machine you can now specify this
+new socket host with something like this::
+
+ py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
+
+
+.. _`atonce`:
+
+Running tests on many platforms at once
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+The basic command to run tests on multiple platforms is::
+
+ py.test --dist=each --tx=spec1 --tx=spec2
+
+If you specify a Windows host, an OSX host and a Linux
+environment this command will send each test to all
+platforms - and report back failures from all platforms
+at once. The specification strings use the `xspec syntax`_.
+
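+For example, an invocation against two execution environments might look
+like this (host names and interpreter paths are illustrative)::
+
+    py.test --dist=each
+           --tx ssh=user@linuxhost//python=python2.7
+           --tx ssh=user@osxhost//python=python2.7
+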
+.. _`xspec syntax`: http://codespeak.net/execnet/trunk/basics.html#xspec
+
+.. _`socketserver.py`: http://codespeak.net/svn/py/dist/py/execnet/script/socketserver.py
+
+.. _`execnet`: http://codespeak.net/execnet
+
+Specifying test exec environments in a conftest.py
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Instead of specifying command line options, you can
+put options values in a ``conftest.py`` file like this::
+
+ option_tx = ['ssh=myhost//python=python2.7', 'popen//python=python2.7']
+ option_dist = True
+
+Any commandline ``--tx`` specifications will add to the list of
+available execution environments.
+
+Specifying "rsync" dirs in a conftest.py
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+In your ``mypkg/conftest.py`` you may specify directories to synchronise
+or to exclude::
+
+ rsyncdirs = ['.', '../plugins']
+ rsyncignore = ['_cache']
+
+These directory specifications are relative to the directory
+where the ``conftest.py`` is found.
+
+command line options
+--------------------
+
+
+``-f, --looponfail``
+ run tests in subprocess, wait for modified files and re-run failing test set until all pass.
+``-n numprocesses``
+ shortcut for '--dist=load --tx=NUM*popen'
+``--boxed``
+ box each test run in a separate process (unix)
+``--dist=distmode``
+ set mode for distributing tests to exec environments.
+
+ each: send each test to each available environment.
+
+ load: send each test to one available environment so it is run only once.
+
+ (default) no: run tests inprocess, don't distribute.
+``--tx=xspec``
+ add a test execution environment. some examples: --tx popen//python=python2.7 --tx socket=192.168.1.102:8888 --tx ssh=user@codespeak.net//chdir=testcache
+``-d``
+ load-balance tests. shortcut for '--dist=load'
+``--rsyncdir=dir1``
+ add directory for rsyncing to remote tx nodes.
+
+.. include:: links.txt
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/test/test.html b/testing/web-platform/tests/tools/pytest/doc/en/test/test.html
new file mode 100644
index 000000000..7d00f718a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/test/test.html
@@ -0,0 +1,18 @@
+<html>
+ <head>
+ <meta http-equiv="refresh" content=" 1 ; URL=index.html" />
+ </head>
+
+ <body>
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-7597274-3");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+</body>
+</html>
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/tmpdir.rst b/testing/web-platform/tests/tools/pytest/doc/en/tmpdir.rst
new file mode 100644
index 000000000..f8935b8ce
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/tmpdir.rst
@@ -0,0 +1,111 @@
+
+.. _`tmpdir handling`:
+.. _tmpdir:
+
+Temporary directories and files
+================================================
+
+The 'tmpdir' fixture
+--------------------
+
+You can use the ``tmpdir`` fixture which will
+provide a temporary directory unique to the test invocation,
+created in the `base temporary directory`_.
+
+``tmpdir`` is a `py.path.local`_ object which offers ``os.path`` methods
+and more. Here is an example test usage::
+
+ # content of test_tmpdir.py
+ import os
+ def test_create_file(tmpdir):
+ p = tmpdir.mkdir("sub").join("hello.txt")
+ p.write("content")
+ assert p.read() == "content"
+ assert len(tmpdir.listdir()) == 1
+ assert 0
+
+Running this would result in a passed test except for the last
+``assert 0`` line which we use to look at values::
+
+ $ py.test test_tmpdir.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_tmpdir.py F
+
+ ======= FAILURES ========
+ _______ test_create_file ________
+
+ tmpdir = local('PYTEST_TMPDIR/test_create_file0')
+
+ def test_create_file(tmpdir):
+ p = tmpdir.mkdir("sub").join("hello.txt")
+ p.write("content")
+ assert p.read() == "content"
+ assert len(tmpdir.listdir()) == 1
+ > assert 0
+ E assert 0
+
+ test_tmpdir.py:7: AssertionError
+ ======= 1 failed in 0.12 seconds ========
+
+The 'tmpdir_factory' fixture
+----------------------------
+
+.. versionadded:: 2.8
+
+The ``tmpdir_factory`` is a session-scoped fixture which can be used
+to create arbitrary temporary directories from any other fixture or test.
+
+For example, suppose your test suite needs a large image on disk, which is
+generated procedurally. Instead of computing the same image into each test's
+own ``tmpdir``, you can generate it once per session
+to save time:
+
+.. code-block:: python
+
+ # contents of conftest.py
+ import pytest
+
+ @pytest.fixture(scope='session')
+ def image_file(tmpdir_factory):
+ img = compute_expensive_image()
+ fn = tmpdir_factory.mktemp('data').join('img.png')
+ img.save(str(fn))
+ return fn
+
+ # contents of test_image.py
+ def test_histogram(image_file):
+ img = load_image(image_file)
+ # compute and test histogram
+
+``tmpdir_factory`` instances have the following methods:
+
+.. currentmodule:: _pytest.tmpdir
+
+.. automethod:: TempdirFactory.mktemp
+.. automethod:: TempdirFactory.getbasetemp
+
+.. _`base temporary directory`:
+
+The default base temporary directory
+-----------------------------------------------
+
+Temporary directories are by default created as sub-directories of
+the system temporary directory. The base name will be ``pytest-NUM`` where
+``NUM`` will be incremented with each test run. Moreover, only the three
+most recent base directories are kept; older entries are removed.
+
+You can override the default temporary directory setting like this::
+
+ py.test --basetemp=mydir
+
+When distributing tests on the local machine, ``pytest`` takes care to
+configure a basetemp directory for the sub processes such that all temporary
+data lands below a single per-test run basetemp directory.
+
+.. _`py.path.local`: http://py.rtfd.org/en/latest/path.html
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/unittest.rst b/testing/web-platform/tests/tools/pytest/doc/en/unittest.rst
new file mode 100644
index 000000000..ce99bd118
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/unittest.rst
@@ -0,0 +1,190 @@
+
+.. _`unittest.TestCase`:
+
+Support for unittest.TestCase / Integration of fixtures
+=====================================================================
+
+.. _`unittest.py style`: http://docs.python.org/library/unittest.html
+
+``pytest`` has support for running Python `unittest.py style`_ tests.
+It's meant for leveraging existing unittest-style projects
+to use pytest features. Concretely, pytest will automatically
+collect ``unittest.TestCase`` subclasses and their ``test`` methods in
+test files. It will invoke typical setup/teardown methods and
+generally tries to make test suites written for unittest also
+run using ``pytest``. We assume here that you are familiar with writing
+``unittest.TestCase`` style tests and therefore focus on
+integration aspects.
+
+Usage
+-------------------------------------------------------------------
+
+After :ref:`installation` type::
+
+ py.test
+
+and you should be able to run your unittest-style tests if they
+are contained in ``test_*`` modules. If that works for you then
+you can make use of most :ref:`pytest features <features>`, for example
+``--pdb`` debugging in failures, using :ref:`plain assert-statements <assert>`,
+:ref:`more informative tracebacks <tbreportdemo>`, stdout-capturing or
+distributing tests to multiple CPUs via the ``-nNUM`` option if you
+installed the ``pytest-xdist`` plugin. Please refer to
+the general ``pytest`` documentation for many more examples.
+
+Mixing pytest fixtures into unittest.TestCase style tests
+-----------------------------------------------------------
+
+Running your unittest with ``pytest`` allows you to use its
+:ref:`fixture mechanism <fixture>` with ``unittest.TestCase`` style
+tests. Assuming you have at least skimmed the pytest fixture features,
+let's jump-start into an example that integrates a pytest ``db_class``
+fixture, setting up a class-cached database object, and then reference
+it from a unittest-style test::
+
+ # content of conftest.py
+
+ # we define a fixture function below and it will be "used" by
+ # referencing its name from tests
+
+ import pytest
+
+ @pytest.fixture(scope="class")
+ def db_class(request):
+ class DummyDB:
+ pass
+ # set a class attribute on the invoking test context
+ request.cls.db = DummyDB()
+
+This defines a fixture function ``db_class`` which - if used - is
+called once for each test class and which sets the class-level
+``db`` attribute to a ``DummyDB`` instance. The fixture function
+achieves this by receiving a special ``request`` object which gives
+access to :ref:`the requesting test context <request-context>` such
+as the ``cls`` attribute, denoting the class from which the fixture
+is used. This architecture de-couples fixture writing from actual test
+code and allows re-use of the fixture by a minimal reference, the fixture
+name. So let's write an actual ``unittest.TestCase`` class using our
+fixture definition::
+
+ # content of test_unittest_db.py
+
+ import unittest
+ import pytest
+
+ @pytest.mark.usefixtures("db_class")
+ class MyTest(unittest.TestCase):
+ def test_method1(self):
+ assert hasattr(self, "db")
+ assert 0, self.db # fail for demo purposes
+
+ def test_method2(self):
+ assert 0, self.db # fail for demo purposes
+
+The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that
+the pytest fixture function ``db_class`` is called once per class.
+Due to the deliberately failing assert statements, we can take a look at
+the ``self.db`` values in the traceback::
+
+ $ py.test test_unittest_db.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_unittest_db.py FF
+
+ ======= FAILURES ========
+ _______ MyTest.test_method1 ________
+
+ self = <test_unittest_db.MyTest testMethod=test_method1>
+
+ def test_method1(self):
+ assert hasattr(self, "db")
+ > assert 0, self.db # fail for demo purposes
+ E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
+ E assert 0
+
+ test_unittest_db.py:9: AssertionError
+ _______ MyTest.test_method2 ________
+
+ self = <test_unittest_db.MyTest testMethod=test_method2>
+
+ def test_method2(self):
+ > assert 0, self.db # fail for demo purposes
+ E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
+ E assert 0
+
+ test_unittest_db.py:12: AssertionError
+ ======= 2 failed in 0.12 seconds ========
+
+This default pytest traceback shows that the two test methods
+share the same ``self.db`` instance which was our intention
+when writing the class-scoped fixture function above.
+
+
+autouse fixtures and accessing other fixtures
+-------------------------------------------------------------------
+
+Although it's usually better to explicitly declare use of fixtures you need
+for a given test, you may sometimes want to have fixtures that are
+automatically used in a given context. After all, the traditional
+style of unittest-setup mandates the use of this implicit fixture writing
+and chances are, you are used to it or like it.
+
+You can flag fixture functions with ``@pytest.fixture(autouse=True)``
+and define the fixture function in the context where you want it used.
+Let's look at an ``initdir`` fixture which makes all test methods of a
+``TestCase`` class execute in a temporary directory with a
+pre-initialized ``samplefile.ini``. Our ``initdir`` fixture itself uses
+the pytest builtin :ref:`tmpdir <tmpdir>` fixture to delegate the
+creation of a per-test temporary directory::
+
+ # content of test_unittest_cleandir.py
+ import pytest
+ import unittest
+
+ class MyTest(unittest.TestCase):
+ @pytest.fixture(autouse=True)
+ def initdir(self, tmpdir):
+ tmpdir.chdir() # change to pytest-provided temporary directory
+ tmpdir.join("samplefile.ini").write("# testdata")
+
+ def test_method(self):
+ s = open("samplefile.ini").read()
+ assert "testdata" in s
+
+Due to the ``autouse`` flag the ``initdir`` fixture function will be
+used for all methods of the class where it is defined. This is a
+shortcut for using a ``@pytest.mark.usefixtures("initdir")`` marker
+on the class like in the previous example.
+
+Running this test module ...::
+
+ $ py.test -q test_unittest_cleandir.py
+ .
+ 1 passed in 0.12 seconds
+
+... gives us one passed test because the ``initdir`` fixture function
+was executed ahead of the ``test_method``.
+
+.. note::
+
+   While pytest supports receiving fixtures via :ref:`test function arguments <funcargs>` for non-unittest test methods, ``unittest.TestCase`` methods cannot directly receive fixture
+   function arguments, as implementing that would likely interfere
+   with the ability to run general unittest.TestCase test suites.
+   Maybe optional support would be possible, though. If unittest eventually
+   grows a plugin system, that should help as well. In the meantime, the
+   above ``usefixtures`` and ``autouse`` examples should help to mix
+   pytest fixtures into unittest suites. And of course you can also start
+   to selectively drop the ``unittest.TestCase`` subclassing, use
+   plain asserts and get the full pytest feature set.
+
+
+Converting from unittest to pytest
+---------------------------------------
+
+If you want to convert your unittest testcases to pytest, there are
+some helpers like `unittest2pytest
+<https://pypi.python.org/pypi/unittest2pytest/>`__, which uses lib2to3
+and introspection for the transformation.
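+As a rough sketch of the kind of rewrite such a tool performs::
+
+    # unittest style
+    import unittest
+
+    class TestNumbers(unittest.TestCase):
+        def test_add(self):
+            self.assertEqual(1 + 1, 2)
+
+    # rough pytest equivalent after conversion
+    def test_add():
+        assert 1 + 1 == 2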
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/usage.rst b/testing/web-platform/tests/tools/pytest/doc/en/usage.rst
new file mode 100644
index 000000000..4b92fd1e1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/usage.rst
@@ -0,0 +1,275 @@
+
+.. _usage:
+
+Usage and Invocations
+==========================================
+
+
+.. _cmdline:
+
+Calling pytest through ``python -m pytest``
+-----------------------------------------------------
+
+.. versionadded:: 2.0
+
+You can invoke testing through the Python interpreter from the command line::
+
+ python -m pytest [...]
+
+This is equivalent to invoking the command line script ``py.test [...]``
+directly.
+
+Getting help on version, option names, environment variables
+--------------------------------------------------------------
+
+::
+
+ py.test --version # shows where pytest was imported from
+ py.test --fixtures # show available builtin function arguments
+ py.test -h | --help # show help on command line and config file options
+
+
+Stopping after the first (or N) failures
+---------------------------------------------------
+
+To stop the testing process after the first (N) failures::
+
+ py.test -x # stop after first failure
+ py.test --maxfail=2 # stop after two failures
+
+Specifying tests / selecting tests
+---------------------------------------------------
+
+Several test run options::
+
+ py.test test_mod.py # run tests in module
+ py.test somepath # run all tests below somepath
+ py.test -k stringexpr # only run tests with names that match the
+ # "string expression", e.g. "MyClass and not method"
+ # will select TestMyClass.test_something
+ # but not TestMyClass.test_method_simple
+ py.test test_mod.py::test_func # only run tests that match the "node ID",
+ # e.g "test_mod.py::test_func" will select
+ # only test_func in test_mod.py
+ py.test test_mod.py::TestClass::test_method # run a single method in
+ # a single class
+
+Import 'pkg' and use its filesystem location to find and run tests::
+
+    py.test --pyargs pkg            # run all tests found below directory of pkg
+
+Modifying Python traceback printing
+----------------------------------------------
+
+Examples for modifying traceback printing::
+
+ py.test --showlocals # show local variables in tracebacks
+ py.test -l # show local variables (shortcut)
+
+ py.test --tb=auto # (default) 'long' tracebacks for the first and last
+ # entry, but 'short' style for the other entries
+ py.test --tb=long # exhaustive, informative traceback formatting
+ py.test --tb=short # shorter traceback format
+ py.test --tb=line # only one line per failure
+ py.test --tb=native # Python standard library formatting
+ py.test --tb=no # no traceback at all
+
+Dropping to PDB_ (Python Debugger) on failures
+-----------------------------------------------
+
+.. _PDB: http://docs.python.org/library/pdb.html
+
+Python comes with a builtin Python debugger called PDB_. ``pytest``
+allows one to drop into the PDB_ prompt via a command line option::
+
+ py.test --pdb
+
+This will invoke the Python debugger on every failure. Often you might
+only want to do this for the first failing test to understand a certain
+failure situation::
+
+ py.test -x --pdb # drop to PDB on first failure, then end test session
+ py.test --pdb --maxfail=3 # drop to PDB for first three failures
+
+Note that on any failure the exception information is stored on
+``sys.last_value``, ``sys.last_type`` and ``sys.last_traceback``. In
+interactive use, this allows one to drop into postmortem debugging with
+any debug tool. One can also manually access the exception information,
+for example::
+
+ >>> import sys
+ >>> sys.last_traceback.tb_lineno
+ 42
+ >>> sys.last_value
+ AssertionError('assert result == "ok"',)
+
+Setting a breakpoint / aka ``set_trace()``
+----------------------------------------------------
+
+If you want to set a breakpoint and enter the ``pdb.set_trace()`` you
+can use a helper::
+
+ import pytest
+ def test_function():
+ ...
+ pytest.set_trace() # invoke PDB debugger and tracing
+
+.. versionadded: 2.0.0
+
+Prior to pytest version 2.0.0 you could only enter PDB_ tracing if you disabled
+capturing on the command line via ``py.test -s``. In later versions, pytest
+automatically disables its output capture when you enter PDB_ tracing:
+
+* Output capture in other tests is not affected.
+* Any prior test output that has already been captured is processed as
+  usual.
+* Any later output produced within the same test will not be captured and will
+ instead get sent directly to ``sys.stdout``. Note that this holds true even
+ for test output occurring after you exit the interactive PDB_ tracing session
+ and continue with the regular test run.
+
+.. versionadded: 2.4.0
+
+Since pytest version 2.4.0 you can also use the native Python
+``import pdb;pdb.set_trace()`` call to enter PDB_ tracing without having to use
+the ``pytest.set_trace()`` wrapper or explicitly disable pytest's output
+capturing via ``py.test -s``.
+
+.. _durations:
+
+Profiling test execution duration
+-------------------------------------
+
+.. versionadded: 2.2
+
+To get a list of the slowest 10 test durations::
+
+ py.test --durations=10
+
+
+Creating JUnitXML format files
+----------------------------------------------------
+
+To create result files which can be read by Hudson_ or other continuous
+integration servers, use this invocation::
+
+ py.test --junitxml=path
+
+to create an XML file at ``path``.
+
+record_xml_property
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.8
+
+If you want to log additional information for a test, you can use the
+``record_xml_property`` fixture:
+
+.. code-block:: python
+
+ def test_function(record_xml_property):
+ record_xml_property("example_key", 1)
+ assert 0
+
+This will add an extra property ``example_key="1"`` to the generated
+``testcase`` tag:
+
+.. code-block:: xml
+
+ <testcase classname="test_function" file="test_function.py" line="0" name="test_function" time="0.0009">
+ <properties>
+ <property name="example_key" value="1" />
+ </properties>
+ </testcase>
+
+.. warning::
+
+ This is an experimental feature, and its interface might be replaced
+ by something more powerful and general in future versions. The
+ functionality per se will be kept, however.
+
+ Currently it does not work when used with the ``pytest-xdist`` plugin.
+
+ Also please note that using this feature will break any schema verification.
+ This might be a problem when used with some CI servers.
+
+Creating resultlog format files
+----------------------------------------------------
+
+To create plain-text machine-readable result files you can issue::
+
+ py.test --resultlog=path
+
+and look at the content at the ``path`` location. Such files are used e.g.
+by the `PyPy-test`_ web page to show test results over several revisions.
+
+.. _`PyPy-test`: http://buildbot.pypy.org/summary
+
+
+Sending test report to online pastebin service
+-----------------------------------------------------
+
+**Creating a URL for each test failure**::
+
+ py.test --pastebin=failed
+
+This will submit test run information to a remote Paste service and
+provide a URL for each failure. You may select tests as usual or add
+for example ``-x`` if you only want to send one particular failure.
+
+**Creating a URL for a whole test session log**::
+
+ py.test --pastebin=all
+
+Currently only pasting to the http://bpaste.net service is implemented.
+
+Disabling plugins
+-----------------
+
+To disable loading specific plugins at invocation time, use the ``-p`` option
+together with the prefix ``no:``.
+
+Example: to disable loading the plugin ``doctest``, which is responsible for
+executing doctest tests from text files, invoke py.test like this::
+
+ py.test -p no:doctest
+
+.. _`pytest.main-usage`:
+
+Calling pytest from Python code
+----------------------------------------------------
+
+.. versionadded:: 2.0
+
+You can invoke ``pytest`` from Python code directly::
+
+ pytest.main()
+
+This acts as if you were calling "py.test" from the command line.
+It will not raise ``SystemExit`` but return the exit code instead.
+You can pass in options and arguments::
+
+ pytest.main(['-x', 'mytestdir'])
+
+or pass in a string::
+
+ pytest.main("-x mytestdir")
+
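+Because the return value is the exit code, a small wrapper script can simply
+forward it to ``sys.exit`` (a minimal sketch; ``mytestdir`` is a placeholder
+path)::
+
+    # content of run_tests.py (hypothetical helper script)
+    import sys
+    import pytest
+
+    if __name__ == "__main__":
+        sys.exit(pytest.main(["-x", "mytestdir"]))
+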
+You can specify additional plugins to ``pytest.main``::
+
+ # content of myinvoke.py
+ import pytest
+ class MyPlugin:
+ def pytest_sessionfinish(self):
+ print("*** test run reporting finishing")
+
+ pytest.main("-qq", plugins=[MyPlugin()])
+
+Running it will show that ``MyPlugin`` was added and its
+hook was invoked::
+
+ $ python myinvoke.py
+ *** test run reporting finishing
+
+
+.. include:: links.inc
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/writing_plugins.rst b/testing/web-platform/tests/tools/pytest/doc/en/writing_plugins.rst
new file mode 100644
index 000000000..cc346aaa8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/writing_plugins.rst
@@ -0,0 +1,575 @@
+.. _plugins:
+.. _`writing-plugins`:
+
+Writing plugins
+===============
+
+It is easy to implement `local conftest plugins`_ for your own project
+or `pip-installable plugins`_ that can be used throughout many projects,
+including third party projects. Please refer to :ref:`using plugins` if you
+only want to use but not write plugins.
+
+A plugin contains one or multiple hook functions. :ref:`Writing hooks <writinghooks>`
+explains the basics and details of how you can write a hook function yourself.
+``pytest`` implements all aspects of configuration, collection, running and
+reporting by calling `well specified hooks`_ of the following plugins:
+
+* :ref:`builtin plugins`: loaded from pytest's internal ``_pytest`` directory.
+
+* :ref:`external plugins <extplugins>`: modules discovered through
+ `setuptools entry points`_
+
+* `conftest.py plugins`_: modules auto-discovered in test directories
+
+In principle, each hook call is a ``1:N`` Python function call where ``N`` is the
+number of registered implementation functions for a given specification.
+All specifications and implementations follow the ``pytest_`` prefix
+naming convention, making them easy to distinguish and find.
+
+.. _`pluginorder`:
+
+Plugin discovery order at tool startup
+--------------------------------------
+
+``pytest`` loads plugin modules at tool startup in the following way:
+
+* by loading all builtin plugins
+
+* by loading all plugins registered through `setuptools entry points`_.
+
+* by pre-scanning the command line for the ``-p name`` option
+ and loading the specified plugin before actual command line parsing.
+
+* by loading all :file:`conftest.py` files as inferred by the command line
+ invocation:
+
+ - if no test paths are specified, use the current dir as a test path
+ - if it exists, load ``conftest.py`` and ``test*/conftest.py`` relative
+ to the directory part of the first test path.
+
+ Note that pytest does not find ``conftest.py`` files in deeper nested
+ sub directories at tool startup. It is usually a good idea to keep
+ your conftest.py file in the top level test or project root directory.
+
+* by recursively loading all plugins specified by the
+ ``pytest_plugins`` variable in ``conftest.py`` files
+
+
+.. _`pytest/plugin`: http://bitbucket.org/pytest-dev/pytest/src/tip/pytest/plugin/
+.. _`conftest.py plugins`:
+.. _`conftest.py`:
+.. _`localplugin`:
+.. _`conftest`:
+.. _`local conftest plugins`:
+
+conftest.py: local per-directory plugins
+----------------------------------------
+
+Local ``conftest.py`` plugins contain directory-specific hook
+implementations.  Session and test running activities will
+invoke all hooks defined in ``conftest.py`` files closer to the
+root of the filesystem.  Here is an example of implementing the
+``pytest_runtest_setup`` hook so that it is called for tests in the ``a``
+sub directory but not for other directories::
+
+ a/conftest.py:
+ def pytest_runtest_setup(item):
+ # called for running each test in 'a' directory
+ print ("setting up", item)
+
+ a/test_sub.py:
+ def test_sub():
+ pass
+
+ test_flat.py:
+ def test_flat():
+ pass
+
+Here is how you might run it::
+
+ py.test test_flat.py # will not show "setting up"
+ py.test a/test_sub.py # will show "setting up"
+
+.. Note::
+ If you have ``conftest.py`` files which do not reside in a
+ python package directory (i.e. one containing an ``__init__.py``) then
+ "import conftest" can be ambiguous because there might be other
+ ``conftest.py`` files as well on your PYTHONPATH or ``sys.path``.
+ It is thus good practice for projects to either put ``conftest.py``
+ under a package scope or to never import anything from a
+ conftest.py file.
+
+
+Writing your own plugin
+-----------------------
+
+.. _`setuptools`: http://pypi.python.org/pypi/setuptools
+
+If you want to write a plugin, there are many real-life examples
+you can copy from:
+
+* a custom collection example plugin: :ref:`yaml plugin`
+* around 20 :ref:`builtin plugins` which provide pytest's own functionality
+* many `external plugins <http://plugincompat.herokuapp.com>`_ providing additional features
+
+All of these plugins implement the documented `well specified hooks`_
+to extend and add functionality.
+
+.. note::
+ Make sure to check out the excellent
+ `cookiecutter-pytest-plugin <https://github.com/pytest-dev/cookiecutter-pytest-plugin>`_
+ project, which is a `cookiecutter template <https://github.com/audreyr/cookiecutter>`_
+ for authoring plugins.
+
+ The template provides an excellent starting point with a working plugin,
+ tests running with tox, a comprehensive README and
+ entry points already pre-configured.
+
+Also consider :ref:`contributing your plugin to pytest-dev<submitplugin>`
+once it has some happy users other than yourself.
+
+
+.. _`setuptools entry points`:
+.. _`pip-installable plugins`:
+
+Making your plugin installable by others
+----------------------------------------
+
+If you want to make your plugin externally available, you
+may define a so-called entry point for your distribution so
+that ``pytest`` finds your plugin module. Entry points are
+a feature that is provided by `setuptools`_. pytest looks up
+the ``pytest11`` entrypoint to discover its
+plugins and you can thus make your plugin available by defining
+it in your setuptools invocation:
+
+.. sourcecode:: python
+
+ # sample ./setup.py file
+ from setuptools import setup
+
+ setup(
+ name="myproject",
+ packages = ['myproject'],
+
+ # the following makes a plugin available to pytest
+ entry_points = {
+ 'pytest11': [
+ 'name_of_plugin = myproject.pluginmodule',
+ ]
+ },
+ )
+
+If a package is installed this way, ``pytest`` will load
+``myproject.pluginmodule`` as a plugin which can define
+`well specified hooks`_.
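+
+For illustration, such a plugin module can be quite small.  The following is
+a hedged sketch (the option name is made up)::
+
+    # content of myproject/pluginmodule.py (hypothetical)
+    def pytest_addoption(parser):
+        parser.addoption("--myproject-option", action="store",
+                         help="example option added by the plugin")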
+
+
+
+
+Requiring/Loading plugins in a test module or conftest file
+-----------------------------------------------------------
+
+You can require plugins in a test module or a conftest file like this::
+
+ pytest_plugins = "name1", "name2",
+
+When the test module or conftest plugin is loaded the specified plugins
+will be loaded as well. You can also use dotted path like this::
+
+ pytest_plugins = "myapp.testsupport.myplugin"
+
+which will import the specified module as a ``pytest`` plugin.
+
+
+Accessing another plugin by name
+--------------------------------
+
+If a plugin wants to collaborate with code from
+another plugin it can obtain a reference through
+the plugin manager like this:
+
+.. sourcecode:: python
+
+ plugin = config.pluginmanager.getplugin("name_of_plugin")
+
+If you want to look at the names of existing plugins, use
+the ``--traceconfig`` option.
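+
+For example, a ``conftest.py`` could look up another plugin during
+configuration (a minimal sketch; ``name_of_plugin`` is a placeholder)::
+
+    # content of conftest.py
+    def pytest_configure(config):
+        plugin = config.pluginmanager.getplugin("name_of_plugin")
+        if plugin is not None:
+            # collaborate with the other plugin's code here
+            pass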
+
+Testing plugins
+---------------
+
+pytest comes with some facilities that you can enable for testing your
+plugin. Given that you have an installed plugin you can enable the
+:py:class:`testdir <_pytest.pytester.Testdir>` fixture via specifying a
+command line option to include the pytester plugin (``-p pytester``) or
+by putting ``pytest_plugins = "pytester"`` into your test or
+``conftest.py`` file. You then will have a ``testdir`` fixture which you
+can use like this::
+
+ # content of test_myplugin.py
+
+ pytest_plugins = "pytester" # to get testdir fixture
+
+ def test_myplugin(testdir):
+ testdir.makepyfile("""
+ def test_example():
+ pass
+ """)
+ result = testdir.runpytest("--verbose")
+ result.fnmatch_lines("""
+ test_example*
+ """)
+
+Note that by default ``testdir.runpytest()`` will run pytest
+in-process.  You can pass the command line option ``--runpytest=subprocess``
+to have it run in a subprocess.
+
+Also see the :py:class:`RunResult <_pytest.pytester.RunResult>` for more
+methods of the result object that you get from a call to ``runpytest``.
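+
+If you want the subprocess mode for a single call, the ``testdir`` fixture
+also provides a dedicated method (a hedged sketch mirroring the example
+above)::
+
+    def test_myplugin_in_subprocess(testdir):
+        testdir.makepyfile("""
+            def test_example():
+                pass
+        """)
+        result = testdir.runpytest_subprocess("--verbose")
+        result.stdout.fnmatch_lines(["*1 passed*"])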
+
+.. _`writinghooks`:
+
+Writing hook functions
+======================
+
+
+.. _validation:
+
+hook function validation and execution
+--------------------------------------
+
+pytest calls hook functions from registered plugins for any
+given hook specification. Let's look at a typical hook function
+for the ``pytest_collection_modifyitems(session, config,
+items)`` hook which pytest calls after collection of all test items is
+completed.
+
+When we implement a ``pytest_collection_modifyitems`` function in our plugin,
+pytest will verify during registration that you use argument
+names which match the specification and bail out if not.
+
+Let's look at a possible implementation:
+
+.. code-block:: python
+
+    def pytest_collection_modifyitems(config, items):
+        # called after collection is completed
+        # you can modify the ``items`` list here, e.g. reorder or filter it
+        pass
+
+Here, ``pytest`` will pass in ``config`` (the pytest config object)
+and ``items`` (the list of collected test items) but will not pass
+in the ``session`` argument because we didn't list it in the function
+signature. This dynamic "pruning" of arguments allows ``pytest`` to
+be "future-compatible": we can introduce new hook named parameters without
+breaking the signatures of existing hook implementations. It is one of
+the reasons for the general long-lived compatibility of pytest plugins.
+
+Note that hook functions other than ``pytest_runtest_*`` are not
+allowed to raise exceptions. Doing so will break the pytest run.
+
+
+
+firstresult: stop at first non-None result
+-------------------------------------------
+
+Most calls to ``pytest`` hooks result in a **list of results** which contains
+all non-None results of the called hook functions.
+
+Some hook specifications use the ``firstresult=True`` option so that the hook
+call only executes until the first of N registered functions returns a
+non-None result, which is then taken as the result of the overall hook call.
+The remaining hook functions will not be called in this case.
+
+
+hookwrapper: executing around other hooks
+-------------------------------------------------
+
+.. currentmodule:: _pytest.core
+
+.. versionadded:: 2.7 (experimental)
+
+pytest plugins can implement hook wrappers which wrap the execution
+of other hook implementations. A hook wrapper is a generator function
+which yields exactly once. When pytest invokes hooks it first executes
+hook wrappers and passes the same arguments as to the regular hooks.
+
+At the yield point of the hook wrapper pytest will execute the next hook
+implementations and return their result to the yield point in the form of
+a :py:class:`CallOutcome` instance which encapsulates a result or
+exception info. The yield point itself will thus typically not raise
+exceptions (unless there are bugs).
+
+Here is an example definition of a hook wrapper::
+
+ import pytest
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_pyfunc_call(pyfuncitem):
+ # do whatever you want before the next hook executes
+
+ outcome = yield
+ # outcome.excinfo may be None or a (cls, val, tb) tuple
+
+ res = outcome.get_result() # will raise if outcome was exception
+ # postprocess result
+
+Note that hook wrappers don't return results themselves, they merely
+perform tracing or other side effects around the actual hook implementations.
+If the result of the underlying hook is a mutable object, they may modify
+that result but it's probably better to avoid it.
+
+
+Hook function ordering / call example
+-------------------------------------
+
+For any given hook specification there may be more than one
+implementation and we thus generally view ``hook`` execution as a
+``1:N`` function call where ``N`` is the number of registered functions.
+There are ways to influence if a hook implementation comes before or
+after others, i.e. the position in the ``N``-sized list of functions:
+
+.. code-block:: python
+
+    # Plugin 1
+    @pytest.hookimpl(tryfirst=True)
+    def pytest_collection_modifyitems(items):
+        # will execute as early as possible
+        pass
+
+    # Plugin 2
+    @pytest.hookimpl(trylast=True)
+    def pytest_collection_modifyitems(items):
+        # will execute as late as possible
+        pass
+
+    # Plugin 3
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_collection_modifyitems(items):
+        # will execute even before the tryfirst one above!
+        outcome = yield
+        # will execute after all non-hookwrappers executed
+
+Here is the order of execution:
+
+1. Plugin3's pytest_collection_modifyitems called until the yield point
+ because it is a hook wrapper.
+
+2. Plugin1's pytest_collection_modifyitems is called because it is marked
+ with ``tryfirst=True``.
+
+3. Plugin2's pytest_collection_modifyitems is called because it is marked
+ with ``trylast=True`` (but even without this mark it would come after
+ Plugin1).
+
+4. Plugin3's pytest_collection_modifyitems then executes the code after the yield
+ point. The yield receives a :py:class:`CallOutcome` instance which encapsulates
+ the result from calling the non-wrappers. Wrappers shall not modify the result.
+
+It's possible to use ``tryfirst`` and ``trylast`` also in conjunction with
+``hookwrapper=True`` in which case it will influence the ordering of hookwrappers
+among each other.
+
+
+Declaring new hooks
+------------------------
+
+.. currentmodule:: _pytest.hookspec
+
+Plugins and ``conftest.py`` files may declare new hooks that can then be
+implemented by other plugins in order to alter behaviour or interact with
+the new plugin:
+
+.. autofunction:: pytest_addhooks
+
+Hooks are usually declared as do-nothing functions that contain only
+documentation describing when the hook will be called and what return values
+are expected.
+
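+A hedged sketch of how a ``conftest.py`` might declare and register its own
+hook (the module and hook names are hypothetical)::
+
+    # content of newhooks.py (hypothetical module holding hook declarations)
+    def pytest_my_post_collect(config, items):
+        """ called after this project's custom collection step;
+        declarations like this only document the hook. """
+
+    # content of conftest.py
+    import newhooks
+
+    def pytest_addhooks(pluginmanager):
+        pluginmanager.add_hookspecs(newhooks)
+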
+For an example, see `newhooks.py`_ from :ref:`xdist`.
+
+.. _`newhooks.py`: https://github.com/pytest-dev/pytest-xdist/blob/974bd566c599dc6a9ea291838c6f226197208b46/xdist/newhooks.py
+
+
+Optionally using hooks from 3rd party plugins
+---------------------------------------------
+
+Using new hooks from plugins as explained above might be a little tricky
+because of the standard :ref:`validation mechanism <validation>`:
+if you depend on a plugin that is not installed, validation will fail and
+the error message will not make much sense to your users.
+
+One approach is to defer the hook implementation to a new plugin instead of
+declaring the hook functions directly in your plugin module, for example::
+
+ # contents of myplugin.py
+
+ class DeferPlugin(object):
+ """Simple plugin to defer pytest-xdist hook functions."""
+
+ def pytest_testnodedown(self, node, error):
+ """standard xdist hook function.
+ """
+
+ def pytest_configure(config):
+ if config.pluginmanager.hasplugin('xdist'):
+ config.pluginmanager.register(DeferPlugin())
+
+This has the added benefit of allowing you to conditionally install hooks
+depending on which plugins are installed.
+
+.. _`well specified hooks`:
+
+.. currentmodule:: _pytest.hookspec
+
+pytest hook reference
+=====================
+
+
+Initialization, command line and configuration hooks
+----------------------------------------------------
+
+.. autofunction:: pytest_load_initial_conftests
+.. autofunction:: pytest_cmdline_preparse
+.. autofunction:: pytest_cmdline_parse
+.. autofunction:: pytest_namespace
+.. autofunction:: pytest_addoption
+.. autofunction:: pytest_cmdline_main
+.. autofunction:: pytest_configure
+.. autofunction:: pytest_unconfigure
+
+Generic "runtest" hooks
+-----------------------
+
+All runtest related hooks receive a :py:class:`pytest.Item` object.
+
+.. autofunction:: pytest_runtest_protocol
+.. autofunction:: pytest_runtest_setup
+.. autofunction:: pytest_runtest_call
+.. autofunction:: pytest_runtest_teardown
+.. autofunction:: pytest_runtest_makereport
+
+For deeper understanding you may look at the default implementation of
+these hooks in :py:mod:`_pytest.runner` and maybe also
+in :py:mod:`_pytest.pdb` which interacts with :py:mod:`_pytest.capture`
+and its input/output capturing in order to immediately drop
+into interactive debugging when a test failure occurs.
+
+The :py:mod:`_pytest.terminal` reporter specifically uses
+the reporting hook to print information about a test run.
+
+Collection hooks
+----------------
+
+``pytest`` calls the following hooks for collecting files and directories:
+
+.. autofunction:: pytest_ignore_collect
+.. autofunction:: pytest_collect_directory
+.. autofunction:: pytest_collect_file
+
+For influencing the collection of objects in Python modules
+you can use the following hook:
+
+.. autofunction:: pytest_pycollect_makeitem
+.. autofunction:: pytest_generate_tests
+
+After collection is complete, you can modify the order of
+items, delete or otherwise amend the test items:
+
+.. autofunction:: pytest_collection_modifyitems
+
+Reporting hooks
+---------------
+
+Session related reporting hooks:
+
+.. autofunction:: pytest_collectstart
+.. autofunction:: pytest_itemcollected
+.. autofunction:: pytest_collectreport
+.. autofunction:: pytest_deselected
+.. autofunction:: pytest_report_header
+.. autofunction:: pytest_report_teststatus
+.. autofunction:: pytest_terminal_summary
+
+And here is the central hook for reporting about
+test execution:
+
+.. autofunction:: pytest_runtest_logreport
+
+You can also use this hook to customize assertion representation for some
+types:
+
+.. autofunction:: pytest_assertrepr_compare
+
+
+Debugging/Interaction hooks
+---------------------------
+
+There are a few hooks which can be used for special
+reporting or interaction with exceptions:
+
+.. autofunction:: pytest_internalerror
+.. autofunction:: pytest_keyboard_interrupt
+.. autofunction:: pytest_exception_interact
+.. autofunction:: pytest_enter_pdb
+
+
+Reference of objects involved in hooks
+======================================
+
+.. autoclass:: _pytest.config.Config()
+ :members:
+
+.. autoclass:: _pytest.config.Parser()
+ :members:
+
+.. autoclass:: _pytest.main.Node()
+ :members:
+
+.. autoclass:: _pytest.main.Collector()
+ :members:
+ :show-inheritance:
+
+.. autoclass:: _pytest.main.Item()
+ :members:
+ :show-inheritance:
+
+.. autoclass:: _pytest.python.Module()
+ :members:
+ :show-inheritance:
+
+.. autoclass:: _pytest.python.Class()
+ :members:
+ :show-inheritance:
+
+.. autoclass:: _pytest.python.Function()
+ :members:
+ :show-inheritance:
+
+.. autoclass:: _pytest.runner.CallInfo()
+ :members:
+
+.. autoclass:: _pytest.runner.TestReport()
+ :members:
+
+.. autoclass:: _pytest.vendored_packages.pluggy._CallOutcome()
+ :members:
+
+.. autofunction:: _pytest.config.get_plugin_manager()
+
+.. autoclass:: _pytest.config.PytestPluginManager()
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: pluggy.PluginManager()
+ :members:
+
+.. currentmodule:: _pytest.pytester
+
+.. autoclass:: Testdir()
+ :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile
+
+.. autoclass:: RunResult()
+ :members:
+
+.. autoclass:: LineMatcher()
+ :members:
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/xdist.rst b/testing/web-platform/tests/tools/pytest/doc/en/xdist.rst
new file mode 100644
index 000000000..ee1bd6032
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/xdist.rst
@@ -0,0 +1,197 @@
+
+.. _`xdist`:
+
+xdist: pytest distributed testing plugin
+===============================================================
+
+The `pytest-xdist`_ plugin extends ``pytest`` with some unique
+test execution modes:
+
+* Looponfail: run your tests repeatedly in a subprocess. After each
+ run, ``pytest`` waits until a file in your project changes and then
+ re-runs the previously failing tests. This is repeated until all
+ tests pass. At this point a full run is again performed.
+
+* Multiprocess load-balancing: if you have multiple CPUs or hosts you can use
+ them for a combined test run.  This allows you to speed up
+ development or to use special resources of remote machines.
+
+* Multi-Platform coverage: you can specify different Python interpreters
+ or different platforms and run tests in parallel on all of them.
+
+Before running tests remotely, ``pytest`` efficiently "rsyncs" your
+program source code to the remote location.  All test results
+are reported back and displayed to your local terminal.
+You may specify different Python versions and interpreters.
+
+
+Installation of xdist plugin
+------------------------------
+
+Install the plugin with::
+
+ easy_install pytest-xdist
+
+ # or
+
+ pip install pytest-xdist
+
+or use the package in develop/in-place mode with
+a checkout of the `pytest-xdist repository`_ ::
+
+ python setup.py develop
+
+
+Usage examples
+---------------------
+
+.. _`xdistcpu`:
+
+Speed up test runs by sending tests to multiple CPUs
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+To send tests to multiple CPUs, type::
+
+ py.test -n NUM
+
+Especially for longer running tests or tests requiring
+a lot of I/O this can lead to considerable speed ups.
+
+
+Running tests in a Python subprocess
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+To instantiate a Python-2.7 subprocess and send tests to it, you may type::
+
+ py.test -d --tx popen//python=python2.7
+
+This will start a subprocess which is run with the "python2.7"
+Python interpreter, found in your system binary lookup path.
+
+If you prefix the --tx option value like this::
+
+ py.test -d --tx 3*popen//python=python2.7
+
+then three subprocesses would be created and the tests
+will be distributed to them and run simultaneously.
+
+.. _looponfailing:
+
+
+Running tests in looponfailing mode
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+For refactoring a project with a medium or large test suite
+you can use the looponfailing mode.  Simply add the ``-f`` option::
+
+ py.test -f
+
+and ``pytest`` will run your tests.  Assuming you have failures, it will then
+wait for file changes and re-run the failing test set.  File changes are
+detected by looking at the ``looponfailroots`` root directories and all of
+their contents (recursively).  If the default for this value does not work
+for you, you can change it in your project by setting a configuration option::
+
+ # content of a pytest.ini, setup.cfg or tox.ini file
+ [pytest]
+ looponfailroots = mypkg testdir
+
+This would lead to only looking for file changes in the respective
+directories, specified relative to the ini-file's directory.
+
+Sending tests to remote SSH accounts
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Suppose you have a package ``mypkg`` which contains some
+tests that you can successfully run locally. And you also
+have a ssh-reachable machine ``myhost``. Then
+you can ad-hoc distribute your tests by typing::
+
+ py.test -d --tx ssh=myhost --rsyncdir mypkg mypkg
+
+This will synchronize your ``mypkg`` package directory
+with a remote ssh account and then collect and run your
+tests at the remote side.
+
+You can specify multiple ``--rsyncdir`` directories
+to be sent to the remote side.
+
+.. XXX CHECK
+
+ **NOTE:** For ``pytest`` to collect and send tests correctly
+ you not only need to make sure all code and tests
+ directories are rsynced, but that any test (sub) directory
+ also has an ``__init__.py`` file because internally
+ ``pytest`` references tests as a fully qualified python
+ module path. **You will otherwise get strange errors**
+ during setup of the remote side.
+
+Sending tests to remote Socket Servers
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Download the single-module `socketserver.py`_ Python program
+and run it like this::
+
+ python socketserver.py
+
+It will tell you that it starts listening on the default
+port. You can now on your home machine specify this
+new socket host with something like this::
+
+ py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
+
+
+.. _`atonce`:
+
+Running tests on many platforms at once
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+The basic command to run tests on multiple platforms is::
+
+ py.test --dist=each --tx=spec1 --tx=spec2
+
+If you specify a Windows host, an OSX host and a Linux
+environment this command will send each test to all
+platforms - and report back failures from all platforms
+at once.  The specification strings use the `xspec syntax`_.
+
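+For example (a hedged sketch; the host names are placeholders)::
+
+    py.test --dist=each \
+            --tx ssh=first_host//python=python2.7 \
+            --tx ssh=second_host//python=python3.4
+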
+.. _`xspec syntax`: http://codespeak.net/execnet/basics.html#xspec
+
+.. _`socketserver.py`: http://bitbucket.org/hpk42/execnet/raw/2af991418160/execnet/script/socketserver.py
+
+.. _`execnet`: http://codespeak.net/execnet
+
+Specifying test exec environments in an ini file
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+pytest (since version 2.0) supports ini-style configuration.
+For example, you could make running with three subprocesses your default::
+
+ [pytest]
+ addopts = -n3
+
+You can also add default environments like this::
+
+ [pytest]
+ addopts = --tx ssh=myhost//python=python2.7 --tx ssh=myhost//python=python2.6
+
+and then just type::
+
+ py.test --dist=each
+
+to run tests in each of the environments.
+
+Specifying "rsync" dirs in an ini-file
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+In a ``tox.ini`` or ``setup.cfg`` file in your root project directory
+you may specify directories to include or to exclude in synchronisation::
+
+ [pytest]
+ rsyncdirs = . mypkg helperpkg
+ rsyncignore = .hg
+
+These directory specifications are relative to the directory
+where the configuration file was found.
+
+.. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist
+.. _`pytest-xdist repository`: http://bitbucket.org/pytest-dev/pytest-xdist
+.. _`pytest`: http://pytest.org
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/xunit_setup.rst b/testing/web-platform/tests/tools/pytest/doc/en/xunit_setup.rst
new file mode 100644
index 000000000..7a80f1299
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/xunit_setup.rst
@@ -0,0 +1,90 @@
+
+.. _`classic xunit`:
+.. _xunitsetup:
+
+classic xunit-style setup
+========================================
+
+This section describes a classic and popular way to implement
+fixtures (setup and teardown test state) on a per-module/class/function basis.
+pytest started supporting these methods around 2005 and subsequently
+nose and the standard library introduced them (under slightly different
+names). While these setup/teardown methods are and will remain fully
+supported you may also use pytest's more powerful :ref:`fixture mechanism
+<fixture>` which leverages the concept of dependency injection, allowing
+for a more modular and more scalable approach for managing test state,
+especially for larger projects and for functional testing. You can
+mix both fixture mechanisms in the same file but unittest-based
+test methods cannot receive fixture arguments.
+
+.. note::
+
+ As of pytest-2.4, teardownX functions are not called if
+ setupX existed and failed/was skipped. This harmonizes
+ behaviour across all major python testing tools.
+
+Module level setup/teardown
+--------------------------------------
+
+If you have multiple test functions and test classes in a single
+module you can optionally implement the following fixture methods
+which will usually be called once for all the functions::
+
+ def setup_module(module):
+ """ setup any state specific to the execution of the given module."""
+
+ def teardown_module(module):
+ """ teardown any state that was previously setup with a setup_module
+ method.
+ """
+
+Class level setup/teardown
+----------------------------------
+
+Similarly, the following methods are called at class level before
+and after all test methods of the class are called::
+
+ @classmethod
+ def setup_class(cls):
+ """ setup any state specific to the execution of the given class (which
+ usually contains tests).
+ """
+
+ @classmethod
+ def teardown_class(cls):
+ """ teardown any state that was previously setup with a call to
+ setup_class.
+ """
+
+Method and function level setup/teardown
+-----------------------------------------------
+
+Similarly, the following methods are called around each method invocation::
+
+ def setup_method(self, method):
+ """ setup any state tied to the execution of the given method in a
+ class. setup_method is invoked for every test method of a class.
+ """
+
+ def teardown_method(self, method):
+ """ teardown any state that was previously setup with a setup_method
+ call.
+ """
+
+If you would rather define test functions directly at module level
+you can also use the following functions to implement fixtures::
+
+ def setup_function(function):
+ """ setup any state tied to the execution of the given function.
+ Invoked for every test function in the module.
+ """
+
+ def teardown_function(function):
+ """ teardown any state that was previously setup with a setup_function
+ call.
+ """
+
+Note that it is possible for setup/teardown pairs to be invoked multiple times
+per testing process.
+
+.. _`unittest.py module`: http://docs.python.org/library/unittest.html
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/yieldfixture.rst b/testing/web-platform/tests/tools/pytest/doc/en/yieldfixture.rst
new file mode 100644
index 000000000..ee88a27df
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/yieldfixture.rst
@@ -0,0 +1,100 @@
+.. _yieldfixture:
+
+Fixture functions using "yield" / context manager integration
+---------------------------------------------------------------
+
+.. versionadded:: 2.4
+
+.. regendoc:wipe
+
+pytest-2.4 allows fixture functions to seamlessly use a ``yield`` instead
+of a ``return`` statement to provide a fixture value while otherwise
+fully supporting all other fixture features.
+
+Let's look at a simple standalone-example using the ``yield`` syntax::
+
+ # content of test_yield.py
+
+ import pytest
+
+ @pytest.yield_fixture
+ def passwd():
+ print ("\nsetup before yield")
+ f = open("/etc/passwd")
+ yield f.readlines()
+ print ("teardown after yield")
+ f.close()
+
+ def test_has_lines(passwd):
+ print ("test called")
+ assert passwd
+
+In contrast to :ref:`finalization through registering callbacks
+<finalization>`, our fixture function used a ``yield``
+statement to provide the lines of the ``/etc/passwd`` file.
+The code after the ``yield`` statement serves as the teardown code,
+avoiding the indirection of registering a teardown callback function.
+
+Let's run it with output capturing disabled::
+
+ $ py.test -q -s test_yield.py
+
+ setup before yield
+ test called
+ .teardown after yield
+
+ 1 passed in 0.12 seconds
+
+We can also seamlessly use the new syntax with ``with`` statements.
+Let's simplify the above ``passwd`` fixture::
+
+ # content of test_yield2.py
+
+ import pytest
+
+ @pytest.yield_fixture
+ def passwd():
+ with open("/etc/passwd") as f:
+ yield f.readlines()
+
+ def test_has_lines(passwd):
+ assert len(passwd) >= 1
+
+The file ``f`` will be closed after the test finishes execution
+because the Python ``file`` object supports finalization when
+the ``with`` statement ends.
+
+Note that the yield fixture form supports all other fixture
+features such as ``scope``, ``params``, etc., thus changing existing
+fixture functions to use ``yield`` is straightforward.
+
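+For instance, a minimal sketch combining ``yield`` with ``scope`` and
+``params`` (the fixture name and values are made up)::
+
+    import pytest
+
+    @pytest.yield_fixture(scope="module", params=["a", "b"])
+    def resource(request):
+        value = "resource-%s" % request.param   # setup, once per param
+        yield value
+        # teardown code runs after the last test using this parametrized value
+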
+.. note::
+
+ While the ``yield`` syntax is similar to what
+ :py:func:`contextlib.contextmanager` decorated functions
+ provide, with pytest fixture functions the part after the
+ "yield" will always be invoked, independently from the
+ exception status of the test function which uses the fixture.
+ This behaviour makes sense if you consider that many different
+ test functions might use a module or session scoped fixture.
+
+
+Discussion and future considerations / feedback
+++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+There are some topics that are worth mentioning:
+
+- usually ``yield`` is used for producing multiple values.
+ But fixture functions can only yield exactly one value.
+ Yielding a second fixture value will get you an error.
+ It's possible we can evolve pytest to allow for producing
+ multiple values as an alternative to current parametrization.
+ For now, you can just use the normal
+ :ref:`fixture parametrization <fixture-parametrize>`
+ mechanisms together with ``yield``-style fixtures.
+
+- lastly ``yield`` introduces more than one way to write
+ fixture functions, so what's the obvious way to a newcomer?
+
+If you want to give feedback or participate in the discussion of the above
+topics, please join our :ref:`contact channels`, you are most welcome.
diff --git a/testing/web-platform/tests/tools/pytest/extra/get_issues.py b/testing/web-platform/tests/tools/pytest/extra/get_issues.py
new file mode 100644
index 000000000..6437ba4c3
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/extra/get_issues.py
@@ -0,0 +1,74 @@
+import json
+import textwrap
+
+import py
+import requests
+
+issues_url = "http://bitbucket.org/api/1.0/repositories/pytest-dev/pytest/issues"
+
+def get_issues():
+ chunksize = 50
+ start = 0
+ issues = []
+ while 1:
+ post_data = {"accountname": "pytest-dev",
+ "repo_slug": "pytest",
+ "start": start,
+ "limit": chunksize}
+ print ("getting from", start)
+ r = requests.get(issues_url, params=post_data)
+ data = r.json()
+ issues.extend(data["issues"])
+ if start + chunksize >= data["count"]:
+ return issues
+ start += chunksize
+
+kind2num = "bug enhancement task proposal".split()
+
+status2num = "new open resolved duplicate invalid wontfix".split()
+
+def main(args):
+ cachefile = py.path.local(args.cache)
+ if not cachefile.exists() or args.refresh:
+ issues = get_issues()
+ cachefile.write(json.dumps(issues))
+ else:
+ issues = json.loads(cachefile.read())
+
+ open_issues = [x for x in issues
+ if x["status"] in ("new", "open")]
+
+ def kind_and_id(x):
+ kind = x["metadata"]["kind"]
+ return kind2num.index(kind), len(issues)-int(x["local_id"])
+ open_issues.sort(key=kind_and_id)
+ report(open_issues)
+
+def report(issues):
+ for issue in issues:
+ metadata = issue["metadata"]
+ priority = issue["priority"]
+ title = issue["title"]
+ content = issue["content"]
+ kind = metadata["kind"]
+ status = issue["status"]
+ id = issue["local_id"]
+ link = "https://bitbucket.org/pytest-dev/pytest/issue/%s/" % id
+ print("----")
+ print(status, kind, link)
+ print(title)
+ #print()
+ #lines = content.split("\n")
+ #print ("\n".join(lines[:3]))
+ #if len(lines) > 3 or len(content) > 240:
+ # print ("...")
+
+if __name__ == "__main__":
+ import argparse
+ parser = argparse.ArgumentParser("process bitbucket issues")
+ parser.add_argument("--refresh", action="store_true",
+ help="invalidate cache, refresh issues")
+ parser.add_argument("--cache", action="store", default="issues.json",
+ help="cache file")
+ args = parser.parse_args()
+ main(args)
diff --git a/testing/web-platform/tests/tools/pytest/extra/setup-py.test/setup.py b/testing/web-platform/tests/tools/pytest/extra/setup-py.test/setup.py
new file mode 100644
index 000000000..d0560ce1f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/extra/setup-py.test/setup.py
@@ -0,0 +1,11 @@
+import sys
+from distutils.core import setup
+
+if __name__ == "__main__":
+ if "sdist" not in sys.argv[1:]:
+ raise ValueError("please use 'pytest' pypi package instead of 'py.test'")
+ setup(
+ name="py.test",
+ version="0.0",
+ description="please use 'pytest' for installation",
+ )
diff --git a/testing/web-platform/tests/tools/pytest/plugin-test.sh b/testing/web-platform/tests/tools/pytest/plugin-test.sh
new file mode 100644
index 000000000..9c61b5053
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/plugin-test.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# this assumes plugins are installed as sister directories
+
+set -e
+cd ../pytest-pep8
+py.test
+cd ../pytest-instafail
+py.test
+cd ../pytest-cache
+py.test
+cd ../pytest-xprocess
+py.test
+#cd ../pytest-cov
+#py.test
+cd ../pytest-capturelog
+py.test
+cd ../pytest-xdist
+py.test
+
diff --git a/testing/web-platform/tests/tools/pytest/pytest.py b/testing/web-platform/tests/tools/pytest/pytest.py
new file mode 100644
index 000000000..e376e417e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/pytest.py
@@ -0,0 +1,28 @@
+# PYTHON_ARGCOMPLETE_OK
+"""
+pytest: unit and functional testing with Python.
+"""
+__all__ = [
+ 'main',
+ 'UsageError',
+ 'cmdline',
+ 'hookspec',
+ 'hookimpl',
+ '__version__',
+]
+
+if __name__ == '__main__': # if run as a script or by 'python -m pytest'
+ # we trigger the below "else" condition by the following import
+ import pytest
+ raise SystemExit(pytest.main())
+
+# else we are imported
+
+from _pytest.config import (
+ main, UsageError, _preloadplugins, cmdline,
+ hookspec, hookimpl
+)
+from _pytest import __version__
+
+_preloadplugins() # to populate pytest.* namespace so help(pytest) works
+
diff --git a/testing/web-platform/tests/tools/pytest/requirements-docs.txt b/testing/web-platform/tests/tools/pytest/requirements-docs.txt
new file mode 100644
index 000000000..be3a232e5
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/requirements-docs.txt
@@ -0,0 +1,3 @@
+sphinx==1.2.3
+regendoc
+pyyaml
diff --git a/testing/web-platform/tests/tools/pytest/runtox.py b/testing/web-platform/tests/tools/pytest/runtox.py
new file mode 100644
index 000000000..8c13c53e1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/runtox.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+
+if __name__ == "__main__":
+ import subprocess
+ import sys
+ subprocess.call([sys.executable, "-m", "tox",
+ "-i", "ALL=https://devpi.net/hpk/dev/",
+ "--develop"] + sys.argv[1:])
diff --git a/testing/web-platform/tests/tools/pytest/setup.cfg b/testing/web-platform/tests/tools/pytest/setup.cfg
new file mode 100644
index 000000000..1ab4fd059
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/setup.cfg
@@ -0,0 +1,13 @@
+[build_sphinx]
+source-dir = doc/en/
+build-dir = doc/build
+all_files = 1
+
+[upload_sphinx]
+upload-dir = doc/en/build/html
+
+[bdist_wheel]
+universal = 1
+
+[devpi:upload]
+formats = sdist.tgz,bdist_wheel
diff --git a/testing/web-platform/tests/tools/pytest/setup.py b/testing/web-platform/tests/tools/pytest/setup.py
new file mode 100644
index 000000000..6660f2160
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/setup.py
@@ -0,0 +1,122 @@
+import os, sys
+import setuptools
+import pkg_resources
+from setuptools import setup, Command
+
+classifiers = ['Development Status :: 6 - Mature',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: MIT License',
+ 'Operating System :: POSIX',
+ 'Operating System :: Microsoft :: Windows',
+ 'Operating System :: MacOS :: MacOS X',
+ 'Topic :: Software Development :: Testing',
+ 'Topic :: Software Development :: Libraries',
+ 'Topic :: Utilities'] + [
+ ('Programming Language :: Python :: %s' % x) for x in
+ '2 2.6 2.7 3 3.2 3.3 3.4 3.5'.split()]
+
+with open('README.rst') as fd:
+ long_description = fd.read()
+
+def get_version():
+ p = os.path.join(os.path.dirname(
+ os.path.abspath(__file__)), "_pytest", "__init__.py")
+ with open(p) as f:
+ for line in f.readlines():
+ if "__version__" in line:
+ return line.strip().split("=")[-1].strip(" '")
+ raise ValueError("could not read version")
+
+
+def has_environment_marker_support():
+ """
+ Tests that setuptools has support for PEP-426 environment marker support.
+
+ The first known release to support it is 0.7 (and the earliest on PyPI seems to be 0.7.2
+ so we're using that), see: http://pythonhosted.org/setuptools/history.html#id142
+
+ References:
+
+ * https://wheel.readthedocs.org/en/latest/index.html#defining-conditional-dependencies
+ * https://www.python.org/dev/peps/pep-0426/#environment-markers
+ """
+ try:
+ return pkg_resources.parse_version(setuptools.__version__) >= pkg_resources.parse_version('0.7.2')
+ except Exception as exc:
+ sys.stderr.write("Could not test setuptool's version: %s\n" % exc)
+ return False
+
+
+def main():
+ install_requires = ['py>=1.4.29'] # pluggy is vendored in _pytest.vendored_packages
+ extras_require = {}
+ if has_environment_marker_support():
+ extras_require[':python_version=="2.6" or python_version=="3.0" or python_version=="3.1"'] = ['argparse']
+ extras_require[':sys_platform=="win32"'] = ['colorama']
+ else:
+ if sys.version_info < (2, 7) or (3,) <= sys.version_info < (3, 2):
+ install_requires.append('argparse')
+ if sys.platform == 'win32':
+ install_requires.append('colorama')
+
+ setup(
+ name='pytest',
+ description='pytest: simple powerful testing with Python',
+ long_description=long_description,
+ version=get_version(),
+ url='http://pytest.org',
+ license='MIT license',
+ platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
+ author='Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others',
+ author_email='holger at merlinux.eu',
+ entry_points=make_entry_points(),
+ classifiers=classifiers,
+ cmdclass={'test': PyTest},
+ # the following should be enabled for release
+ install_requires=install_requires,
+ extras_require=extras_require,
+ packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.vendored_packages'],
+ py_modules=['pytest'],
+ zip_safe=False,
+ )
+
+
+def cmdline_entrypoints(versioninfo, platform, basename):
+ target = 'pytest:main'
+ if platform.startswith('java'):
+ points = {'py.test-jython': target}
+ else:
+ if basename.startswith('pypy'):
+ points = {'py.test-%s' % basename: target}
+ else: # cpython
+ points = {'py.test-%s.%s' % versioninfo[:2] : target}
+ points['py.test'] = target
+ return points
+
+
+def make_entry_points():
+ basename = os.path.basename(sys.executable)
+ points = cmdline_entrypoints(sys.version_info, sys.platform, basename)
+ keys = list(points.keys())
+ keys.sort()
+ l = ['%s = %s' % (x, points[x]) for x in keys]
+ return {'console_scripts': l}
+
+
+class PyTest(Command):
+ user_options = []
+ def initialize_options(self):
+ pass
+ def finalize_options(self):
+ pass
+ def run(self):
+ import subprocess
+ PPATH = [x for x in os.environ.get('PYTHONPATH', '').split(':') if x]
+ PPATH.insert(0, os.getcwd())
+ os.environ['PYTHONPATH'] = ':'.join(PPATH)
+ errno = subprocess.call([sys.executable, 'pytest.py', '--ignore=doc'])
+ raise SystemExit(errno)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/testing/web-platform/tests/tools/pytest/testing/acceptance_test.py b/testing/web-platform/tests/tools/pytest/testing/acceptance_test.py
new file mode 100644
index 000000000..9bc3a191a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/acceptance_test.py
@@ -0,0 +1,695 @@
+import sys
+
+import _pytest._code
+import py
+import pytest
+from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
+
+
+class TestGeneralUsage:
+ def test_config_error(self, testdir):
+ testdir.makeconftest("""
+ def pytest_configure(config):
+ import pytest
+ raise pytest.UsageError("hello")
+ """)
+ result = testdir.runpytest(testdir.tmpdir)
+ assert result.ret != 0
+ result.stderr.fnmatch_lines([
+ '*ERROR: hello'
+ ])
+
+ def test_root_conftest_syntax_error(self, testdir):
+ testdir.makepyfile(conftest="raise SyntaxError\n")
+ result = testdir.runpytest()
+ result.stderr.fnmatch_lines(["*raise SyntaxError*"])
+ assert result.ret != 0
+
+ def test_early_hook_error_issue38_1(self, testdir):
+ testdir.makeconftest("""
+ def pytest_sessionstart():
+ 0 / 0
+ """)
+ result = testdir.runpytest(testdir.tmpdir)
+ assert result.ret != 0
+ # tracestyle is native by default for hook failures
+ result.stdout.fnmatch_lines([
+ '*INTERNALERROR*File*conftest.py*line 2*',
+ '*0 / 0*',
+ ])
+ result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
+ assert result.ret != 0
+ # tracestyle is native by default for hook failures
+ result.stdout.fnmatch_lines([
+ '*INTERNALERROR*def pytest_sessionstart():*',
+ '*INTERNALERROR*0 / 0*',
+ ])
+
+ def test_early_hook_configure_error_issue38(self, testdir):
+ testdir.makeconftest("""
+ def pytest_configure():
+ 0 / 0
+ """)
+ result = testdir.runpytest(testdir.tmpdir)
+ assert result.ret != 0
+ # here we get it on stderr
+ result.stderr.fnmatch_lines([
+ '*INTERNALERROR*File*conftest.py*line 2*',
+ '*0 / 0*',
+ ])
+
+ def test_file_not_found(self, testdir):
+ result = testdir.runpytest("asd")
+ assert result.ret != 0
+ result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
+
+ def test_file_not_found_unconfigure_issue143(self, testdir):
+ testdir.makeconftest("""
+ def pytest_configure():
+ print("---configure")
+ def pytest_unconfigure():
+ print("---unconfigure")
+ """)
+ result = testdir.runpytest("-s", "asd")
+ assert result.ret == 4 # EXIT_USAGEERROR
+ result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
+ result.stdout.fnmatch_lines([
+ "*---configure",
+ "*---unconfigure",
+ ])
+
+
+ def test_config_preparse_plugin_option(self, testdir):
+ testdir.makepyfile(pytest_xyz="""
+ def pytest_addoption(parser):
+ parser.addoption("--xyz", dest="xyz", action="store")
+ """)
+ testdir.makepyfile(test_one="""
+ def test_option(pytestconfig):
+ assert pytestconfig.option.xyz == "123"
+ """)
+ result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ '*1 passed*',
+ ])
+
+ def test_assertion_magic(self, testdir):
+ p = testdir.makepyfile("""
+ def test_this():
+ x = 0
+ assert x
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "> assert x",
+ "E assert 0",
+ ])
+ assert result.ret == 1
+
+ def test_nested_import_error(self, testdir):
+ p = testdir.makepyfile("""
+ import import_fails
+ def test_this():
+ assert import_fails.a == 1
+ """)
+ testdir.makepyfile(import_fails="import does_not_work")
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ #XXX on jython this fails: "> import import_fails",
+ "E ImportError: No module named *does_not_work*",
+ ])
+ assert result.ret == 1
+
+ def test_not_collectable_arguments(self, testdir):
+ p1 = testdir.makepyfile("")
+ p2 = testdir.makefile(".pyc", "123")
+ result = testdir.runpytest(p1, p2)
+ assert result.ret
+ result.stderr.fnmatch_lines([
+ "*ERROR: not found:*%s" %(p2.basename,)
+ ])
+
+ def test_issue486_better_reporting_on_conftest_load_failure(self, testdir):
+ testdir.makepyfile("")
+ testdir.makeconftest("import qwerty")
+ result = testdir.runpytest("--help")
+ result.stdout.fnmatch_lines("""
+ *--version*
+ *warning*conftest.py*
+ """)
+ result = testdir.runpytest()
+ result.stderr.fnmatch_lines("""
+ *ERROR*could not load*conftest.py*
+ """)
+
+
+ def test_early_skip(self, testdir):
+ testdir.mkdir("xyz")
+ testdir.makeconftest("""
+ import pytest
+ def pytest_collect_directory():
+ pytest.skip("early")
+ """)
+ result = testdir.runpytest()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ result.stdout.fnmatch_lines([
+ "*1 skip*"
+ ])
+
+ def test_issue88_initial_file_multinodes(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ class MyFile(pytest.File):
+ def collect(self):
+ return [MyItem("hello", parent=self)]
+ def pytest_collect_file(path, parent):
+ return MyFile(path, parent)
+ class MyItem(pytest.Item):
+ pass
+ """)
+ p = testdir.makepyfile("def test_hello(): pass")
+ result = testdir.runpytest(p, "--collect-only")
+ result.stdout.fnmatch_lines([
+ "*MyFile*test_issue88*",
+ "*Module*test_issue88*",
+ ])
+
+ def test_issue93_initialnode_importing_capturing(self, testdir):
+ testdir.makeconftest("""
+ import sys
+ print ("should not be seen")
+ sys.stderr.write("stder42\\n")
+ """)
+ result = testdir.runpytest()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ assert "should not be seen" not in result.stdout.str()
+ assert "stderr42" not in result.stderr.str()
+
+ def test_conftest_printing_shows_if_error(self, testdir):
+ testdir.makeconftest("""
+ print ("should be seen")
+ assert 0
+ """)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ assert "should be seen" in result.stdout.str()
+
+ @pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
+ reason="symlink not available on this platform")
+ def test_chdir(self, testdir):
+ testdir.tmpdir.join("py").mksymlinkto(py._pydir)
+ p = testdir.tmpdir.join("main.py")
+ p.write(_pytest._code.Source("""
+ import sys, os
+ sys.path.insert(0, '')
+ import py
+ print (py.__file__)
+ print (py.__path__)
+ os.chdir(os.path.dirname(os.getcwd()))
+ print (py.log)
+ """))
+ result = testdir.runpython(p)
+ assert not result.ret
+
+ def test_issue109_sibling_conftests_not_loaded(self, testdir):
+ sub1 = testdir.tmpdir.mkdir("sub1")
+ sub2 = testdir.tmpdir.mkdir("sub2")
+ sub1.join("conftest.py").write("assert 0")
+ result = testdir.runpytest(sub2)
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ sub2.ensure("__init__.py")
+ p = sub2.ensure("test_hello.py")
+ result = testdir.runpytest(p)
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ result = testdir.runpytest(sub1)
+ assert result.ret == EXIT_USAGEERROR
+
+ def test_directory_skipped(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ def pytest_ignore_collect():
+ pytest.skip("intentional")
+ """)
+ testdir.makepyfile("def test_hello(): pass")
+ result = testdir.runpytest()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ result.stdout.fnmatch_lines([
+ "*1 skipped*"
+ ])
+
+ def test_multiple_items_per_collector_byid(self, testdir):
+ c = testdir.makeconftest("""
+ import pytest
+ class MyItem(pytest.Item):
+ def runtest(self):
+ pass
+ class MyCollector(pytest.File):
+ def collect(self):
+ return [MyItem(name="xyz", parent=self)]
+ def pytest_collect_file(path, parent):
+ if path.basename.startswith("conftest"):
+ return MyCollector(path, parent)
+ """)
+ result = testdir.runpytest(c.basename+"::"+"xyz")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*1 pass*",
+ ])
+
+ def test_skip_on_generated_funcarg_id(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall({'x': 3}, id='hello-123')
+ def pytest_runtest_setup(item):
+ print (item.keywords)
+ if 'hello-123' in item.keywords:
+ pytest.skip("hello")
+ assert 0
+ """)
+ p = testdir.makepyfile("""def test_func(x): pass""")
+ res = testdir.runpytest(p)
+ assert res.ret == 0
+ res.stdout.fnmatch_lines(["*1 skipped*"])
+
+ def test_direct_addressing_selects(self, testdir):
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall({'i': 1}, id="1")
+ metafunc.addcall({'i': 2}, id="2")
+ def test_func(i):
+ pass
+ """)
+ res = testdir.runpytest(p.basename + "::" + "test_func[1]")
+ assert res.ret == 0
+ res.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_direct_addressing_notfound(self, testdir):
+ p = testdir.makepyfile("""
+ def test_func():
+ pass
+ """)
+ res = testdir.runpytest(p.basename + "::" + "test_notfound")
+ assert res.ret
+ res.stderr.fnmatch_lines(["*ERROR*not found*"])
+
+ def test_docstring_on_hookspec(self):
+ from _pytest import hookspec
+ for name, value in vars(hookspec).items():
+ if name.startswith("pytest_"):
+ assert value.__doc__, "no docstring for %s" % name
+
+ def test_initialization_error_issue49(self, testdir):
+ testdir.makeconftest("""
+ def pytest_configure():
+ x
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 3 # internal error
+ result.stderr.fnmatch_lines([
+ "INTERNAL*pytest_configure*",
+ "INTERNAL*x*",
+ ])
+ assert 'sessionstarttime' not in result.stderr.str()
+
+ @pytest.mark.parametrize('lookfor', ['test_fun.py', 'test_fun.py::test_a'])
+ def test_issue134_report_syntaxerror_when_collecting_member(self, testdir, lookfor):
+ testdir.makepyfile(test_fun="""
+ def test_a():
+ pass
+ def""")
+ result = testdir.runpytest(lookfor)
+ result.stdout.fnmatch_lines(['*SyntaxError*'])
+ if '::' in lookfor:
+ result.stderr.fnmatch_lines([
+ '*ERROR*',
+ ])
+ assert result.ret == 4 # usage error only if item not found
+
+ def test_report_all_failed_collections_initargs(self, testdir):
+ testdir.makepyfile(test_a="def", test_b="def")
+ result = testdir.runpytest("test_a.py::a", "test_b.py::b")
+ result.stderr.fnmatch_lines([
+ "*ERROR*test_a.py::a*",
+ "*ERROR*test_b.py::b*",
+ ])
+
+ def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
+ # Ref #383. Python 3.3's namespace package messed with our import hooks
+ # Importing a module that didn't exist, even if the ImportError was
+ # gracefully handled, would make our test crash.
+ testdir.mkdir('not_a_package')
+ p = testdir.makepyfile("""
+ try:
+ from not_a_package import doesnt_exist
+ except ImportError:
+ # We handle the import error gracefully here
+ pass
+
+ def test_whatever():
+ pass
+ """)
+ res = testdir.runpytest(p.basename)
+ assert res.ret == 0
+
+ def test_unknown_option(self, testdir):
+ result = testdir.runpytest("--qwlkej")
+ result.stderr.fnmatch_lines("""
+ *unrecognized*
+ """)
+
+ def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
+ monkeypatch.setattr("inspect.getsourcelines", None)
+ p = testdir.makepyfile("""
+ def raise_error(obj):
+ raise IOError('source code not available')
+
+ import inspect
+ inspect.getsourcelines = raise_error
+
+ def test_foo(invalid_fixture):
+ pass
+ """)
+ res = testdir.runpytest(p)
+ res.stdout.fnmatch_lines([
+ "*source code not available*",
+ "*fixture 'invalid_fixture' not found",
+ ])
+
+ def test_plugins_given_as_strings(self, tmpdir, monkeypatch):
+ """test that str values passed to main() as `plugins` arg
+ are interpreted as module names to be imported and registered.
+ #855.
+ """
+ with pytest.raises(ImportError) as excinfo:
+ pytest.main([str(tmpdir)], plugins=['invalid.module'])
+ assert 'invalid' in str(excinfo.value)
+
+ p = tmpdir.join('test_test_plugins_given_as_strings.py')
+ p.write('def test_foo(): pass')
+ mod = py.std.types.ModuleType("myplugin")
+ monkeypatch.setitem(sys.modules, 'myplugin', mod)
+ assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0
+
+ def test_parameterized_with_bytes_regex(self, testdir):
+ p = testdir.makepyfile("""
+ import re
+ import pytest
+ @pytest.mark.parametrize('r', [re.compile(b'foo')])
+ def test_stuff(r):
+ pass
+ """
+ )
+ res = testdir.runpytest(p)
+ res.stdout.fnmatch_lines([
+ '*1 passed*'
+ ])
+
+
+class TestInvocationVariants:
+ def test_earlyinit(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ assert hasattr(pytest, 'mark')
+ """)
+ result = testdir.runpython(p)
+ assert result.ret == 0
+
+ @pytest.mark.xfail("sys.platform.startswith('java')")
+ def test_pydoc(self, testdir):
+ for name in ('py.test', 'pytest'):
+ result = testdir.runpython_c("import %s;help(%s)" % (name, name))
+ assert result.ret == 0
+ s = result.stdout.str()
+ assert 'MarkGenerator' in s
+
+ def test_import_star_py_dot_test(self, testdir):
+ p = testdir.makepyfile("""
+ from py.test import *
+ #collect
+ #cmdline
+ #Item
+ #assert collect.Item is Item
+ #assert collect.Collector is Collector
+ main
+ skip
+ xfail
+ """)
+ result = testdir.runpython(p)
+ assert result.ret == 0
+
+ def test_import_star_pytest(self, testdir):
+ p = testdir.makepyfile("""
+ from pytest import *
+ #Item
+ #File
+ main
+ skip
+ xfail
+ """)
+ result = testdir.runpython(p)
+ assert result.ret == 0
+
+ def test_double_pytestcmdline(self, testdir):
+ p = testdir.makepyfile(run="""
+ import pytest
+ pytest.main()
+ pytest.main()
+ """)
+ testdir.makepyfile("""
+ def test_hello():
+ pass
+ """)
+ result = testdir.runpython(p)
+ result.stdout.fnmatch_lines([
+ "*1 passed*",
+ "*1 passed*",
+ ])
+
+ def test_python_minus_m_invocation_ok(self, testdir):
+ p1 = testdir.makepyfile("def test_hello(): pass")
+ res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
+ assert res.ret == 0
+
+ def test_python_minus_m_invocation_fail(self, testdir):
+ p1 = testdir.makepyfile("def test_fail(): 0/0")
+ res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
+ assert res.ret == 1
+
+ def test_python_pytest_package(self, testdir):
+ p1 = testdir.makepyfile("def test_pass(): pass")
+ res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
+ assert res.ret == 0
+ res.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_equivalence_pytest_pytest(self):
+ assert pytest.main == py.test.cmdline.main
+
+ def test_invoke_with_string(self, capsys):
+ retcode = pytest.main("-h")
+ assert not retcode
+ out, err = capsys.readouterr()
+ assert "--help" in out
+ pytest.raises(ValueError, lambda: pytest.main(0))
+
+ def test_invoke_with_path(self, tmpdir, capsys):
+ retcode = pytest.main(tmpdir)
+ assert retcode == EXIT_NOTESTSCOLLECTED
+ out, err = capsys.readouterr()
+
+ def test_invoke_plugin_api(self, testdir, capsys):
+ class MyPlugin:
+ def pytest_addoption(self, parser):
+ parser.addoption("--myopt")
+
+ pytest.main(["-h"], plugins=[MyPlugin()])
+ out, err = capsys.readouterr()
+ assert "--myopt" in out
+
+ def test_pyargs_importerror(self, testdir, monkeypatch):
+ monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
+ path = testdir.mkpydir("tpkg")
+ path.join("test_hello.py").write('raise ImportError')
+
+ result = testdir.runpytest("--pyargs", "tpkg.test_hello")
+ assert result.ret != 0
+ # FIXME: It would be more natural to match NOT
+ # "ERROR*file*or*package*not*found*".
+ result.stdout.fnmatch_lines([
+ "*collected 0 items*"
+ ])
+
+ def test_cmdline_python_package(self, testdir, monkeypatch):
+ monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
+ path = testdir.mkpydir("tpkg")
+ path.join("test_hello.py").write("def test_hello(): pass")
+ path.join("test_world.py").write("def test_world(): pass")
+ result = testdir.runpytest("--pyargs", "tpkg")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*2 passed*"
+ ])
+ result = testdir.runpytest("--pyargs", "tpkg.test_hello")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*1 passed*"
+ ])
+
+ def join_pythonpath(what):
+ cur = py.std.os.environ.get('PYTHONPATH')
+ if cur:
+ return str(what) + ':' + cur
+ return what
+ empty_package = testdir.mkpydir("empty_package")
+ monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))
+ result = testdir.runpytest("--pyargs", ".")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*2 passed*"
+ ])
+
+ monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir))
+ path.join('test_hello.py').remove()
+ result = testdir.runpytest("--pyargs", "tpkg.test_hello")
+ assert result.ret != 0
+ result.stderr.fnmatch_lines([
+ "*not*found*test_hello*",
+ ])
+
+ def test_cmdline_python_package_not_exists(self, testdir):
+ result = testdir.runpytest("--pyargs", "tpkgwhatv")
+ assert result.ret
+ result.stderr.fnmatch_lines([
+ "ERROR*file*or*package*not*found*",
+ ])
+
+ @pytest.mark.xfail(reason="decide: feature or bug")
+ def test_noclass_discovery_if_not_testcase(self, testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ class TestHello(object):
+ def test_hello(self):
+ assert self.attr
+
+ class RealTest(unittest.TestCase, TestHello):
+ attr = 42
+ """)
+ reprec = testdir.inline_run(testpath)
+ reprec.assertoutcome(passed=1)
+
+ def test_doctest_id(self, testdir):
+ testdir.makefile('.txt', """
+ >>> x=3
+ >>> x
+ 4
+ """)
+ result = testdir.runpytest("-rf")
+ lines = result.stdout.str().splitlines()
+ for line in lines:
+ if line.startswith("FAIL "):
+ testid = line[5:].strip()
+ break
+ result = testdir.runpytest(testid, '-rf')
+ result.stdout.fnmatch_lines([
+ line,
+ "*1 failed*",
+ ])
+
+ def test_core_backward_compatibility(self):
+ """Test backward compatibility for get_plugin_manager function. See #787."""
+ import _pytest.config
+ assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager
+
+
+ def test_has_plugin(self, request):
+ """Test hasplugin function of the plugin manager (#932)."""
+ assert request.config.pluginmanager.hasplugin('python')
+
+
+class TestDurations:
+ source = """
+ import time
+ frag = 0.002
+ def test_something():
+ pass
+ def test_2():
+ time.sleep(frag*5)
+ def test_1():
+ time.sleep(frag)
+ def test_3():
+ time.sleep(frag*10)
+ """
+
+ def test_calls(self, testdir):
+ testdir.makepyfile(self.source)
+ result = testdir.runpytest("--durations=10")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines_random([
+ "*durations*",
+ "*call*test_3*",
+ "*call*test_2*",
+ "*call*test_1*",
+ ])
+
+ def test_calls_show_2(self, testdir):
+ testdir.makepyfile(self.source)
+ result = testdir.runpytest("--durations=2")
+ assert result.ret == 0
+ lines = result.stdout.get_lines_after("*slowest*durations*")
+ assert "4 passed" in lines[2]
+
+ def test_calls_showall(self, testdir):
+ testdir.makepyfile(self.source)
+ result = testdir.runpytest("--durations=0")
+ assert result.ret == 0
+ for x in "123":
+ for y in 'call',: #'setup', 'call', 'teardown':
+ for line in result.stdout.lines:
+ if ("test_%s" % x) in line and y in line:
+ break
+ else:
+ raise AssertionError("not found %s %s" % (x,y))
+
+ def test_with_deselected(self, testdir):
+ testdir.makepyfile(self.source)
+ result = testdir.runpytest("--durations=2", "-k test_1")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*durations*",
+ "*call*test_1*",
+ ])
+
+ def test_with_failing_collection(self, testdir):
+ testdir.makepyfile(self.source)
+ testdir.makepyfile(test_collecterror="""xyz""")
+ result = testdir.runpytest("--durations=2", "-k test_1")
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "*durations*",
+ "*call*test_1*",
+ ])
+
+
+class TestDurationWithFixture:
+ source = """
+ import time
+ frag = 0.001
+ def setup_function(func):
+ time.sleep(frag * 3)
+ def test_1():
+ time.sleep(frag*2)
+ def test_2():
+ time.sleep(frag)
+ """
+ def test_setup_function(self, testdir):
+ testdir.makepyfile(self.source)
+ result = testdir.runpytest("--durations=10")
+ assert result.ret == 0
+
+ result.stdout.fnmatch_lines_random("""
+ *durations*
+ * setup *test_1*
+ * call *test_1*
+ """)
+
diff --git a/testing/web-platform/tests/tools/pytest/testing/code/test_code.py b/testing/web-platform/tests/tools/pytest/testing/code/test_code.py
new file mode 100644
index 000000000..0db4ad2ab
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/code/test_code.py
@@ -0,0 +1,174 @@
+import sys
+
+import _pytest._code
+import py
+import pytest
+
+
+def test_ne():
+ code1 = _pytest._code.Code(compile('foo = "bar"', '', 'exec'))
+ assert code1 == code1
+ code2 = _pytest._code.Code(compile('foo = "baz"', '', 'exec'))
+ assert code2 != code1
+
+def test_code_gives_back_name_for_not_existing_file():
+ name = 'abc-123'
+ co_code = compile("pass\n", name, 'exec')
+ assert co_code.co_filename == name
+ code = _pytest._code.Code(co_code)
+ assert str(code.path) == name
+ assert code.fullsource is None
+
+def test_code_with_class():
+ class A:
+ pass
+ pytest.raises(TypeError, "_pytest._code.Code(A)")
+
+if True:
+ def x():
+ pass
+
+def test_code_fullsource():
+ code = _pytest._code.Code(x)
+ full = code.fullsource
+ assert 'test_code_fullsource()' in str(full)
+
+def test_code_source():
+ code = _pytest._code.Code(x)
+ src = code.source()
+ expected = """def x():
+ pass"""
+ assert str(src) == expected
+
+def test_frame_getsourcelineno_myself():
+ def func():
+ return sys._getframe(0)
+ f = func()
+ f = _pytest._code.Frame(f)
+ source, lineno = f.code.fullsource, f.lineno
+ assert source[lineno].startswith(" return sys._getframe(0)")
+
+def test_getstatement_empty_fullsource():
+ def func():
+ return sys._getframe(0)
+ f = func()
+ f = _pytest._code.Frame(f)
+ prop = f.code.__class__.fullsource
+ try:
+ f.code.__class__.fullsource = None
+ assert f.statement == _pytest._code.Source("")
+ finally:
+ f.code.__class__.fullsource = prop
+
+def test_code_from_func():
+ co = _pytest._code.Code(test_frame_getsourcelineno_myself)
+ assert co.firstlineno
+ assert co.path
+
+
+
+def test_builtin_patch_unpatch(monkeypatch):
+ cpy_builtin = py.builtin.builtins
+ comp = cpy_builtin.compile
+ def mycompile(*args, **kwargs):
+ return comp(*args, **kwargs)
+ class Sub(AssertionError):
+ pass
+ monkeypatch.setattr(cpy_builtin, 'AssertionError', Sub)
+ monkeypatch.setattr(cpy_builtin, 'compile', mycompile)
+ _pytest._code.patch_builtins()
+ assert cpy_builtin.AssertionError != Sub
+ assert cpy_builtin.compile != mycompile
+ _pytest._code.unpatch_builtins()
+ assert cpy_builtin.AssertionError is Sub
+ assert cpy_builtin.compile == mycompile
+
+
+def test_unicode_handling():
+ value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
+ def f():
+ raise Exception(value)
+ excinfo = pytest.raises(Exception, f)
+ str(excinfo)
+ if sys.version_info[0] < 3:
+ unicode(excinfo)
+
+
+@pytest.mark.skipif(sys.version_info[0] >= 3, reason='python 2 only issue')
+def test_unicode_handling_syntax_error():
+ value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
+ def f():
+ raise SyntaxError('invalid syntax', (None, 1, 3, value))
+ excinfo = pytest.raises(Exception, f)
+ str(excinfo)
+ if sys.version_info[0] < 3:
+ unicode(excinfo)
+
+def test_code_getargs():
+ def f1(x):
+ pass
+ c1 = _pytest._code.Code(f1)
+ assert c1.getargs(var=True) == ('x',)
+
+ def f2(x, *y):
+ pass
+ c2 = _pytest._code.Code(f2)
+ assert c2.getargs(var=True) == ('x', 'y')
+
+ def f3(x, **z):
+ pass
+ c3 = _pytest._code.Code(f3)
+ assert c3.getargs(var=True) == ('x', 'z')
+
+ def f4(x, *y, **z):
+ pass
+ c4 = _pytest._code.Code(f4)
+ assert c4.getargs(var=True) == ('x', 'y', 'z')
+
+
+def test_frame_getargs():
+ def f1(x):
+ return sys._getframe(0)
+ fr1 = _pytest._code.Frame(f1('a'))
+ assert fr1.getargs(var=True) == [('x', 'a')]
+
+ def f2(x, *y):
+ return sys._getframe(0)
+ fr2 = _pytest._code.Frame(f2('a', 'b', 'c'))
+ assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
+
+ def f3(x, **z):
+ return sys._getframe(0)
+ fr3 = _pytest._code.Frame(f3('a', b='c'))
+ assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
+
+ def f4(x, *y, **z):
+ return sys._getframe(0)
+ fr4 = _pytest._code.Frame(f4('a', 'b', c='d'))
+ assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
+ ('z', {'c': 'd'})]
+
+
+class TestExceptionInfo:
+
+ def test_bad_getsource(self):
+ try:
+ if False: pass
+ else: assert False
+ except AssertionError:
+ exci = _pytest._code.ExceptionInfo()
+ assert exci.getrepr()
+
+
+class TestTracebackEntry:
+
+ def test_getsource(self):
+ try:
+ if False: pass
+ else: assert False
+ except AssertionError:
+ exci = _pytest._code.ExceptionInfo()
+ entry = exci.traceback[0]
+ source = entry.getsource()
+ assert len(source) == 4
+ assert 'else: assert False' in source[3]
diff --git a/testing/web-platform/tests/tools/pytest/testing/code/test_excinfo.py b/testing/web-platform/tests/tools/pytest/testing/code/test_excinfo.py
new file mode 100644
index 000000000..2defa3103
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/code/test_excinfo.py
@@ -0,0 +1,911 @@
+# -*- coding: utf-8 -*-
+
+import _pytest
+import py
+import pytest
+from _pytest._code.code import FormattedExcinfo, ReprExceptionInfo
+
+queue = py.builtin._tryimport('queue', 'Queue')
+
+failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")
+from test_source import astonly
+
+try:
+ import importlib
+except ImportError:
+ invalidate_import_caches = None
+else:
+ invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
+
+import pytest
+pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
+
+class TWMock:
+ def __init__(self):
+ self.lines = []
+ def sep(self, sep, line=None):
+ self.lines.append((sep, line))
+ def line(self, line, **kw):
+ self.lines.append(line)
+ def markup(self, text, **kw):
+ return text
+
+ fullwidth = 80
+
+def test_excinfo_simple():
+ try:
+ raise ValueError
+ except ValueError:
+ info = _pytest._code.ExceptionInfo()
+ assert info.type == ValueError
+
+def test_excinfo_getstatement():
+ def g():
+ raise ValueError
+ def f():
+ g()
+ try:
+ f()
+ except ValueError:
+ excinfo = _pytest._code.ExceptionInfo()
+ linenumbers = [_pytest._code.getrawcode(f).co_firstlineno - 1 + 3,
+ _pytest._code.getrawcode(f).co_firstlineno - 1 + 1,
+ _pytest._code.getrawcode(g).co_firstlineno - 1 + 1, ]
+ l = list(excinfo.traceback)
+ foundlinenumbers = [x.lineno for x in l]
+ assert foundlinenumbers == linenumbers
+ #for x in info:
+ # print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
+ #xxx
+
+# testchain for getentries test below
+def f():
+ #
+ raise ValueError
+ #
+def g():
+ #
+ __tracebackhide__ = True
+ f()
+ #
+def h():
+ #
+ g()
+ #
+
+class TestTraceback_f_g_h:
+ def setup_method(self, method):
+ try:
+ h()
+ except ValueError:
+ self.excinfo = _pytest._code.ExceptionInfo()
+
+ def test_traceback_entries(self):
+ tb = self.excinfo.traceback
+ entries = list(tb)
+ assert len(tb) == 4 # maybe fragile test
+ assert len(entries) == 4 # maybe fragile test
+ names = ['f', 'g', 'h']
+ for entry in entries:
+ try:
+ names.remove(entry.frame.code.name)
+ except ValueError:
+ pass
+ assert not names
+
+ def test_traceback_entry_getsource(self):
+ tb = self.excinfo.traceback
+ s = str(tb[-1].getsource() )
+ assert s.startswith("def f():")
+ assert s.endswith("raise ValueError")
+
+ @astonly
+ @failsonjython
+ def test_traceback_entry_getsource_in_construct(self):
+ source = _pytest._code.Source("""\
+ def xyz():
+ try:
+ raise ValueError
+ except somenoname:
+ pass
+ xyz()
+ """)
+ try:
+ exec (source.compile())
+ except NameError:
+ tb = _pytest._code.ExceptionInfo().traceback
+ print (tb[-1].getsource())
+ s = str(tb[-1].getsource())
+ assert s.startswith("def xyz():\n try:")
+ assert s.strip().endswith("except somenoname:")
+
+ def test_traceback_cut(self):
+ co = _pytest._code.Code(f)
+ path, firstlineno = co.path, co.firstlineno
+ traceback = self.excinfo.traceback
+ newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
+ assert len(newtraceback) == 1
+ newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
+ assert len(newtraceback) == 1
+
+ def test_traceback_cut_excludepath(self, testdir):
+ p = testdir.makepyfile("def f(): raise ValueError")
+ excinfo = pytest.raises(ValueError, "p.pyimport().f()")
+ basedir = py.path.local(pytest.__file__).dirpath()
+ newtraceback = excinfo.traceback.cut(excludepath=basedir)
+ for x in newtraceback:
+ if hasattr(x, 'path'):
+ assert not py.path.local(x.path).relto(basedir)
+ assert newtraceback[-1].frame.code.path == p
+
+ def test_traceback_filter(self):
+ traceback = self.excinfo.traceback
+ ntraceback = traceback.filter()
+ assert len(ntraceback) == len(traceback) - 1
+
+ def test_traceback_recursion_index(self):
+ def f(n):
+ if n < 10:
+ n += 1
+ f(n)
+ excinfo = pytest.raises(RuntimeError, f, 8)
+ traceback = excinfo.traceback
+ recindex = traceback.recursionindex()
+ assert recindex == 3
+
+ def test_traceback_only_specific_recursion_errors(self, monkeypatch):
+ def f(n):
+ if n == 0:
+ raise RuntimeError("hello")
+ f(n-1)
+
+ excinfo = pytest.raises(RuntimeError, f, 100)
+ monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
+ repr = excinfo.getrepr()
+ assert "RuntimeError: hello" in str(repr.reprcrash)
+
+ def test_traceback_no_recursion_index(self):
+ def do_stuff():
+ raise RuntimeError
+ def reraise_me():
+ import sys
+ exc, val, tb = sys.exc_info()
+ py.builtin._reraise(exc, val, tb)
+ def f(n):
+ try:
+ do_stuff()
+ except:
+ reraise_me()
+ excinfo = pytest.raises(RuntimeError, f, 8)
+ traceback = excinfo.traceback
+ recindex = traceback.recursionindex()
+ assert recindex is None
+
+ def test_traceback_messy_recursion(self):
+ #XXX: simplified locally testable version
+ decorator = pytest.importorskip('decorator').decorator
+
+ def log(f, *k, **kw):
+ print('%s %s' % (k, kw))
+ f(*k, **kw)
+ log = decorator(log)
+
+ def fail():
+ raise ValueError('')
+
+ fail = log(log(fail))
+
+ excinfo = pytest.raises(ValueError, fail)
+ assert excinfo.traceback.recursionindex() is None
+
+
+
+ def test_traceback_getcrashentry(self):
+ def i():
+ __tracebackhide__ = True
+ raise ValueError
+ def h():
+ i()
+ def g():
+ __tracebackhide__ = True
+ h()
+ def f():
+ g()
+
+ excinfo = pytest.raises(ValueError, f)
+ tb = excinfo.traceback
+ entry = tb.getcrashentry()
+ co = _pytest._code.Code(h)
+ assert entry.frame.code.path == co.path
+ assert entry.lineno == co.firstlineno + 1
+ assert entry.frame.code.name == 'h'
+
+ def test_traceback_getcrashentry_empty(self):
+ def g():
+ __tracebackhide__ = True
+ raise ValueError
+ def f():
+ __tracebackhide__ = True
+ g()
+
+ excinfo = pytest.raises(ValueError, f)
+ tb = excinfo.traceback
+ entry = tb.getcrashentry()
+ co = _pytest._code.Code(g)
+ assert entry.frame.code.path == co.path
+ assert entry.lineno == co.firstlineno + 2
+ assert entry.frame.code.name == 'g'
+
+def hello(x):
+ x + 5
+
+def test_tbentry_reinterpret():
+ try:
+ hello("hello")
+ except TypeError:
+ excinfo = _pytest._code.ExceptionInfo()
+ tbentry = excinfo.traceback[-1]
+ msg = tbentry.reinterpret()
+ assert msg.startswith("TypeError: ('hello' + 5)")
+
+def test_excinfo_exconly():
+ excinfo = pytest.raises(ValueError, h)
+ assert excinfo.exconly().startswith('ValueError')
+ excinfo = pytest.raises(ValueError,
+ "raise ValueError('hello\\nworld')")
+ msg = excinfo.exconly(tryshort=True)
+ assert msg.startswith('ValueError')
+ assert msg.endswith("world")
+
+def test_excinfo_repr():
+ excinfo = pytest.raises(ValueError, h)
+ s = repr(excinfo)
+ assert s == "<ExceptionInfo ValueError tblen=4>"
+
+def test_excinfo_str():
+ excinfo = pytest.raises(ValueError, h)
+ s = str(excinfo)
+ assert s.startswith(__file__[:-9]) # pyc file and $py.class
+ assert s.endswith("ValueError")
+ assert len(s.split(":")) >= 3 # on windows it's 4
+
+def test_excinfo_errisinstance():
+ excinfo = pytest.raises(ValueError, h)
+ assert excinfo.errisinstance(ValueError)
+
+def test_excinfo_no_sourcecode():
+ try:
+ exec ("raise ValueError()")
+ except ValueError:
+ excinfo = _pytest._code.ExceptionInfo()
+ s = str(excinfo.traceback[-1])
+ if py.std.sys.version_info < (2,5):
+ assert s == " File '<string>':1 in ?\n ???\n"
+ else:
+ assert s == " File '<string>':1 in <module>\n ???\n"
+
+def test_excinfo_no_python_sourcecode(tmpdir):
+ #XXX: simplified locally testable version
+ tmpdir.join('test.txt').write("{{ h()}}:")
+
+ jinja2 = pytest.importorskip('jinja2')
+ loader = jinja2.FileSystemLoader(str(tmpdir))
+ env = jinja2.Environment(loader=loader)
+ template = env.get_template('test.txt')
+ excinfo = pytest.raises(ValueError,
+ template.render, h=h)
+ for item in excinfo.traceback:
+ print(item) #XXX: for some reason jinja.Template.render is printed in full
+ item.source # shouldn't fail
+ if item.path.basename == 'test.txt':
+ assert str(item.source) == '{{ h()}}:'
+
+
+def test_entrysource_Queue_example():
+ try:
+ queue.Queue().get(timeout=0.001)
+ except queue.Empty:
+ excinfo = _pytest._code.ExceptionInfo()
+ entry = excinfo.traceback[-1]
+ source = entry.getsource()
+ assert source is not None
+ s = str(source).strip()
+ assert s.startswith("def get")
+
+def test_codepath_Queue_example():
+ try:
+ queue.Queue().get(timeout=0.001)
+ except queue.Empty:
+ excinfo = _pytest._code.ExceptionInfo()
+ entry = excinfo.traceback[-1]
+ path = entry.path
+ assert isinstance(path, py.path.local)
+ assert path.basename.lower() == "queue.py"
+ assert path.check()
+
+class TestFormattedExcinfo:
+ def pytest_funcarg__importasmod(self, request):
+ def importasmod(source):
+ source = _pytest._code.Source(source)
+ tmpdir = request.getfuncargvalue("tmpdir")
+ modpath = tmpdir.join("mod.py")
+ tmpdir.ensure("__init__.py")
+ modpath.write(source)
+ if invalidate_import_caches is not None:
+ invalidate_import_caches()
+ return modpath.pyimport()
+ return importasmod
+
+ def excinfo_from_exec(self, source):
+ source = _pytest._code.Source(source).strip()
+ try:
+ exec (source.compile())
+ except KeyboardInterrupt:
+ raise
+ except:
+ return _pytest._code.ExceptionInfo()
+ assert 0, "did not raise"
+
+ def test_repr_source(self):
+ pr = FormattedExcinfo()
+ source = _pytest._code.Source("""
+ def f(x):
+ pass
+ """).strip()
+ pr.flow_marker = "|"
+ lines = pr.get_source(source, 0)
+ assert len(lines) == 2
+ assert lines[0] == "| def f(x):"
+ assert lines[1] == " pass"
+
+ def test_repr_source_excinfo(self):
+ """ check if indentation is right """
+ pr = FormattedExcinfo()
+ excinfo = self.excinfo_from_exec("""
+ def f():
+ assert 0
+ f()
+ """)
+ pr = FormattedExcinfo()
+ source = pr._getentrysource(excinfo.traceback[-1])
+ lines = pr.get_source(source, 1, excinfo)
+ assert lines == [
+ ' def f():',
+ '> assert 0',
+ 'E assert 0'
+ ]
+
+
+ def test_repr_source_not_existing(self):
+ pr = FormattedExcinfo()
+ co = compile("raise ValueError()", "", "exec")
+ try:
+ exec (co)
+ except ValueError:
+ excinfo = _pytest._code.ExceptionInfo()
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
+
+ def test_repr_many_line_source_not_existing(self):
+ pr = FormattedExcinfo()
+ co = compile("""
+a = 1
+raise ValueError()
+""", "", "exec")
+ try:
+ exec (co)
+ except ValueError:
+ excinfo = _pytest._code.ExceptionInfo()
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
+
+ def test_repr_source_failing_fullsource(self):
+ pr = FormattedExcinfo()
+
+ class FakeCode(object):
+ class raw:
+ co_filename = '?'
+ path = '?'
+ firstlineno = 5
+
+ def fullsource(self):
+ return None
+ fullsource = property(fullsource)
+
+ class FakeFrame(object):
+ code = FakeCode()
+ f_locals = {}
+ f_globals = {}
+
+ class FakeTracebackEntry(_pytest._code.Traceback.Entry):
+ def __init__(self, tb):
+ self.lineno = 5+3
+
+ @property
+ def frame(self):
+ return FakeFrame()
+
+ class Traceback(_pytest._code.Traceback):
+ Entry = FakeTracebackEntry
+
+ class FakeExcinfo(_pytest._code.ExceptionInfo):
+ typename = "Foo"
+ def __init__(self):
+ pass
+
+ def exconly(self, tryshort):
+ return "EXC"
+ def errisinstance(self, cls):
+ return False
+
+ excinfo = FakeExcinfo()
+ class FakeRawTB(object):
+ tb_next = None
+ tb = FakeRawTB()
+ excinfo.traceback = Traceback(tb)
+
+ fail = IOError() # noqa
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
+
+ fail = py.error.ENOENT # noqa
+ repr = pr.repr_excinfo(excinfo)
+ assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
+
+
+ def test_repr_local(self):
+ p = FormattedExcinfo(showlocals=True)
+ loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
+ reprlocals = p.repr_locals(loc)
+ assert reprlocals.lines
+ assert reprlocals.lines[0] == '__builtins__ = <builtins>'
+ assert reprlocals.lines[1] == 'x = 3'
+ assert reprlocals.lines[2] == 'y = 5'
+ assert reprlocals.lines[3] == 'z = 7'
+
+ def test_repr_tracebackentry_lines(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = pytest.raises(ValueError, mod.func1)
+ excinfo.traceback = excinfo.traceback.filter()
+ p = FormattedExcinfo()
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
+
+ # test as an intermediate entry
+ lines = reprtb.lines
+ assert lines[0] == ' def func1():'
+ assert lines[1] == '> raise ValueError("hello\\nworld")'
+
+ # test as last entry
+ p = FormattedExcinfo(showlocals=True)
+ repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = repr_entry.lines
+ assert lines[0] == ' def func1():'
+ assert lines[1] == '> raise ValueError("hello\\nworld")'
+ assert lines[2] == 'E ValueError: hello'
+ assert lines[3] == 'E world'
+ assert not lines[4:]
+
+ loc = repr_entry.reprlocals is not None
+ loc = repr_entry.reprfileloc
+ assert loc.path == mod.__file__
+ assert loc.lineno == 3
+ #assert loc.message == "ValueError: hello"
+
+ def test_repr_tracebackentry_lines2(self, importasmod):
+ mod = importasmod("""
+ def func1(m, x, y, z):
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = pytest.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
+ excinfo.traceback = excinfo.traceback.filter()
+ entry = excinfo.traceback[-1]
+ p = FormattedExcinfo(funcargs=True)
+ reprfuncargs = p.repr_args(entry)
+ assert reprfuncargs.args[0] == ('m', repr("m"*90))
+ assert reprfuncargs.args[1] == ('x', '5')
+ assert reprfuncargs.args[2] == ('y', '13')
+ assert reprfuncargs.args[3] == ('z', repr("z" * 120))
+
+ p = FormattedExcinfo(funcargs=True)
+ repr_entry = p.repr_traceback_entry(entry)
+ assert repr_entry.reprfuncargs.args == reprfuncargs.args
+ tw = TWMock()
+ repr_entry.toterminal(tw)
+ assert tw.lines[0] == "m = " + repr('m' * 90)
+ assert tw.lines[1] == "x = 5, y = 13"
+ assert tw.lines[2] == "z = " + repr('z' * 120)
+
+ def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
+ mod = importasmod("""
+ def func1(x, *y, **z):
+ raise ValueError("hello\\nworld")
+ """)
+ excinfo = pytest.raises(ValueError, mod.func1, 'a', 'b', c='d')
+ excinfo.traceback = excinfo.traceback.filter()
+ entry = excinfo.traceback[-1]
+ p = FormattedExcinfo(funcargs=True)
+ reprfuncargs = p.repr_args(entry)
+ assert reprfuncargs.args[0] == ('x', repr('a'))
+ assert reprfuncargs.args[1] == ('y', repr(('b',)))
+ assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
+
+ p = FormattedExcinfo(funcargs=True)
+ repr_entry = p.repr_traceback_entry(entry)
+ assert repr_entry.reprfuncargs.args == reprfuncargs.args
+ tw = TWMock()
+ repr_entry.toterminal(tw)
+ assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
+
+ def test_repr_tracebackentry_short(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
+ lines = reprtb.lines
+ basename = py.path.local(mod.__file__).basename
+ assert lines[0] == ' func1()'
+ assert basename in str(reprtb.reprfileloc.path)
+ assert reprtb.reprfileloc.lineno == 5
+
+ # test last entry
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprtb.lines
+ assert lines[0] == ' raise ValueError("hello")'
+ assert lines[1] == 'E ValueError: hello'
+ assert basename in str(reprtb.reprfileloc.path)
+ assert reprtb.reprfileloc.lineno == 3
+
+ def test_repr_tracebackentry_no(self, importasmod):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(style="no")
+ p.repr_traceback_entry(excinfo.traceback[-2])
+
+ p = FormattedExcinfo(style="no")
+ reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprentry.lines
+ assert lines[0] == 'E ValueError: hello'
+ assert not lines[1:]
+
+ def test_repr_traceback_tbfilter(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+ p = FormattedExcinfo(tbfilter=True)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 2
+ p = FormattedExcinfo(tbfilter=False)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 3
+
+ def test_traceback_short_no_source(self, importasmod, monkeypatch):
+ mod = importasmod("""
+ def func1():
+ raise ValueError("hello")
+ def entry():
+ func1()
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+ from _pytest._code.code import Code
+ monkeypatch.setattr(Code, 'path', 'bogus')
+ excinfo.traceback[0].frame.code.path = "bogus"
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
+ lines = reprtb.lines
+ last_p = FormattedExcinfo(style="short")
+ last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ last_lines = last_reprtb.lines
+ monkeypatch.undo()
+ assert lines[0] == ' func1()'
+
+ assert last_lines[0] == ' raise ValueError("hello")'
+ assert last_lines[1] == 'E ValueError: hello'
+
+ def test_repr_traceback_and_excinfo(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+
+ for style in ("long", "short"):
+ p = FormattedExcinfo(style=style)
+ reprtb = p.repr_traceback(excinfo)
+ assert len(reprtb.reprentries) == 2
+ assert reprtb.style == style
+ assert not reprtb.extraline
+ repr = p.repr_excinfo(excinfo)
+ assert repr.reprtraceback
+ assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
+ assert repr.reprcrash.path.endswith("mod.py")
+ assert repr.reprcrash.message == "ValueError: 0"
+
+ def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+
+ p = FormattedExcinfo()
+ def raiseos():
+ raise OSError(2)
+ monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
+ assert p._makepath(__file__) == __file__
+ p.repr_traceback(excinfo)
+
+ def test_repr_excinfo_addouterr(self, importasmod):
+ mod = importasmod("""
+ def entry():
+ raise ValueError()
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+ repr = excinfo.getrepr()
+ repr.addsection("title", "content")
+ twmock = TWMock()
+ repr.toterminal(twmock)
+ assert twmock.lines[-1] == "content"
+ assert twmock.lines[-2] == ("-", "title")
+
+ def test_repr_excinfo_reprcrash(self, importasmod):
+ mod = importasmod("""
+ def entry():
+ raise ValueError()
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+ repr = excinfo.getrepr()
+ assert repr.reprcrash.path.endswith("mod.py")
+ assert repr.reprcrash.lineno == 3
+ assert repr.reprcrash.message == "ValueError"
+ assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
+
+ def test_repr_traceback_recursion(self, importasmod):
+ mod = importasmod("""
+ def rec2(x):
+ return rec1(x+1)
+ def rec1(x):
+ return rec2(x-1)
+ def entry():
+ rec1(42)
+ """)
+ excinfo = pytest.raises(RuntimeError, mod.entry)
+
+ for style in ("short", "long", "no"):
+ p = FormattedExcinfo(style="short")
+ reprtb = p.repr_traceback(excinfo)
+ assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
+ assert str(reprtb)
+
+ def test_tb_entry_AssertionError(self, importasmod):
+ # probably this test is a bit redundant
+ # as py/magic/testing/test_assertion.py
+ # already tests correctness of
+ # assertion-reinterpretation logic
+ mod = importasmod("""
+ def somefunc():
+ x = 1
+ assert x == 2
+ """)
+ excinfo = pytest.raises(AssertionError, mod.somefunc)
+
+ p = FormattedExcinfo()
+ reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
+ lines = reprentry.lines
+ assert lines[-1] == "E assert 1 == 2"
+
+ def test_reprexcinfo_getrepr(self, importasmod):
+ mod = importasmod("""
+ def f(x):
+ raise ValueError(x)
+ def entry():
+ f(0)
+ """)
+ excinfo = pytest.raises(ValueError, mod.entry)
+
+ for style in ("short", "long", "no"):
+ for showlocals in (True, False):
+ repr = excinfo.getrepr(style=style, showlocals=showlocals)
+ assert isinstance(repr, ReprExceptionInfo)
+ assert repr.reprtraceback.style == style
+
+ def test_reprexcinfo_unicode(self):
+ from _pytest._code.code import TerminalRepr
+ class MyRepr(TerminalRepr):
+ def toterminal(self, tw):
+ tw.line(py.builtin._totext("Ñ", "utf-8"))
+ x = py.builtin._totext(MyRepr())
+ assert x == py.builtin._totext("Ñ", "utf-8")
+
+ def test_toterminal_long(self, importasmod):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = pytest.raises(ValueError, mod.f)
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == " def f():"
+ assert tw.lines[1] == "> g(3)"
+ assert tw.lines[2] == ""
+ assert tw.lines[3].endswith("mod.py:5: ")
+ assert tw.lines[4] == ("_ ", None)
+ assert tw.lines[5] == ""
+ assert tw.lines[6] == " def g(x):"
+ assert tw.lines[7] == "> raise ValueError(x)"
+ assert tw.lines[8] == "E ValueError: 3"
+ assert tw.lines[9] == ""
+ assert tw.lines[10].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_missing_source(self, importasmod, tmpdir):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = pytest.raises(ValueError, mod.f)
+ tmpdir.join('mod.py').remove()
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == "> ???"
+ assert tw.lines[1] == ""
+ assert tw.lines[2].endswith("mod.py:5: ")
+ assert tw.lines[3] == ("_ ", None)
+ assert tw.lines[4] == ""
+ assert tw.lines[5] == "> ???"
+ assert tw.lines[6] == "E ValueError: 3"
+ assert tw.lines[7] == ""
+ assert tw.lines[8].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = pytest.raises(ValueError, mod.f)
+ tmpdir.join('mod.py').write('asdf')
+ excinfo.traceback = excinfo.traceback.filter()
+ repr = excinfo.getrepr()
+ tw = TWMock()
+ repr.toterminal(tw)
+ assert tw.lines[0] == ""
+ tw.lines.pop(0)
+ assert tw.lines[0] == "> ???"
+ assert tw.lines[1] == ""
+ assert tw.lines[2].endswith("mod.py:5: ")
+ assert tw.lines[3] == ("_ ", None)
+ assert tw.lines[4] == ""
+ assert tw.lines[5] == "> ???"
+ assert tw.lines[6] == "E ValueError: 3"
+ assert tw.lines[7] == ""
+ assert tw.lines[8].endswith("mod.py:3: ValueError")
+
+ def test_toterminal_long_filenames(self, importasmod):
+ mod = importasmod("""
+ def f():
+ raise ValueError()
+ """)
+ excinfo = pytest.raises(ValueError, mod.f)
+ tw = TWMock()
+ path = py.path.local(mod.__file__)
+ old = path.dirpath().chdir()
+ try:
+ repr = excinfo.getrepr(abspath=False)
+ repr.toterminal(tw)
+ line = tw.lines[-1]
+ x = py.path.local().bestrelpath(path)
+ if len(x) < len(str(path)):
+ assert line == "mod.py:3: ValueError"
+
+ repr = excinfo.getrepr(abspath=True)
+ repr.toterminal(tw)
+ line = tw.lines[-1]
+ assert line == "%s:3: ValueError" %(path,)
+ finally:
+ old.chdir()
+
+ @pytest.mark.parametrize('reproptions', [
+ {'style': style, 'showlocals': showlocals,
+ 'funcargs': funcargs, 'tbfilter': tbfilter
+ } for style in ("long", "short", "no")
+ for showlocals in (True, False)
+ for tbfilter in (True, False)
+ for funcargs in (True, False)])
+ def test_format_excinfo(self, importasmod, reproptions):
+ mod = importasmod("""
+ def g(x):
+ raise ValueError(x)
+ def f():
+ g(3)
+ """)
+ excinfo = pytest.raises(ValueError, mod.f)
+ tw = py.io.TerminalWriter(stringio=True)
+ repr = excinfo.getrepr(**reproptions)
+ repr.toterminal(tw)
+ assert tw.stringio.getvalue()
+
+
+ def test_native_style(self):
+ excinfo = self.excinfo_from_exec("""
+ assert 0
+ """)
+ repr = excinfo.getrepr(style='native')
+ assert "assert 0" in str(repr.reprcrash)
+ s = str(repr)
+ assert s.startswith('Traceback (most recent call last):\n File')
+ assert s.endswith('\nAssertionError: assert 0')
+ assert 'exec (source.compile())' in s
+ # python 2.4 fails to get the source line for the assert
+ if py.std.sys.version_info >= (2, 5):
+ assert s.count('assert 0') == 2
+
+ def test_traceback_repr_style(self, importasmod):
+ mod = importasmod("""
+ def f():
+ g()
+ def g():
+ h()
+ def h():
+ i()
+ def i():
+ raise ValueError()
+ """)
+ excinfo = pytest.raises(ValueError, mod.f)
+ excinfo.traceback = excinfo.traceback.filter()
+ excinfo.traceback[1].set_repr_style("short")
+ excinfo.traceback[2].set_repr_style("short")
+ r = excinfo.getrepr(style="long")
+ tw = TWMock()
+ r.toterminal(tw)
+ for line in tw.lines: print (line)
+ assert tw.lines[0] == ""
+ assert tw.lines[1] == " def f():"
+ assert tw.lines[2] == "> g()"
+ assert tw.lines[3] == ""
+ assert tw.lines[4].endswith("mod.py:3: ")
+ assert tw.lines[5] == ("_ ", None)
+ assert tw.lines[6].endswith("in g")
+ assert tw.lines[7] == " h()"
+ assert tw.lines[8].endswith("in h")
+ assert tw.lines[9] == " i()"
+ assert tw.lines[10] == ("_ ", None)
+ assert tw.lines[11] == ""
+ assert tw.lines[12] == " def i():"
+ assert tw.lines[13] == "> raise ValueError()"
+ assert tw.lines[14] == "E ValueError"
+ assert tw.lines[15] == ""
+ assert tw.lines[16].endswith("mod.py:9: ValueError")
diff --git a/testing/web-platform/tests/tools/pytest/testing/code/test_source.py b/testing/web-platform/tests/tools/pytest/testing/code/test_source.py
new file mode 100644
index 000000000..007ad1433
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/code/test_source.py
@@ -0,0 +1,659 @@
+# flake8: noqa
+# disable flake check on this file because some constructs are strange
+# or redundant on purpose and can't be disabled on a line-by-line basis
+import sys
+
+import _pytest._code
+import py
+import pytest
+from _pytest._code import Source
+from _pytest._code.source import _ast
+
+if _ast is not None:
+ astonly = pytest.mark.nothing
+else:
+ astonly = pytest.mark.xfail("True", reason="only works with AST-compile")
+
+failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")
+
+def test_source_str_function():
+ x = Source("3")
+ assert str(x) == "3"
+
+ x = Source(" 3")
+ assert str(x) == "3"
+
+ x = Source("""
+ 3
+ """, rstrip=False)
+ assert str(x) == "\n3\n "
+
+ x = Source("""
+ 3
+ """, rstrip=True)
+ assert str(x) == "\n3"
+
+def test_unicode():
+ try:
+ unicode
+ except NameError:
+ return
+ x = Source(unicode("4"))
+ assert str(x) == "4"
+ co = _pytest._code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
+ val = eval(co)
+ assert isinstance(val, unicode)
+
+def test_source_from_function():
+ source = _pytest._code.Source(test_source_str_function)
+ assert str(source).startswith('def test_source_str_function():')
+
+def test_source_from_method():
+ class TestClass:
+ def test_method(self):
+ pass
+ source = _pytest._code.Source(TestClass().test_method)
+ assert source.lines == ["def test_method(self):",
+ " pass"]
+
+def test_source_from_lines():
+ lines = ["a \n", "b\n", "c"]
+ source = _pytest._code.Source(lines)
+ assert source.lines == ['a ', 'b', 'c']
+
+def test_source_from_inner_function():
+ def f():
+ pass
+ source = _pytest._code.Source(f, deindent=False)
+ assert str(source).startswith(' def f():')
+ source = _pytest._code.Source(f)
+ assert str(source).startswith('def f():')
+
+def test_source_putaround_simple():
+ source = Source("raise ValueError")
+ source = source.putaround(
+ "try:", """\
+ except ValueError:
+ x = 42
+ else:
+ x = 23""")
+ assert str(source)=="""\
+try:
+ raise ValueError
+except ValueError:
+ x = 42
+else:
+ x = 23"""
+
+def test_source_putaround():
+ source = Source()
+ source = source.putaround("""
+ if 1:
+ x=1
+ """)
+ assert str(source).strip() == "if 1:\n x=1"
+
+def test_source_strips():
+ source = Source("")
+ assert source == Source()
+ assert str(source) == ''
+ assert source.strip() == source
+
+def test_source_strip_multiline():
+ source = Source()
+ source.lines = ["", " hello", " "]
+ source2 = source.strip()
+ assert source2.lines == [" hello"]
+
+def test_syntaxerror_rerepresentation():
+ ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz')
+ assert ex.value.lineno == 1
+ assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython?
+ assert ex.value.text.strip(), 'x x'
+
+def test_isparseable():
+ assert Source("hello").isparseable()
+ assert Source("if 1:\n pass").isparseable()
+ assert Source(" \nif 1:\n pass").isparseable()
+ assert not Source("if 1:\n").isparseable()
+ assert not Source(" \nif 1:\npass").isparseable()
+ assert not Source(chr(0)).isparseable()
+
+class TestAccesses:
+ source = Source("""\
+ def f(x):
+ pass
+ def g(x):
+ pass
+ """)
+ def test_getrange(self):
+ x = self.source[0:2]
+ assert x.isparseable()
+ assert len(x.lines) == 2
+ assert str(x) == "def f(x):\n pass"
+
+ def test_getline(self):
+ x = self.source[0]
+ assert x == "def f(x):"
+
+ def test_len(self):
+ assert len(self.source) == 4
+
+ def test_iter(self):
+ l = [x for x in self.source]
+ assert len(l) == 4
+
+class TestSourceParsingAndCompiling:
+ source = Source("""\
+ def f(x):
+ assert (x ==
+ 3 +
+ 4)
+ """).strip()
+
+ def test_compile(self):
+ co = _pytest._code.compile("x=3")
+ d = {}
+ exec (co, d)
+ assert d['x'] == 3
+
+ def test_compile_and_getsource_simple(self):
+ co = _pytest._code.compile("x=3")
+ exec (co)
+ source = _pytest._code.Source(co)
+ assert str(source) == "x=3"
+
+ def test_compile_and_getsource_through_same_function(self):
+ def gensource(source):
+ return _pytest._code.compile(source)
+ co1 = gensource("""
+ def f():
+ raise KeyError()
+ """)
+ co2 = gensource("""
+ def f():
+ raise ValueError()
+ """)
+ source1 = py.std.inspect.getsource(co1)
+ assert 'KeyError' in source1
+ source2 = py.std.inspect.getsource(co2)
+ assert 'ValueError' in source2
+
+ def test_getstatement(self):
+ #print str(self.source)
+ ass = str(self.source[1:])
+ for i in range(1, 4):
+ #print "trying start in line %r" % self.source[i]
+ s = self.source.getstatement(i)
+ #x = s.deindent()
+ assert str(s) == ass
+
+ def test_getstatementrange_triple_quoted(self):
+ #print str(self.source)
+ source = Source("""hello('''
+ ''')""")
+ s = source.getstatement(0)
+ assert s == str(source)
+ s = source.getstatement(1)
+ assert s == str(source)
+
+ @astonly
+ def test_getstatementrange_within_constructs(self):
+ source = Source("""\
+ try:
+ try:
+ raise ValueError
+ except SomeThing:
+ pass
+ finally:
+ 42
+ """)
+ assert len(source) == 7
+ # check all lineno's that could occur in a traceback
+ #assert source.getstatementrange(0) == (0, 7)
+ #assert source.getstatementrange(1) == (1, 5)
+ assert source.getstatementrange(2) == (2, 3)
+ assert source.getstatementrange(3) == (3, 4)
+ assert source.getstatementrange(4) == (4, 5)
+ #assert source.getstatementrange(5) == (0, 7)
+ assert source.getstatementrange(6) == (6, 7)
+
+ def test_getstatementrange_bug(self):
+ source = Source("""\
+ try:
+ x = (
+ y +
+ z)
+ except:
+ pass
+ """)
+ assert len(source) == 6
+ assert source.getstatementrange(2) == (1, 4)
+
+ def test_getstatementrange_bug2(self):
+ source = Source("""\
+ assert (
+ 33
+ ==
+ [
+ X(3,
+ b=1, c=2
+ ),
+ ]
+ )
+ """)
+ assert len(source) == 9
+ assert source.getstatementrange(5) == (0, 9)
+
+ def test_getstatementrange_ast_issue58(self):
+ source = Source("""\
+
+ def test_some():
+ for a in [a for a in
+ CAUSE_ERROR]: pass
+
+ x = 3
+ """)
+ assert getstatement(2, source).lines == source.lines[2:3]
+ assert getstatement(3, source).lines == source.lines[3:4]
+
+ @pytest.mark.skipif("sys.version_info < (2,6)")
+ def test_getstatementrange_out_of_bounds_py3(self):
+ source = Source("if xxx:\n from .collections import something")
+ r = source.getstatementrange(1)
+ assert r == (1,2)
+
+ def test_getstatementrange_with_syntaxerror_issue7(self):
+ source = Source(":")
+ pytest.raises(SyntaxError, lambda: source.getstatementrange(0))
+
+ @pytest.mark.skipif("sys.version_info < (2,6)")
+ def test_compile_to_ast(self):
+ import ast
+ source = Source("x = 4")
+ mod = source.compile(flag=ast.PyCF_ONLY_AST)
+ assert isinstance(mod, ast.Module)
+ compile(mod, "<filename>", "exec")
+
+ def test_compile_and_getsource(self):
+ co = self.source.compile()
+ py.builtin.exec_(co, globals())
+ f(7)
+ excinfo = pytest.raises(AssertionError, "f(6)")
+ frame = excinfo.traceback[-1].frame
+ stmt = frame.code.fullsource.getstatement(frame.lineno)
+ #print "block", str(block)
+ assert str(stmt).strip().startswith('assert')
+
+ def test_compilefuncs_and_path_sanity(self):
+ def check(comp, name):
+ co = comp(self.source, name)
+ if not name:
+ expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
+ else:
+ expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
+ fn = co.co_filename
+ assert fn.endswith(expected)
+
+ mycode = _pytest._code.Code(self.test_compilefuncs_and_path_sanity)
+ mylineno = mycode.firstlineno
+ mypath = mycode.path
+
+ for comp in _pytest._code.compile, _pytest._code.Source.compile:
+ for name in '', None, 'my':
+ yield check, comp, name
+
+ def test_offsetless_synerr(self):
+ pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval')
+
+def test_getstartingblock_singleline():
+ class A:
+ def __init__(self, *args):
+ frame = sys._getframe(1)
+ self.source = _pytest._code.Frame(frame).statement
+
+ x = A('x', 'y')
+
+ l = [i for i in x.source.lines if i.strip()]
+ assert len(l) == 1
+
+def test_getstartingblock_multiline():
+ class A:
+ def __init__(self, *args):
+ frame = sys._getframe(1)
+ self.source = _pytest._code.Frame(frame).statement
+
+ x = A('x',
+ 'y' \
+ ,
+ 'z')
+
+ l = [i for i in x.source.lines if i.strip()]
+ assert len(l) == 4
+
+def test_getline_finally():
+ def c(): pass
+ excinfo = pytest.raises(TypeError, """
+ teardown = None
+ try:
+ c(1)
+ finally:
+ if teardown:
+ teardown()
+ """)
+ source = excinfo.traceback[-1].statement
+ assert str(source).strip() == 'c(1)'
+
+def test_getfuncsource_dynamic():
+ source = """
+ def f():
+ raise ValueError
+
+ def g(): pass
+ """
+ co = _pytest._code.compile(source)
+ py.builtin.exec_(co, globals())
+ assert str(_pytest._code.Source(f)).strip() == 'def f():\n raise ValueError'
+ assert str(_pytest._code.Source(g)).strip() == 'def g(): pass'
+
+
+def test_getfuncsource_with_multine_string():
+ def f():
+ c = '''while True:
+ pass
+'''
+ assert str(_pytest._code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
+
+
+def test_deindent():
+ from _pytest._code.source import deindent as deindent
+ assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
+
+ def f():
+ c = '''while True:
+ pass
+'''
+ import inspect
+ lines = deindent(inspect.getsource(f).splitlines())
+ assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
+
+ source = """
+ def f():
+ def g():
+ pass
+ """
+ lines = deindent(source.splitlines())
+ assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
+
+@pytest.mark.xfail("sys.version_info[:3] < (2,7,0) or "
+ "((3,0) <= sys.version_info[:2] < (3,2))")
+def test_source_of_class_at_eof_without_newline(tmpdir):
+ # this test fails because the implicit inspect.getsource(A) below
+ # does not return the "x = 1" last line.
+ source = _pytest._code.Source('''
+ class A(object):
+ def method(self):
+ x = 1
+ ''')
+ path = tmpdir.join("a.py")
+ path.write(source)
+ s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A)
+ assert str(source).strip() == str(s2).strip()
+
+if True:
+ def x():
+ pass
+
+def test_getsource_fallback():
+ from _pytest._code.source import getsource
+ expected = """def x():
+ pass"""
+ src = getsource(x)
+ assert src == expected
+
+def test_idem_compile_and_getsource():
+ from _pytest._code.source import getsource
+ expected = "def x(): pass"
+ co = _pytest._code.compile(expected)
+ src = getsource(co)
+ assert src == expected
+
+def test_findsource_fallback():
+ from _pytest._code.source import findsource
+ src, lineno = findsource(x)
+ assert 'test_findsource_simple' in str(src)
+ assert src[lineno] == ' def x():'
+
+def test_findsource():
+ from _pytest._code.source import findsource
+ co = _pytest._code.compile("""if 1:
+ def x():
+ pass
+""")
+
+ src, lineno = findsource(co)
+ assert 'if 1:' in str(src)
+
+ d = {}
+ eval(co, d)
+ src, lineno = findsource(d['x'])
+ assert 'if 1:' in str(src)
+ assert src[lineno] == " def x():"
+
+
+def test_getfslineno():
+ from _pytest._code import getfslineno
+
+ def f(x):
+ pass
+
+ fspath, lineno = getfslineno(f)
+
+ assert fspath.basename == "test_source.py"
+ assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource
+
+ class A(object):
+ pass
+
+ fspath, lineno = getfslineno(A)
+
+ _, A_lineno = py.std.inspect.findsource(A)
+ assert fspath.basename == "test_source.py"
+ assert lineno == A_lineno
+
+ assert getfslineno(3) == ("", -1)
+ class B:
+ pass
+ B.__name__ = "B2"
+ assert getfslineno(B)[1] == -1
+
+def test_code_of_object_instance_with_call():
+ class A:
+ pass
+ pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
+ class WithCall:
+ def __call__(self):
+ pass
+
+ code = _pytest._code.Code(WithCall())
+ assert 'pass' in str(code.source())
+
+ class Hello(object):
+ def __call__(self):
+ pass
+ pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))
+
+
+def getstatement(lineno, source):
+ from _pytest._code.source import getstatementrange_ast
+ source = _pytest._code.Source(source, deindent=False)
+ ast, start, end = getstatementrange_ast(lineno, source)
+ return source[start:end]
+
+def test_oneline():
+ source = getstatement(0, "raise ValueError")
+ assert str(source) == "raise ValueError"
+
+def test_comment_and_no_newline_at_end():
+ from _pytest._code.source import getstatementrange_ast
+ source = Source(['def test_basic_complex():',
+ ' assert 1 == 2',
+ '# vim: filetype=pyopencl:fdm=marker'])
+ ast, start, end = getstatementrange_ast(1, source)
+ assert end == 2
+
+def test_oneline_and_comment():
+ source = getstatement(0, "raise ValueError\n#hello")
+ assert str(source) == "raise ValueError"
+
+@pytest.mark.xfail(hasattr(sys, "pypy_version_info"),
+ reason='does not work on pypy')
+def test_comments():
+ source = '''def test():
+ "comment 1"
+ x = 1
+ # comment 2
+ # comment 3
+
+ assert False
+
+"""
+comment 4
+"""
+'''
+ for line in range(2,6):
+ assert str(getstatement(line, source)) == ' x = 1'
+ for line in range(6,10):
+ assert str(getstatement(line, source)) == ' assert False'
+ assert str(getstatement(10, source)) == '"""'
+
+def test_comment_in_statement():
+ source = '''test(foo=1,
+ # comment 1
+ bar=2)
+'''
+ for line in range(1,3):
+ assert str(getstatement(line, source)) == \
+ 'test(foo=1,\n # comment 1\n bar=2)'
+
+def test_single_line_else():
+ source = getstatement(1, "if False: 2\nelse: 3")
+ assert str(source) == "else: 3"
+
+def test_single_line_finally():
+ source = getstatement(1, "try: 1\nfinally: 3")
+ assert str(source) == "finally: 3"
+
+def test_issue55():
+ source = ('def round_trip(dinp):\n assert 1 == dinp\n'
+ 'def test_rt():\n round_trip("""\n""")\n')
+ s = getstatement(3, source)
+ assert str(s) == ' round_trip("""\n""")'
+
+
+def XXXtest_multiline():
+ source = getstatement(0, """\
+raise ValueError(
+ 23
+)
+x = 3
+""")
+ assert str(source) == "raise ValueError(\n 23\n)"
+
+class TestTry:
+ pytestmark = astonly
+ source = """\
+try:
+ raise ValueError
+except Something:
+ raise IndexError(1)
+else:
+ raise KeyError()
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " raise ValueError"
+
+ def test_except_line(self):
+ source = getstatement(2, self.source)
+ assert str(source) == "except Something:"
+
+ def test_except_body(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " raise IndexError(1)"
+
+ def test_else(self):
+ source = getstatement(5, self.source)
+ assert str(source) == " raise KeyError()"
+
+class TestTryFinally:
+ source = """\
+try:
+ raise ValueError
+finally:
+ raise IndexError(1)
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " raise ValueError"
+
+ def test_finally(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " raise IndexError(1)"
+
+
+
+class TestIf:
+ pytestmark = astonly
+ source = """\
+if 1:
+ y = 3
+elif False:
+ y = 5
+else:
+ y = 7
+"""
+
+ def test_body(self):
+ source = getstatement(1, self.source)
+ assert str(source) == " y = 3"
+
+ def test_elif_clause(self):
+ source = getstatement(2, self.source)
+ assert str(source) == "elif False:"
+
+ def test_elif(self):
+ source = getstatement(3, self.source)
+ assert str(source) == " y = 5"
+
+ def test_else(self):
+ source = getstatement(5, self.source)
+ assert str(source) == " y = 7"
+
+def test_semicolon():
+ s = """\
+hello ; pytest.skip()
+"""
+ source = getstatement(0, s)
+ assert str(source) == s.strip()
+
+def test_def_online():
+ s = """\
+def func(): raise ValueError(42)
+
+def something():
+ pass
+"""
+ source = getstatement(0, s)
+ assert str(source) == "def func(): raise ValueError(42)"
+
+def XXX_test_expression_multiline():
+ source = """\
+something
+'''
+'''"""
+ result = getstatement(1, source)
+ assert str(result) == "'''\n'''"
+
diff --git a/testing/web-platform/tests/tools/pytest/testing/cx_freeze/install_cx_freeze.py b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/install_cx_freeze.py
new file mode 100644
index 000000000..83dce87aa
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/install_cx_freeze.py
@@ -0,0 +1,64 @@
+"""
+Installs cx_freeze from source, first patching its
+setup.py as described here:
+
+http://stackoverflow.com/questions/25107697/compiling-cx-freeze-under-ubuntu
+"""
+import glob
+import tarfile
+import os
+import sys
+import platform
+import py
+
+if __name__ == '__main__':
+ if 'ubuntu' not in platform.version().lower():
+
+ print('Not Ubuntu, installing using pip. (platform.version() is %r)' %
+ platform.version())
+ res = os.system('pip install cx_freeze')
+ if res != 0:
+ sys.exit(res)
+ sys.exit(0)
+
+ rootdir = py.path.local.make_numbered_dir(prefix='cx_freeze')
+
+ res = os.system('pip install --download %s --no-use-wheel '
+ 'cx_freeze' % rootdir)
+ if res != 0:
+ sys.exit(res)
+
+ packages = glob.glob('%s/*.tar.gz' % rootdir)
+ assert len(packages) == 1
+ tar_filename = packages[0]
+
+ tar_file = tarfile.open(tar_filename)
+ try:
+ tar_file.extractall(path=str(rootdir))
+ finally:
+ tar_file.close()
+
+ basename = os.path.basename(tar_filename).replace('.tar.gz', '')
+ setup_py_filename = '%s/%s/setup.py' % (rootdir, basename)
+ with open(setup_py_filename) as f:
+ lines = f.readlines()
+
+ line_to_patch = 'if not vars.get("Py_ENABLE_SHARED", 0):'
+ for index, line in enumerate(lines):
+ if line_to_patch in line:
+ indent = line[:line.index(line_to_patch)]
+ lines[index] = indent + 'if True:\n'
+ print('Patched line %d' % (index + 1))
+ break
+ else:
+ sys.exit('Could not find line in setup.py to patch!')
+
+ with open(setup_py_filename, 'w') as f:
+ f.writelines(lines)
+
+ os.chdir('%s/%s' % (rootdir, basename))
+ res = os.system('python setup.py install')
+ if res != 0:
+ sys.exit(res)
+
+ sys.exit(0)
diff --git a/testing/web-platform/tests/tools/pytest/testing/cx_freeze/runtests_script.py b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/runtests_script.py
new file mode 100644
index 000000000..f2b032d76
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/runtests_script.py
@@ -0,0 +1,9 @@
+"""
+This is the script that is actually frozen into an executable: simply executes
+py.test main().
+"""
+
+if __name__ == '__main__':
+ import sys
+ import pytest
+ sys.exit(pytest.main()) \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/pytest/testing/cx_freeze/runtests_setup.py b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/runtests_setup.py
new file mode 100644
index 000000000..a2874a655
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/runtests_setup.py
@@ -0,0 +1,15 @@
+"""
+Sample setup.py script that generates an executable with the pytest runner embedded.
+"""
+if __name__ == '__main__':
+ from cx_Freeze import setup, Executable
+ import pytest
+
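+ # pytest.freeze_includes() returns the names of the pytest/py modules that cx_Freeze must bundle so the frozen executable can run tests.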
+ setup(
+ name="runtests",
+ version="0.1",
+ description="exemple of how embedding py.test into an executable using cx_freeze",
+ executables=[Executable("runtests_script.py")],
+ options={"build_exe": {'includes': pytest.freeze_includes()}},
+ )
+
diff --git a/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tests/test_doctest.txt b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tests/test_doctest.txt
new file mode 100644
index 000000000..e18a4b68c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tests/test_doctest.txt
@@ -0,0 +1,6 @@
+
+
+Testing doctest::
+
+ >>> 1 + 1
+ 2
diff --git a/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tests/test_trivial.py b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tests/test_trivial.py
new file mode 100644
index 000000000..d8a572baa
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tests/test_trivial.py
@@ -0,0 +1,6 @@
+
+def test_upper():
+ assert 'foo'.upper() == 'FOO'
+
+def test_lower():
+ assert 'FOO'.lower() == 'foo' \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tox_run.py b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tox_run.py
new file mode 100644
index 000000000..e8df2684b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/cx_freeze/tox_run.py
@@ -0,0 +1,15 @@
+"""
+Called by tox.ini: uses the generated executable to run the tests in the
+./tests/ directory.
+
+.. note:: somehow calling "build/runtests_script" directly from tox doesn't
+ seem to work (at least on Windows).
+"""
+if __name__ == '__main__':
+ import os
+ import sys
+
+ executable = os.path.join(os.getcwd(), 'build', 'runtests_script')
+ if sys.platform.startswith('win'):
+ executable += '.exe'
+ sys.exit(os.system('%s tests' % executable)) \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/pytest/testing/python/collect.py b/testing/web-platform/tests/tools/pytest/testing/python/collect.py
new file mode 100644
index 000000000..22433da77
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/python/collect.py
@@ -0,0 +1,1200 @@
+# -*- coding: utf-8 -*-
+import sys
+from textwrap import dedent
+
+import _pytest._code
+import py
+import pytest
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+
+
+class TestModule:
+ def test_failing_import(self, testdir):
+ modcol = testdir.getmodulecol("import alksdjalskdjalkjals")
+ pytest.raises(ImportError, modcol.collect)
+ pytest.raises(ImportError, modcol.collect)
+
+ def test_import_duplicate(self, testdir):
+ a = testdir.mkdir("a")
+ b = testdir.mkdir("b")
+ p = a.ensure("test_whatever.py")
+ p.pyimport()
+ del py.std.sys.modules['test_whatever']
+ b.ensure("test_whatever.py")
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*import*mismatch*",
+ "*imported*test_whatever*",
+ "*%s*" % a.join("test_whatever.py"),
+ "*not the same*",
+ "*%s*" % b.join("test_whatever.py"),
+ "*HINT*",
+ ])
+
+ def test_import_prepend_append(self, testdir, monkeypatch):
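+ # Work on a copy of sys.path so the original list is restored when monkeypatch undoes the setattr at teardown.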
+ syspath = list(sys.path)
+ monkeypatch.setattr(sys, "path", syspath)
+ root1 = testdir.mkdir("root1")
+ root2 = testdir.mkdir("root2")
+ root1.ensure("x456.py")
+ root2.ensure("x456.py")
+ p = root2.join("test_x456.py")
+ monkeypatch.syspath_prepend(str(root1))
+ p.write(dedent("""\
+ import x456
+ def test():
+ assert x456.__file__.startswith(%r)
+ """ % str(root2)))
+ with root2.as_cwd():
+ reprec = testdir.inline_run("--import-mode=append")
+ reprec.assertoutcome(passed=0, failed=1)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_syntax_error_in_module(self, testdir):
+ modcol = testdir.getmodulecol("this is a syntax error")
+ pytest.raises(modcol.CollectError, modcol.collect)
+ pytest.raises(modcol.CollectError, modcol.collect)
+
+ def test_module_considers_pluginmanager_at_import(self, testdir):
+ modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',")
+ pytest.raises(ImportError, lambda: modcol.obj)
+
+class TestClass:
+ def test_class_with_init_warning(self, testdir):
+ testdir.makepyfile("""
+ class TestClass1:
+ def __init__(self):
+ pass
+ """)
+ result = testdir.runpytest("-rw")
+ result.stdout.fnmatch_lines_random("""
+ WC1*test_class_with_init_warning.py*__init__*
+ """)
+
+ def test_class_subclassobject(self, testdir):
+ testdir.getmodulecol("""
+ class test(object):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*collected 0*",
+ ])
+
+ def test_setup_teardown_class_as_classmethod(self, testdir):
+ testdir.makepyfile(test_mod1="""
+ class TestClassMethod:
+ @classmethod
+ def setup_class(cls):
+ pass
+ def test_1(self):
+ pass
+ @classmethod
+ def teardown_class(cls):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*1 passed*",
+ ])
+
+ def test_issue1035_obj_has_getattr(self, testdir):
+ modcol = testdir.getmodulecol("""
+ class Chameleon(object):
+ def __getattr__(self, name):
+ return True
+ chameleon = Chameleon()
+ """)
+ colitems = modcol.collect()
+ assert len(colitems) == 0
+
+
+class TestGenerator:
+ def test_generative_functions(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def func1(arg, arg2):
+ assert arg == arg2
+
+ def test_gen():
+ yield func1, 17, 3*5
+ yield func1, 42, 6*7
+ """)
+ colitems = modcol.collect()
+ assert len(colitems) == 1
+ gencol = colitems[0]
+ assert isinstance(gencol, pytest.Generator)
+ gencolitems = gencol.collect()
+ assert len(gencolitems) == 2
+ assert isinstance(gencolitems[0], pytest.Function)
+ assert isinstance(gencolitems[1], pytest.Function)
+ assert gencolitems[0].name == '[0]'
+ assert gencolitems[0].obj.__name__ == 'func1'
+
+ def test_generative_methods(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def func1(arg, arg2):
+ assert arg == arg2
+ class TestGenMethods:
+ def test_gen(self):
+ yield func1, 17, 3*5
+ yield func1, 42, 6*7
+ """)
+ gencol = modcol.collect()[0].collect()[0].collect()[0]
+ assert isinstance(gencol, pytest.Generator)
+ gencolitems = gencol.collect()
+ assert len(gencolitems) == 2
+ assert isinstance(gencolitems[0], pytest.Function)
+ assert isinstance(gencolitems[1], pytest.Function)
+ assert gencolitems[0].name == '[0]'
+ assert gencolitems[0].obj.__name__ == 'func1'
+
+ def test_generative_functions_with_explicit_names(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def func1(arg, arg2):
+ assert arg == arg2
+
+ def test_gen():
+ yield "seventeen", func1, 17, 3*5
+ yield "fortytwo", func1, 42, 6*7
+ """)
+ colitems = modcol.collect()
+ assert len(colitems) == 1
+ gencol = colitems[0]
+ assert isinstance(gencol, pytest.Generator)
+ gencolitems = gencol.collect()
+ assert len(gencolitems) == 2
+ assert isinstance(gencolitems[0], pytest.Function)
+ assert isinstance(gencolitems[1], pytest.Function)
+ assert gencolitems[0].name == "['seventeen']"
+ assert gencolitems[0].obj.__name__ == 'func1'
+ assert gencolitems[1].name == "['fortytwo']"
+ assert gencolitems[1].obj.__name__ == 'func1'
+
+ def test_generative_functions_unique_explicit_names(self, testdir):
+ # generative
+ modcol = testdir.getmodulecol("""
+ def func(): pass
+ def test_gen():
+ yield "name", func
+ yield "name", func
+ """)
+ colitems = modcol.collect()
+ assert len(colitems) == 1
+ gencol = colitems[0]
+ assert isinstance(gencol, pytest.Generator)
+ pytest.raises(ValueError, "gencol.collect()")
+
+ def test_generative_methods_with_explicit_names(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def func1(arg, arg2):
+ assert arg == arg2
+ class TestGenMethods:
+ def test_gen(self):
+ yield "m1", func1, 17, 3*5
+ yield "m2", func1, 42, 6*7
+ """)
+ gencol = modcol.collect()[0].collect()[0].collect()[0]
+ assert isinstance(gencol, pytest.Generator)
+ gencolitems = gencol.collect()
+ assert len(gencolitems) == 2
+ assert isinstance(gencolitems[0], pytest.Function)
+ assert isinstance(gencolitems[1], pytest.Function)
+ assert gencolitems[0].name == "['m1']"
+ assert gencolitems[0].obj.__name__ == 'func1'
+ assert gencolitems[1].name == "['m2']"
+ assert gencolitems[1].obj.__name__ == 'func1'
+
+ def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir):
+ o = testdir.makepyfile("""
+ def test_generative_order_of_execution():
+ import py, pytest
+ test_list = []
+ expected_list = list(range(6))
+
+ def list_append(item):
+ test_list.append(item)
+
+ def assert_order_of_execution():
+ py.builtin.print_('expected order', expected_list)
+ py.builtin.print_('but got ', test_list)
+ assert test_list == expected_list
+
+ for i in expected_list:
+ yield list_append, i
+ yield assert_order_of_execution
+ """)
+ reprec = testdir.inline_run(o)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert passed == 7
+ assert not skipped and not failed
+
+ def test_order_of_execution_generator_different_codeline(self, testdir):
+ o = testdir.makepyfile("""
+ def test_generative_tests_different_codeline():
+ import py, pytest
+ test_list = []
+ expected_list = list(range(3))
+
+ def list_append_2():
+ test_list.append(2)
+
+ def list_append_1():
+ test_list.append(1)
+
+ def list_append_0():
+ test_list.append(0)
+
+ def assert_order_of_execution():
+ py.builtin.print_('expected order', expected_list)
+ py.builtin.print_('but got ', test_list)
+ assert test_list == expected_list
+
+ yield list_append_0
+ yield list_append_1
+ yield list_append_2
+ yield assert_order_of_execution
+ """)
+ reprec = testdir.inline_run(o)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert passed == 4
+ assert not skipped and not failed
+
+ def test_setupstate_is_preserved_134(self, testdir):
+ # yield-based tests are messy wrt setupstate because
+ # during collection they already invoke setup functions
+ # and then again when they are run. For now, we want to make sure
+ # that the old 1.3.4 behaviour is preserved such that all
+ # yielded functions share the same "self" instance that
+ # was used during collection.
+ o = testdir.makepyfile("""
+ setuplist = []
+ class TestClass:
+ def setup_method(self, func):
+ #print "setup_method", self, func
+ setuplist.append(self)
+ self.init = 42
+
+ def teardown_method(self, func):
+ self.init = None
+
+ def test_func1(self):
+ pass
+
+ def test_func2(self):
+ yield self.func2
+ yield self.func2
+
+ def func2(self):
+ assert self.init
+
+ def test_setuplist():
+ # once for test_func2 during collection
+ # once for test_func1 during test run
+ # once for test_func2 during test run
+ #print setuplist
+ assert len(setuplist) == 3, len(setuplist)
+ assert setuplist[0] == setuplist[2], setuplist
+ assert setuplist[1] != setuplist[2], setuplist
+ """)
+ reprec = testdir.inline_run(o, '-v')
+ passed, skipped, failed = reprec.countoutcomes()
+ assert passed == 4
+ assert not skipped and not failed
+
+
+class TestFunction:
+ def test_getmodulecollector(self, testdir):
+ item = testdir.getitem("def test_func(): pass")
+ modcol = item.getparent(pytest.Module)
+ assert isinstance(modcol, pytest.Module)
+ assert hasattr(modcol.obj, 'test_func')
+
+ def test_function_as_object_instance_ignored(self, testdir):
+ testdir.makepyfile("""
+ class A:
+ def __call__(self, tmpdir):
+ 0/0
+
+ test_a = A()
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome()
+
+ def test_function_equality(self, testdir, tmpdir):
+ from _pytest.python import FixtureManager
+ config = testdir.parseconfigure()
+ session = testdir.Session(config)
+ session._fixturemanager = FixtureManager(session)
+ def func1():
+ pass
+ def func2():
+ pass
+ f1 = pytest.Function(name="name", parent=session, config=config,
+ args=(1,), callobj=func1)
+ assert f1 == f1
+ f2 = pytest.Function(name="name",config=config,
+ callobj=func2, parent=session)
+ assert f1 != f2
+
+ def test_issue197_parametrize_emptyset(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize('arg', [])
+ def test_function(arg):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(skipped=1)
+
+ def test_single_tuple_unwraps_values(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize(('arg',), [(1,)])
+ def test_function(arg):
+ assert arg == 1
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_issue213_parametrize_value_no_equal(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ class A:
+ def __eq__(self, other):
+ raise ValueError("not possible")
+ @pytest.mark.parametrize('arg', [A()])
+ def test_function(arg):
+ assert arg.__class__.__name__ == "A"
+ """)
+ reprec = testdir.inline_run("--fulltrace")
+ reprec.assertoutcome(passed=1)
+
+ def test_parametrize_with_non_hashable_values(self, testdir):
+ """Test parametrization with non-hashable values."""
+ testdir.makepyfile("""
+ archival_mapping = {
+ '1.0': {'tag': '1.0'},
+ '1.2.2a1': {'tag': 'release-1.2.2a1'},
+ }
+
+ import pytest
+ @pytest.mark.parametrize('key value'.split(),
+ archival_mapping.items())
+ def test_archival_to_version(key, value):
+ assert key in archival_mapping
+ assert value == archival_mapping[key]
+ """)
+ rec = testdir.inline_run()
+ rec.assertoutcome(passed=2)
+
+
+ def test_parametrize_with_non_hashable_values_indirect(self, testdir):
+ """Test parametrization with non-hashable values with indirect parametrization."""
+ testdir.makepyfile("""
+ archival_mapping = {
+ '1.0': {'tag': '1.0'},
+ '1.2.2a1': {'tag': 'release-1.2.2a1'},
+ }
+
+ import pytest
+
+ @pytest.fixture
+ def key(request):
+ return request.param
+
+ @pytest.fixture
+ def value(request):
+ return request.param
+
+ @pytest.mark.parametrize('key value'.split(),
+ archival_mapping.items(), indirect=True)
+ def test_archival_to_version(key, value):
+ assert key in archival_mapping
+ assert value == archival_mapping[key]
+ """)
+ rec = testdir.inline_run()
+ rec.assertoutcome(passed=2)
+
+
+ def test_parametrize_overrides_fixture(self, testdir):
+ """Test parametrization when parameter overrides existing fixture with same name."""
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture
+ def value():
+ return 'value'
+
+ @pytest.mark.parametrize('value',
+ ['overridden'])
+ def test_overridden_via_param(value):
+ assert value == 'overridden'
+
+ @pytest.mark.parametrize('somevalue', ['overridden'])
+ def test_not_overridden(value, somevalue):
+ assert value == 'value'
+ assert somevalue == 'overridden'
+
+ @pytest.mark.parametrize('other,value', [('foo', 'overridden')])
+ def test_overridden_via_multiparam(other, value):
+ assert other == 'foo'
+ assert value == 'overridden'
+ """)
+ rec = testdir.inline_run()
+ rec.assertoutcome(passed=3)
+
+
+ def test_parametrize_overrides_parametrized_fixture(self, testdir):
+ """Test parametrization when parameter overrides existing parametrized fixture with same name."""
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(params=[1, 2])
+ def value(request):
+ return request.param
+
+ @pytest.mark.parametrize('value',
+ ['overridden'])
+ def test_overridden_via_param(value):
+ assert value == 'overridden'
+ """)
+ rec = testdir.inline_run()
+ rec.assertoutcome(passed=1)
+
+ def test_parametrize_with_mark(self, testdir):
+ items = testdir.getitems("""
+ import pytest
+ @pytest.mark.foo
+ @pytest.mark.parametrize('arg', [
+ 1,
+ pytest.mark.bar(pytest.mark.baz(2))
+ ])
+ def test_function(arg):
+ pass
+ """)
+ keywords = [item.keywords for item in items]
+ assert 'foo' in keywords[0] and 'bar' not in keywords[0] and 'baz' not in keywords[0]
+ assert 'foo' in keywords[1] and 'bar' in keywords[1] and 'baz' in keywords[1]
+
+ def test_function_equality_with_callspec(self, testdir, tmpdir):
+ items = testdir.getitems("""
+ import pytest
+ @pytest.mark.parametrize('arg', [1,2])
+ def test_function(arg):
+ pass
+ """)
+ assert items[0] != items[1]
+ assert not (items[0] == items[1])
+
+ def test_pyfunc_call(self, testdir):
+ item = testdir.getitem("def test_func(): raise ValueError")
+ config = item.config
+ class MyPlugin1:
+ def pytest_pyfunc_call(self, pyfuncitem):
+ raise ValueError
+ class MyPlugin2:
+ def pytest_pyfunc_call(self, pyfuncitem):
+ return True
+ config.pluginmanager.register(MyPlugin1())
+ config.pluginmanager.register(MyPlugin2())
+ config.hook.pytest_runtest_setup(item=item)
+ config.hook.pytest_pyfunc_call(pyfuncitem=item)
+
+ def test_multiple_parametrize(self, testdir):
+ modcol = testdir.getmodulecol("""
+ import pytest
+ @pytest.mark.parametrize('x', [0, 1])
+ @pytest.mark.parametrize('y', [2, 3])
+ def test1(x, y):
+ pass
+ """)
+ colitems = modcol.collect()
+ assert colitems[0].name == 'test1[2-0]'
+ assert colitems[1].name == 'test1[2-1]'
+ assert colitems[2].name == 'test1[3-0]'
+ assert colitems[3].name == 'test1[3-1]'
+
+ def test_issue751_multiple_parametrize_with_ids(self, testdir):
+ modcol = testdir.getmodulecol("""
+ import pytest
+ @pytest.mark.parametrize('x', [0], ids=['c'])
+ @pytest.mark.parametrize('y', [0, 1], ids=['a', 'b'])
+ class Test(object):
+ def test1(self, x, y):
+ pass
+ def test2(self, x, y):
+ pass
+ """)
+ colitems = modcol.collect()[0].collect()[0].collect()
+ assert colitems[0].name == 'test1[a-c]'
+ assert colitems[1].name == 'test1[b-c]'
+ assert colitems[2].name == 'test2[a-c]'
+ assert colitems[3].name == 'test2[b-c]'
+
+ def test_parametrize_skipif(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ m = pytest.mark.skipif('True')
+
+ @pytest.mark.parametrize('x', [0, 1, m(2)])
+ def test_skip_if(x):
+ assert x < 2
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('* 2 passed, 1 skipped in *')
+
+ def test_parametrize_skip(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ m = pytest.mark.skip('')
+
+ @pytest.mark.parametrize('x', [0, 1, m(2)])
+ def test_skip(x):
+ assert x < 2
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('* 2 passed, 1 skipped in *')
+
+ def test_parametrize_skipif_no_skip(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ m = pytest.mark.skipif('False')
+
+ @pytest.mark.parametrize('x', [0, 1, m(2)])
+ def test_skipif_no_skip(x):
+ assert x < 2
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('* 1 failed, 2 passed in *')
+
+ def test_parametrize_xfail(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ m = pytest.mark.xfail('True')
+
+ @pytest.mark.parametrize('x', [0, 1, m(2)])
+ def test_xfail(x):
+ assert x < 2
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('* 2 passed, 1 xfailed in *')
+
+ def test_parametrize_passed(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ m = pytest.mark.xfail('True')
+
+ @pytest.mark.parametrize('x', [0, 1, m(2)])
+ def test_xfail(x):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('* 2 passed, 1 xpassed in *')
+
+ def test_parametrize_xfail_passed(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ m = pytest.mark.xfail('False')
+
+ @pytest.mark.parametrize('x', [0, 1, m(2)])
+ def test_passed(x):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('* 3 passed in *')
+
+
+class TestSorting:
+ def test_check_equality(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def test_pass(): pass
+ def test_fail(): assert 0
+ """)
+ fn1 = testdir.collect_by_name(modcol, "test_pass")
+ assert isinstance(fn1, pytest.Function)
+ fn2 = testdir.collect_by_name(modcol, "test_pass")
+ assert isinstance(fn2, pytest.Function)
+
+ assert fn1 == fn2
+ assert fn1 != modcol
+ if py.std.sys.version_info < (3, 0):
+ assert cmp(fn1, fn2) == 0
+ assert hash(fn1) == hash(fn2)
+
+ fn3 = testdir.collect_by_name(modcol, "test_fail")
+ assert isinstance(fn3, pytest.Function)
+ assert not (fn1 == fn3)
+ assert fn1 != fn3
+
+ for fn in fn1,fn2,fn3:
+ assert fn != 3
+ assert fn != modcol
+ assert fn != [1,2,3]
+ assert [1,2,3] != fn
+ assert modcol != fn
+
+ def test_allow_sane_sorting_for_decorators(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def dec(f):
+ g = lambda: f(2)
+ g.place_as = f
+ return g
+
+
+ def test_b(y):
+ pass
+ test_b = dec(test_b)
+
+ def test_a(y):
+ pass
+ test_a = dec(test_a)
+ """)
+ colitems = modcol.collect()
+ assert len(colitems) == 2
+ assert [item.name for item in colitems] == ['test_b', 'test_a']
+
+
+class TestConftestCustomization:
+ def test_pytest_pycollect_module(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ class MyModule(pytest.Module):
+ pass
+ def pytest_pycollect_makemodule(path, parent):
+ if path.basename == "test_xyz.py":
+ return MyModule(path, parent)
+ """)
+ testdir.makepyfile("def test_some(): pass")
+ testdir.makepyfile(test_xyz="def test_func(): pass")
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*<Module*test_pytest*",
+ "*<MyModule*xyz*",
+ ])
+
+ def test_customized_pymakemodule_issue205_subdir(self, testdir):
+ b = testdir.mkdir("a").mkdir("b")
+ b.join("conftest.py").write(_pytest._code.Source("""
+ def pytest_pycollect_makemodule(__multicall__):
+ mod = __multicall__.execute()
+ mod.obj.hello = "world"
+ return mod
+ """))
+ b.join("test_module.py").write(_pytest._code.Source("""
+ def test_hello():
+ assert hello == "world"
+ """))
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_customized_pymakeitem(self, testdir):
+ b = testdir.mkdir("a").mkdir("b")
+ b.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_pycollect_makeitem():
+ outcome = yield
+ if outcome.excinfo is None:
+ result = outcome.result
+ if result:
+ for func in result:
+ func._some123 = "world"
+ """))
+ b.join("test_module.py").write(_pytest._code.Source("""
+ import pytest
+
+ @pytest.fixture()
+ def obj(request):
+ return request.node._some123
+ def test_hello(obj):
+ assert obj == "world"
+ """))
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_pytest_pycollect_makeitem(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ class MyFunction(pytest.Function):
+ pass
+ def pytest_pycollect_makeitem(collector, name, obj):
+ if name == "some":
+ return MyFunction(name, collector)
+ """)
+ testdir.makepyfile("def some(): pass")
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*MyFunction*some*",
+ ])
+
+ def test_makeitem_non_underscore(self, testdir, monkeypatch):
+ modcol = testdir.getmodulecol("def _hello(): pass")
+ l = []
+ monkeypatch.setattr(pytest.Module, 'makeitem',
+ lambda self, name, obj: l.append(name))
+ l = modcol.collect()
+ assert '_hello' not in l
+
+def test_setup_only_available_in_subdir(testdir):
+ sub1 = testdir.mkpydir("sub1")
+ sub2 = testdir.mkpydir("sub2")
+ sub1.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+ def pytest_runtest_setup(item):
+ assert item.fspath.purebasename == "test_in_sub1"
+ def pytest_runtest_call(item):
+ assert item.fspath.purebasename == "test_in_sub1"
+ def pytest_runtest_teardown(item):
+ assert item.fspath.purebasename == "test_in_sub1"
+ """))
+ sub2.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+ def pytest_runtest_setup(item):
+ assert item.fspath.purebasename == "test_in_sub2"
+ def pytest_runtest_call(item):
+ assert item.fspath.purebasename == "test_in_sub2"
+ def pytest_runtest_teardown(item):
+ assert item.fspath.purebasename == "test_in_sub2"
+ """))
+ sub1.join("test_in_sub1.py").write("def test_1(): pass")
+ sub2.join("test_in_sub2.py").write("def test_2(): pass")
+ result = testdir.runpytest("-v", "-s")
+ result.assert_outcomes(passed=2)
+
+def test_modulecol_roundtrip(testdir):
+ modcol = testdir.getmodulecol("pass", withinit=True)
+ trail = modcol.nodeid
+ newcol = modcol.session.perform_collect([trail], genitems=0)[0]
+ assert modcol.name == newcol.name
+
+
+class TestTracebackCutting:
+ def test_skip_simple(self):
+ excinfo = pytest.raises(pytest.skip.Exception, 'pytest.skip("xxx")')
+ assert excinfo.traceback[-1].frame.code.name == "skip"
+ assert excinfo.traceback[-1].ishidden()
+
+ def test_traceback_argsetup(self, testdir):
+ testdir.makeconftest("""
+ def pytest_funcarg__hello(request):
+ raise ValueError("xyz")
+ """)
+ p = testdir.makepyfile("def test(hello): pass")
+ result = testdir.runpytest(p)
+ assert result.ret != 0
+ out = result.stdout.str()
+ assert out.find("xyz") != -1
+ assert out.find("conftest.py:2: ValueError") != -1
+ numentries = out.count("_ _ _") # separator for traceback entries
+ assert numentries == 0
+
+ result = testdir.runpytest("--fulltrace", p)
+ out = result.stdout.str()
+ assert out.find("conftest.py:2: ValueError") != -1
+ numentries = out.count("_ _ _ _") # separator for traceback entries
+ assert numentries > 3
+
+ def test_traceback_error_during_import(self, testdir):
+ testdir.makepyfile("""
+ x = 1
+ x = 2
+ x = 17
+ asd
+ """)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ out = result.stdout.str()
+ assert "x = 1" not in out
+ assert "x = 2" not in out
+ result.stdout.fnmatch_lines([
+ " *asd*",
+ "E*NameError*",
+ ])
+ result = testdir.runpytest("--fulltrace")
+ out = result.stdout.str()
+ assert "x = 1" in out
+ assert "x = 2" in out
+ result.stdout.fnmatch_lines([
+ ">*asd*",
+ "E*NameError*",
+ ])
+
+ def test_traceback_filter_error_during_fixture_collection(self, testdir):
+ """integration test for issue #995.
+ """
+ testdir.makepyfile("""
+ import pytest
+
+ def fail_me(func):
+ ns = {}
+ exec('def w(): raise ValueError("fail me")', ns)
+ return ns['w']
+
+ @pytest.fixture(scope='class')
+ @fail_me
+ def fail_fixture():
+ pass
+
+ def test_failing_fixture(fail_fixture):
+ pass
+ """)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ out = result.stdout.str()
+ assert "INTERNALERROR>" not in out
+ result.stdout.fnmatch_lines([
+ "*ValueError: fail me*",
+ "* 1 error in *",
+ ])
+
+ def test_filter_traceback_generated_code(self):
+ """test that filter_traceback() works with the fact that
+ py.code.Code.path attribute might return an str object.
+ In this case, one of the entries on the traceback was produced by
+ dynamically generated code.
+ See: https://bitbucket.org/pytest-dev/py/issues/71
+ This fixes #995.
+ """
+ from _pytest.python import filter_traceback
+ try:
+ ns = {}
+ exec('def foo(): raise ValueError', ns)
+ ns['foo']()
+ except ValueError:
+ _, _, tb = sys.exc_info()
+
+ tb = _pytest._code.Traceback(tb)
+ assert isinstance(tb[-1].path, str)
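+ # Download the cx_freeze source distribution (no wheel) into the scratch directory instead of installing it, so setup.py can be patched first.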
+ assert not filter_traceback(tb[-1])
+
+ def test_filter_traceback_path_no_longer_valid(self, testdir):
+ """test that filter_traceback() works with the fact that
+ py.code.Code.path attribute might return an str object.
+ In this case, one of the files in the traceback no longer exists.
+ This fixes #1133.
+ """
+ from _pytest.python import filter_traceback
+ testdir.syspathinsert()
+ testdir.makepyfile(filter_traceback_entry_as_str='''
+ def foo():
+ raise ValueError
+ ''')
+ try:
+ import filter_traceback_entry_as_str
+ filter_traceback_entry_as_str.foo()
+ except ValueError:
+ _, _, tb = sys.exc_info()
+
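+ # Replace the Py_ENABLE_SHARED check with "if True:" so that branch always runs (the Ubuntu build workaround from the Stack Overflow link above).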
+ testdir.tmpdir.join('filter_traceback_entry_as_str.py').remove()
+ tb = _pytest._code.Traceback(tb)
+ assert isinstance(tb[-1].path, str)
+ assert filter_traceback(tb[-1])
+
+
+class TestReportInfo:
+ def test_itemreport_reportinfo(self, testdir, linecomp):
+ testdir.makeconftest("""
+ import pytest
+ class MyFunction(pytest.Function):
+ def reportinfo(self):
+ return "ABCDE", 42, "custom"
+ def pytest_pycollect_makeitem(collector, name, obj):
+ if name == "test_func":
+ return MyFunction(name, parent=collector)
+ """)
+ item = testdir.getitem("def test_func(): pass")
+ item.config.pluginmanager.getplugin("runner")
+ assert item.location == ("ABCDE", 42, "custom")
+
+ def test_func_reportinfo(self, testdir):
+ item = testdir.getitem("def test_func(): pass")
+ fspath, lineno, modpath = item.reportinfo()
+ assert fspath == item.fspath
+ assert lineno == 0
+ assert modpath == "test_func"
+
+ def test_class_reportinfo(self, testdir):
+ modcol = testdir.getmodulecol("""
+ # lineno 0
+ class TestClass:
+ def test_hello(self): pass
+ """)
+ classcol = testdir.collect_by_name(modcol, "TestClass")
+ fspath, lineno, msg = classcol.reportinfo()
+ assert fspath == modcol.fspath
+ assert lineno == 1
+ assert msg == "TestClass"
+
+ def test_generator_reportinfo(self, testdir):
+ modcol = testdir.getmodulecol("""
+ # lineno 0
+ def test_gen():
+ def check(x):
+ assert x
+ yield check, 3
+ """)
+ gencol = testdir.collect_by_name(modcol, "test_gen")
+ fspath, lineno, modpath = gencol.reportinfo()
+ assert fspath == modcol.fspath
+ assert lineno == 1
+ assert modpath == "test_gen"
+
+ genitem = gencol.collect()[0]
+ fspath, lineno, modpath = genitem.reportinfo()
+ assert fspath == modcol.fspath
+ assert lineno == 2
+ assert modpath == "test_gen[0]"
+ """
+ def test_func():
+ pass
+ def test_genfunc():
+ def check(x):
+ pass
+ yield check, 3
+ class TestClass:
+ def test_method(self):
+ pass
+ """
+
+ def test_reportinfo_with_nasty_getattr(self, testdir):
+ # https://github.com/pytest-dev/pytest/issues/1204
+ modcol = testdir.getmodulecol("""
+ # lineno 0
+ class TestClass:
+ def __getattr__(self, name):
+ return "this is not an int"
+
+ def test_foo(self):
+ pass
+ """)
+ classcol = testdir.collect_by_name(modcol, "TestClass")
+ instance = classcol.collect()[0]
+ fspath, lineno, msg = instance.reportinfo()
+
+
+def test_customized_python_discovery(testdir):
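+ # The ini options switch discovery to check_* files, Check* classes and check* functions; the generated file is renamed to match.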
+ testdir.makeini("""
+ [pytest]
+ python_files=check_*.py
+ python_classes=Check
+ python_functions=check
+ """)
+ p = testdir.makepyfile("""
+ def check_simple():
+ pass
+ class CheckMyApp:
+ def check_meth(self):
+ pass
+ """)
+ p2 = p.new(basename=p.basename.replace("test", "check"))
+ p.move(p2)
+ result = testdir.runpytest("--collect-only", "-s")
+ result.stdout.fnmatch_lines([
+ "*check_customized*",
+ "*check_simple*",
+ "*CheckMyApp*",
+ "*check_meth*",
+ ])
+
+ result = testdir.runpytest()
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*2 passed*",
+ ])
+
+
+def test_customized_python_discovery_functions(testdir):
+ testdir.makeini("""
+ [pytest]
+ python_functions=_test
+ """)
+ testdir.makepyfile("""
+ def _test_underscore():
+ pass
+ """)
+ result = testdir.runpytest("--collect-only", "-s")
+ result.stdout.fnmatch_lines([
+ "*_test_underscore*",
+ ])
+
+ result = testdir.runpytest()
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*1 passed*",
+ ])
+
+
+def test_collector_attributes(testdir):
+ testdir.makeconftest("""
+ import pytest
+ def pytest_pycollect_makeitem(collector):
+ assert collector.Function == pytest.Function
+ assert collector.Class == pytest.Class
+ assert collector.Instance == pytest.Instance
+ assert collector.Module == pytest.Module
+ """)
+ testdir.makepyfile("""
+ def test_hello():
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*1 passed*",
+ ])
+
+def test_customize_through_attributes(testdir):
+ testdir.makeconftest("""
+ import pytest
+ class MyFunction(pytest.Function):
+ pass
+ class MyInstance(pytest.Instance):
+ Function = MyFunction
+ class MyClass(pytest.Class):
+ Instance = MyInstance
+
+ def pytest_pycollect_makeitem(collector, name, obj):
+ if name.startswith("MyTestClass"):
+ return MyClass(name, parent=collector)
+ """)
+ testdir.makepyfile("""
+ class MyTestClass:
+ def test_hello(self):
+ pass
+ """)
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*MyClass*",
+ "*MyInstance*",
+ "*MyFunction*test_hello*",
+ ])
+
+
+def test_unorderable_types(testdir):
+ testdir.makepyfile("""
+ class TestJoinEmpty:
+ pass
+
+ def make_test():
+ class Test:
+ pass
+ Test.__name__ = "TestFoo"
+ return Test
+ TestFoo = make_test()
+ """)
+ result = testdir.runpytest()
+ assert "TypeError" not in result.stdout.str()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+
+
+def test_collect_functools_partial(testdir):
+ """
+ Test that collection of functools.partial objects works, and that arguments
+ to the wrapped functions are dealt with correctly (see #811).
+ """
+ testdir.makepyfile("""
+ import functools
+ import pytest
+
+ @pytest.fixture
+ def fix1():
+ return 'fix1'
+
+ @pytest.fixture
+ def fix2():
+ return 'fix2'
+
+ def check1(i, fix1):
+ assert i == 2
+ assert fix1 == 'fix1'
+
+ def check2(fix1, i):
+ assert i == 2
+ assert fix1 == 'fix1'
+
+ def check3(fix1, i, fix2):
+ assert i == 2
+ assert fix1 == 'fix1'
+ assert fix2 == 'fix2'
+
+ test_ok_1 = functools.partial(check1, i=2)
+ test_ok_2 = functools.partial(check1, i=2, fix1='fix1')
+ test_ok_3 = functools.partial(check1, 2)
+ test_ok_4 = functools.partial(check2, i=2)
+ test_ok_5 = functools.partial(check3, i=2)
+ test_ok_6 = functools.partial(check3, i=2, fix1='fix1')
+
+ test_fail_1 = functools.partial(check2, 2)
+ test_fail_2 = functools.partial(check3, 2)
+ """)
+ result = testdir.inline_run()
+ result.assertoutcome(passed=6, failed=2)
+
+
+def test_dont_collect_non_function_callable(testdir):
+ """Test for issue https://github.com/pytest-dev/pytest/issues/331
+
+ In this case an INTERNALERROR occurred trying to report the failure of
+ a test like this one because pytest failed to get the source lines.
+ """
+ testdir.makepyfile("""
+ class Oh(object):
+ def __call__(self):
+ pass
+
+ test_a = Oh()
+
+ def test_real():
+ pass
+ """)
+ result = testdir.runpytest('-rw')
+ result.stdout.fnmatch_lines([
+ '*collected 1 item*',
+ 'WC2 *',
+ '*1 passed, 1 pytest-warnings in *',
+ ])
+
+
+def test_class_injection_does_not_break_collection(testdir):
+ """Tests whether injection during collection time will terminate testing.
+
+ In this case the error should not occur if the TestClass itself
+ is modified during collection time, and the original method list
+ is still used for collection.
+ """
+ testdir.makeconftest("""
+ from test_inject import TestClass
+ def pytest_generate_tests(metafunc):
+ TestClass.changed_var = {}
+ """)
+ testdir.makepyfile(test_inject='''
+ class TestClass(object):
+ def test_injection(self):
+ """Test being parametrized."""
+ pass
+ ''')
+ result = testdir.runpytest()
+ assert "RuntimeError: dictionary changed size during iteration" not in result.stdout.str()
+ result.stdout.fnmatch_lines(['*1 passed*'])
+
+
+def test_syntax_error_with_non_ascii_chars(testdir):
+ """Fix decoding issue while formatting SyntaxErrors during collection (#578)
+ """
+ testdir.makepyfile(u"""
+ # -*- coding: UTF-8 -*-
+
+ ☃
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ '*ERROR collecting*',
+ '*SyntaxError*',
+ '*1 error in*',
+ ])
diff --git a/testing/web-platform/tests/tools/pytest/testing/python/fixture.py b/testing/web-platform/tests/tools/pytest/testing/python/fixture.py
new file mode 100644
index 000000000..506d8426e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/python/fixture.py
@@ -0,0 +1,2693 @@
+from textwrap import dedent
+
+import _pytest._code
+import pytest
+import sys
+from _pytest import python as funcargs
+from _pytest.pytester import get_public_names
+from _pytest.python import FixtureLookupError
+
+
+def test_getfuncargnames():
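+ # getfuncargnames() should skip arguments that have defaults and, for bound methods, the implicit "self" parameter.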
+ def f(): pass
+ assert not funcargs.getfuncargnames(f)
+ def g(arg): pass
+ assert funcargs.getfuncargnames(g) == ('arg',)
+ def h(arg1, arg2="hello"): pass
+ assert funcargs.getfuncargnames(h) == ('arg1',)
+ def h(arg1, arg2, arg3="hello"): pass
+ assert funcargs.getfuncargnames(h) == ('arg1', 'arg2')
+ class A:
+ def f(self, arg1, arg2="hello"):
+ pass
+ assert funcargs.getfuncargnames(A().f) == ('arg1',)
+ if sys.version_info < (3,0):
+ assert funcargs.getfuncargnames(A.f) == ('arg1',)
+
+class TestFillFixtures:
+ def test_fillfuncargs_exposed(self):
+ # used by oejskit, kept for compatibility
+ assert pytest._fillfuncargs == funcargs.fillfixtures
+
+ def test_funcarg_lookupfails(self, testdir):
+ testdir.makepyfile("""
+ def pytest_funcarg__xyzsomething(request):
+ return 42
+
+ def test_func(some):
+ pass
+ """)
+ result = testdir.runpytest() # "--collect-only")
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "*def test_func(some)*",
+ "*fixture*some*not found*",
+ "*xyzsomething*",
+ ])
+
+ def test_funcarg_basic(self, testdir):
+ item = testdir.getitem("""
+ def pytest_funcarg__some(request):
+ return request.function.__name__
+ def pytest_funcarg__other(request):
+ return 42
+ def test_func(some, other):
+ pass
+ """)
+ funcargs.fillfixtures(item)
+ del item.funcargs["request"]
+ assert len(get_public_names(item.funcargs)) == 2
+ assert item.funcargs['some'] == "test_func"
+ assert item.funcargs['other'] == 42
+
+ def test_funcarg_lookup_modulelevel(self, testdir):
+ testdir.makepyfile("""
+ def pytest_funcarg__something(request):
+ return request.function.__name__
+
+ class TestClass:
+ def test_method(self, something):
+ assert something == "test_method"
+ def test_func(something):
+ assert something == "test_func"
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+ def test_funcarg_lookup_classlevel(self, testdir):
+ p = testdir.makepyfile("""
+ class TestClass:
+ def pytest_funcarg__something(self, request):
+ return request.instance
+ def test_method(self, something):
+ assert something is self
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*1 passed*"
+ ])
+
+ def test_conftest_funcargs_only_available_in_subdir(self, testdir):
+ sub1 = testdir.mkpydir("sub1")
+ sub2 = testdir.mkpydir("sub2")
+ sub1.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+ def pytest_funcarg__arg1(request):
+ pytest.raises(Exception, "request.getfuncargvalue('arg2')")
+ """))
+ sub2.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+ def pytest_funcarg__arg2(request):
+ pytest.raises(Exception, "request.getfuncargvalue('arg1')")
+ """))
+
+ sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
+ sub2.join("test_in_sub2.py").write("def test_2(arg2): pass")
+ result = testdir.runpytest("-v")
+ result.assert_outcomes(passed=2)
+
+ def test_extend_fixture_module_class(self, testdir):
+ testfile = testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture
+ def spam():
+ return 'spam'
+
+ class TestSpam:
+
+ @pytest.fixture
+ def spam(self, spam):
+ return spam * 2
+
+ def test_spam(self, spam):
+ assert spam == 'spamspam'
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*1 passed*"])
+ result = testdir.runpytest(testfile)
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_extend_fixture_conftest_module(self, testdir):
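+ # A module-level fixture may shadow a conftest fixture of the same name and receive the conftest value as its argument.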
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture
+ def spam():
+ return 'spam'
+ """)
+ testfile = testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture
+ def spam(spam):
+ return spam * 2
+
+ def test_spam(spam):
+ assert spam == 'spamspam'
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*1 passed*"])
+ result = testdir.runpytest(testfile)
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_extend_fixture_conftest_conftest(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture
+ def spam():
+ return 'spam'
+ """)
+ pkg = testdir.mkpydir("pkg")
+ pkg.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+
+ @pytest.fixture
+ def spam(spam):
+ return spam * 2
+ """))
+ testfile = pkg.join("test_spam.py")
+ testfile.write(_pytest._code.Source("""
+ def test_spam(spam):
+ assert spam == "spamspam"
+ """))
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*1 passed*"])
+ result = testdir.runpytest(testfile)
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_extend_fixture_conftest_plugin(self, testdir):
+ testdir.makepyfile(testplugin="""
+ import pytest
+
+ @pytest.fixture
+ def foo():
+ return 7
+ """)
+ testdir.syspathinsert()
+ testdir.makeconftest("""
+ import pytest
+
+ pytest_plugins = 'testplugin'
+
+ @pytest.fixture
+ def foo(foo):
+ return foo + 7
+ """)
+ testdir.makepyfile("""
+ def test_foo(foo):
+ assert foo == 14
+ """)
+ result = testdir.runpytest('-s')
+ assert result.ret == 0
+
+ def test_extend_fixture_plugin_plugin(self, testdir):
+ # Two plugins should extend each other in loading order
+ testdir.makepyfile(testplugin0="""
+ import pytest
+
+ @pytest.fixture
+ def foo():
+ return 7
+ """)
+ testdir.makepyfile(testplugin1="""
+ import pytest
+
+ @pytest.fixture
+ def foo(foo):
+ return foo + 7
+ """)
+ testdir.syspathinsert()
+ testdir.makepyfile("""
+ pytest_plugins = ['testplugin0', 'testplugin1']
+
+ def test_foo(foo):
+ assert foo == 14
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 0
+
+ def test_override_parametrized_fixture_conftest_module(self, testdir):
+ """Test override of the parametrized fixture with non-parametrized one on the test module level."""
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture(params=[1, 2, 3])
+ def spam(request):
+ return request.param
+ """)
+ testfile = testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture
+ def spam():
+ return 'spam'
+
+ def test_spam(spam):
+ assert spam == 'spam'
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*1 passed*"])
+ result = testdir.runpytest(testfile)
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_override_parametrized_fixture_conftest_conftest(self, testdir):
+ """Test override of the parametrized fixture with non-parametrized one on the conftest level."""
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture(params=[1, 2, 3])
+ def spam(request):
+ return request.param
+ """)
+ subdir = testdir.mkpydir('subdir')
+ subdir.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+
+ @pytest.fixture
+ def spam():
+ return 'spam'
+ """))
+ testfile = subdir.join("test_spam.py")
+ testfile.write(_pytest._code.Source("""
+ def test_spam(spam):
+ assert spam == "spam"
+ """))
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*1 passed*"])
+ result = testdir.runpytest(testfile)
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_override_non_parametrized_fixture_conftest_module(self, testdir):
+ """Test override of the non-parametrized fixture with parametrized one on the test module level."""
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture
+ def spam():
+ return 'spam'
+ """)
+ testfile = testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(params=[1, 2, 3])
+ def spam(request):
+ return request.param
+
+ params = {'spam': 1}
+
+ def test_spam(spam):
+ assert spam == params['spam']
+ params['spam'] += 1
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*3 passed*"])
+ result = testdir.runpytest(testfile)
+ result.stdout.fnmatch_lines(["*3 passed*"])
+
+ def test_override_non_parametrized_fixture_conftest_conftest(self, testdir):
+ """Test override of the non-parametrized fixture with parametrized one on the conftest level."""
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture
+ def spam():
+ return 'spam'
+ """)
+ subdir = testdir.mkpydir('subdir')
+ subdir.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+
+ @pytest.fixture(params=[1, 2, 3])
+ def spam(request):
+ return request.param
+ """))
+ testfile = subdir.join("test_spam.py")
+ testfile.write(_pytest._code.Source("""
+ params = {'spam': 1}
+
+ def test_spam(spam):
+ assert spam == params['spam']
+ params['spam'] += 1
+ """))
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*3 passed*"])
+ result = testdir.runpytest(testfile)
+ result.stdout.fnmatch_lines(["*3 passed*"])
+
+ def test_autouse_fixture_plugin(self, testdir):
+ # A fixture from a plugin has no baseid set, which screwed up
+ # the autouse fixture handling.
+ testdir.makepyfile(testplugin="""
+ import pytest
+
+ @pytest.fixture(autouse=True)
+ def foo(request):
+ request.function.foo = 7
+ """)
+ testdir.syspathinsert()
+ testdir.makepyfile("""
+ pytest_plugins = 'testplugin'
+
+ def test_foo(request):
+ assert request.function.foo == 7
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 0
+
+ def test_funcarg_lookup_error(self, testdir):
+ testdir.makepyfile("""
+ def test_lookup_error(unknown):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*ERROR*test_lookup_error*",
+ "*def test_lookup_error(unknown):*",
+ "*fixture*unknown*not found*",
+ "*available fixtures*",
+ "*1 error*",
+ ])
+ assert "INTERNAL" not in result.stdout.str()
+
+ def test_fixture_excinfo_leak(self, testdir):
+ # on python2, sys.exc_info() would leak into fixture executions
+ testdir.makepyfile("""
+ import sys
+ import traceback
+ import pytest
+
+ @pytest.fixture
+ def leak():
+ if sys.exc_info()[0]: # python3 bug :)
+ traceback.print_exc()
+ #fails
+ assert sys.exc_info() == (None, None, None)
+
+ def test_leak(leak):
+ if sys.exc_info()[0]: # python3 bug :)
+ traceback.print_exc()
+ assert sys.exc_info() == (None, None, None)
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 0
+
+
+class TestRequestBasic:
+ def test_request_attributes(self, testdir):
+ item = testdir.getitem("""
+ def pytest_funcarg__something(request): pass
+ def test_func(something): pass
+ """)
+ req = funcargs.FixtureRequest(item)
+ assert req.function == item.obj
+ assert req.keywords == item.keywords
+ assert hasattr(req.module, 'test_func')
+ assert req.cls is None
+ assert req.function.__name__ == "test_func"
+ assert req.config == item.config
+ assert repr(req).find(req.function.__name__) != -1
+
+ def test_request_attributes_method(self, testdir):
+ item, = testdir.getitems("""
+ class TestB:
+ def pytest_funcarg__something(self, request):
+ return 1
+ def test_func(self, something):
+ pass
+ """)
+ req = item._request
+ assert req.cls.__name__ == "TestB"
+ assert req.instance.__class__ == req.cls
+
+ def XXXtest_request_contains_funcarg_arg2fixturedefs(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def pytest_funcarg__something(request):
+ pass
+ class TestClass:
+ def test_method(self, something):
+ pass
+ """)
+ item1, = testdir.genitems([modcol])
+ assert item1.name == "test_method"
+ arg2fixturedefs = funcargs.FixtureRequest(item1)._arg2fixturedefs
+ assert len(arg2fixturedefs) == 1
+ assert arg2fixturedefs[0].__name__ == "pytest_funcarg__something"
+
+ def test_getfuncargvalue_recursive(self, testdir):
+ testdir.makeconftest("""
+ def pytest_funcarg__something(request):
+ return 1
+ """)
+ testdir.makepyfile("""
+ def pytest_funcarg__something(request):
+ return request.getfuncargvalue("something") + 1
+ def test_func(something):
+ assert something == 2
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_getfuncargvalue(self, testdir):
+ item = testdir.getitem("""
+ l = [2]
+ def pytest_funcarg__something(request): return 1
+ def pytest_funcarg__other(request):
+ return l.pop()
+ def test_func(something): pass
+ """)
+ req = item._request
+ pytest.raises(FixtureLookupError, req.getfuncargvalue, "notexists")
+ val = req.getfuncargvalue("something")
+ assert val == 1
+ val = req.getfuncargvalue("something")
+ assert val == 1
+ val2 = req.getfuncargvalue("other")
+ assert val2 == 2
+ val2 = req.getfuncargvalue("other") # see about caching
+ assert val2 == 2
+ pytest._fillfuncargs(item)
+ assert item.funcargs["something"] == 1
+ assert len(get_public_names(item.funcargs)) == 2
+ assert "request" in item.funcargs
+ #assert item.funcargs == {'something': 1, "other": 2}
+
+ def test_request_addfinalizer(self, testdir):
+ item = testdir.getitem("""
+ teardownlist = []
+ def pytest_funcarg__something(request):
+ request.addfinalizer(lambda: teardownlist.append(1))
+ def test_func(something): pass
+ """)
+ item.session._setupstate.prepare(item)
+ pytest._fillfuncargs(item)
+ # successively check finalization calls
+ teardownlist = item.getparent(pytest.Module).obj.teardownlist
+ ss = item.session._setupstate
+ assert not teardownlist
+ ss.teardown_exact(item, None)
+ print(ss.stack)
+ assert teardownlist == [1]
+
+ def test_request_addfinalizer_failing_setup(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = [1]
+ @pytest.fixture
+ def myfix(request):
+ request.addfinalizer(l.pop)
+ assert 0
+ def test_fix(myfix):
+ pass
+ def test_finalizer_ran():
+ assert not l
+ """)
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(failed=1, passed=1)
+
+ def test_request_addfinalizer_failing_setup_module(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = [1, 2]
+ @pytest.fixture(scope="module")
+ def myfix(request):
+ request.addfinalizer(l.pop)
+ request.addfinalizer(l.pop)
+ assert 0
+ def test_fix(myfix):
+ pass
+ """)
+ reprec = testdir.inline_run("-s")
+ mod = reprec.getcalls("pytest_runtest_setup")[0].item.module
+ assert not mod.l
+
+
+ def test_request_addfinalizer_partial_setup_failure(self, testdir):
+ p = testdir.makepyfile("""
+ l = []
+ def pytest_funcarg__something(request):
+ request.addfinalizer(lambda: l.append(None))
+ def test_func(something, missingarg):
+ pass
+ def test_second():
+ assert len(l) == 1
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*1 error*" # XXX the whole module collection fails
+ ])
+
+ def test_request_getmodulepath(self, testdir):
+ modcol = testdir.getmodulecol("def test_somefunc(): pass")
+ item, = testdir.genitems([modcol])
+ req = funcargs.FixtureRequest(item)
+ assert req.fspath == modcol.fspath
+
+ def test_request_fixturenames(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ from _pytest.pytester import get_public_names
+ @pytest.fixture()
+ def arg1():
+ pass
+ @pytest.fixture()
+ def farg(arg1):
+ pass
+ @pytest.fixture(autouse=True)
+ def sarg(tmpdir):
+ pass
+ def test_function(request, farg):
+ assert set(get_public_names(request.fixturenames)) == \
+ set(["tmpdir", "sarg", "arg1", "request", "farg",
+ "tmpdir_factory"])
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_funcargnames_compatattr(self, testdir):
+ testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ assert metafunc.funcargnames == metafunc.fixturenames
+ def pytest_funcarg__fn(request):
+ assert request._pyfuncitem.funcargnames == \
+ request._pyfuncitem.fixturenames
+ return request.funcargnames, request.fixturenames
+
+ def test_hello(fn):
+ assert fn[0] == fn[1]
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_setupdecorator_and_xunit(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(scope='module', autouse=True)
+ def setup_module():
+ l.append("module")
+ @pytest.fixture(autouse=True)
+ def setup_function():
+ l.append("function")
+
+ def test_func():
+ pass
+
+ class TestClass:
+ @pytest.fixture(scope="class", autouse=True)
+ def setup_class(self):
+ l.append("class")
+ @pytest.fixture(autouse=True)
+ def setup_method(self):
+ l.append("method")
+ def test_method(self):
+ pass
+ def test_all():
+ assert l == ["module", "function", "class",
+ "function", "method", "function"]
+ """)
+ reprec = testdir.inline_run("-v")
+ reprec.assertoutcome(passed=3)
+
+ def test_fixtures_sub_subdir_normalize_sep(self, testdir):
+ # this tests that normalization of nodeids takes place
+ b = testdir.mkdir("tests").mkdir("unit")
+ b.join("conftest.py").write(_pytest._code.Source("""
+ def pytest_funcarg__arg1():
+ pass
+ """))
+ p = b.join("test_module.py")
+ p.write("def test_func(arg1): pass")
+ result = testdir.runpytest(p, "--fixtures")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines("""
+ *fixtures defined*conftest*
+ *arg1*
+ """)
+
+ def test_show_fixtures_color_yes(self, testdir):
+ testdir.makepyfile("def test_this(): assert 1")
+ result = testdir.runpytest('--color=yes', '--fixtures')
+ assert '\x1b[32mtmpdir' in result.stdout.str()
+
+ def test_newstyle_with_request(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture()
+ def arg(request):
+ pass
+ def test_1(arg):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_setupcontext_no_param(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(params=[1,2])
+ def arg(request):
+ return request.param
+
+ @pytest.fixture(autouse=True)
+ def mysetup(request, arg):
+ assert not hasattr(request, "param")
+ def test_1(arg):
+ assert arg in (1,2)
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+class TestRequestMarking:
+ def test_applymarker(self, testdir):
+ item1,item2 = testdir.getitems("""
+ def pytest_funcarg__something(request):
+ pass
+ class TestClass:
+ def test_func1(self, something):
+ pass
+ def test_func2(self, something):
+ pass
+ """)
+ req1 = funcargs.FixtureRequest(item1)
+ assert 'xfail' not in item1.keywords
+ req1.applymarker(pytest.mark.xfail)
+ assert 'xfail' in item1.keywords
+ assert 'skipif' not in item1.keywords
+ req1.applymarker(pytest.mark.skipif)
+ assert 'skipif' in item1.keywords
+ pytest.raises(ValueError, "req1.applymarker(42)")
+
+ def test_accesskeywords(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture()
+ def keywords(request):
+ return request.keywords
+ @pytest.mark.XYZ
+ def test_function(keywords):
+ assert keywords["XYZ"]
+ assert "abc" not in keywords
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_accessmarker_dynamic(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ @pytest.fixture()
+ def keywords(request):
+ return request.keywords
+
+ @pytest.fixture(scope="class", autouse=True)
+ def marking(request):
+ request.applymarker(pytest.mark.XYZ("hello"))
+ """)
+ testdir.makepyfile("""
+ import pytest
+ def test_fun1(keywords):
+ assert keywords["XYZ"] is not None
+ assert "abc" not in keywords
+ def test_fun2(keywords):
+ assert keywords["XYZ"] is not None
+ assert "abc" not in keywords
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+class TestRequestCachedSetup:
+ def test_request_cachedsetup_defaultmodule(self, testdir):
+ reprec = testdir.inline_runsource("""
+ mysetup = ["hello",].pop
+
+ def pytest_funcarg__something(request):
+ return request.cached_setup(mysetup, scope="module")
+
+ def test_func1(something):
+ assert something == "hello"
+ class TestClass:
+ def test_func1a(self, something):
+ assert something == "hello"
+ """)
+ reprec.assertoutcome(passed=2)
+
+ def test_request_cachedsetup_class(self, testdir):
+ reprec = testdir.inline_runsource("""
+ mysetup = ["hello", "hello2", "hello3"].pop
+
+ def pytest_funcarg__something(request):
+ return request.cached_setup(mysetup, scope="class")
+ def test_func1(something):
+ assert something == "hello3"
+ def test_func2(something):
+ assert something == "hello2"
+ class TestClass:
+ def test_func1a(self, something):
+ assert something == "hello"
+ def test_func2b(self, something):
+ assert something == "hello"
+ """)
+ reprec.assertoutcome(passed=4)
+
+ def test_request_cachedsetup_extrakey(self, testdir):
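+ # Distinct extrakey values get independent cache slots; calling again with the same key returns the cached value without re-running setup.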
+ item1 = testdir.getitem("def test_func(): pass")
+ req1 = funcargs.FixtureRequest(item1)
+ l = ["hello", "world"]
+ def setup():
+ return l.pop()
+ ret1 = req1.cached_setup(setup, extrakey=1)
+ ret2 = req1.cached_setup(setup, extrakey=2)
+ assert ret2 == "hello"
+ assert ret1 == "world"
+ ret1b = req1.cached_setup(setup, extrakey=1)
+ ret2b = req1.cached_setup(setup, extrakey=2)
+ assert ret1 == ret1b
+ assert ret2 == ret2b
+
+ def test_request_cachedsetup_cache_deletion(self, testdir):
+ item1 = testdir.getitem("def test_func(): pass")
+ req1 = funcargs.FixtureRequest(item1)
+ l = []
+ def setup():
+ l.append("setup")
+ def teardown(val):
+ l.append("teardown")
+ req1.cached_setup(setup, teardown, scope="function")
+ assert l == ['setup']
+ # artificial call of finalizer
+ setupstate = req1._pyfuncitem.session._setupstate
+ setupstate._callfinalizers(item1)
+ assert l == ["setup", "teardown"]
+ req1.cached_setup(setup, teardown, scope="function")
+ assert l == ["setup", "teardown", "setup"]
+ setupstate._callfinalizers(item1)
+ assert l == ["setup", "teardown", "setup", "teardown"]
+
+ def test_request_cached_setup_two_args(self, testdir):
+ testdir.makepyfile("""
+ def pytest_funcarg__arg1(request):
+ return request.cached_setup(lambda: 42)
+ def pytest_funcarg__arg2(request):
+ return request.cached_setup(lambda: 17)
+ def test_two_different_setups(arg1, arg2):
+ assert arg1 != arg2
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "*1 passed*"
+ ])
+
+ def test_request_cached_setup_getfuncargvalue(self, testdir):
+ testdir.makepyfile("""
+ def pytest_funcarg__arg1(request):
+ arg1 = request.getfuncargvalue("arg2")
+ return request.cached_setup(lambda: arg1 + 1)
+ def pytest_funcarg__arg2(request):
+ return request.cached_setup(lambda: 10)
+ def test_two_funcarg(arg1):
+ assert arg1 == 11
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "*1 passed*"
+ ])
+
+ def test_request_cached_setup_functional(self, testdir):
+ testdir.makepyfile(test_0="""
+ l = []
+ def pytest_funcarg__something(request):
+ val = request.cached_setup(fsetup, fteardown)
+ return val
+ def fsetup(mycache=[1]):
+ l.append(mycache.pop())
+ return l
+ def fteardown(something):
+ l.remove(something[0])
+ l.append(2)
+ def test_list_once(something):
+ assert something == [1]
+ def test_list_twice(something):
+ assert something == [1]
+ """)
+ testdir.makepyfile(test_1="""
+ import test_0 # should have run already
+ def test_check_test0_has_teardown_correct():
+ assert test_0.l == [2]
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "*3 passed*"
+ ])
+
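+        # A session-scoped teardown that raises (here 3/x with x == 0, i.e.
+        # ZeroDivisionError) is expected to be reported and to make the run
+        # exit with a non-zero return code.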
+ def test_issue117_sessionscopeteardown(self, testdir):
+ testdir.makepyfile("""
+ def pytest_funcarg__app(request):
+ app = request.cached_setup(
+ scope='session',
+ setup=lambda: 0,
+ teardown=lambda x: 3/x)
+ return app
+ def test_func(app):
+ pass
+ """)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "*3/x*",
+ "*ZeroDivisionError*",
+ ])
+
+class TestFixtureUsages:
+ def test_noargfixturedec(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture
+ def arg1():
+ return 1
+
+ def test_func(arg1):
+ assert arg1 == 1
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_receives_funcargs(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture()
+ def arg1():
+ return 1
+
+ @pytest.fixture()
+ def arg2(arg1):
+ return arg1 + 1
+
+ def test_add(arg2):
+ assert arg2 == 2
+ def test_all(arg1, arg2):
+ assert arg1 == 1
+ assert arg2 == 2
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+ def test_receives_funcargs_scope_mismatch(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope="function")
+ def arg1():
+ return 1
+
+ @pytest.fixture(scope="module")
+ def arg2(arg1):
+ return arg1 + 1
+
+ def test_add(arg2):
+ assert arg2 == 2
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*ScopeMismatch*involved factories*",
+ "* def arg2*",
+ "* def arg1*",
+ "*1 error*"
+ ])
+
+ def test_receives_funcargs_scope_mismatch_issue660(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope="function")
+ def arg1():
+ return 1
+
+ @pytest.fixture(scope="module")
+ def arg2(arg1):
+ return arg1 + 1
+
+ def test_add(arg1, arg2):
+ assert arg2 == 2
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*ScopeMismatch*involved factories*",
+ "* def arg2*",
+ "*1 error*"
+ ])
+
+ def test_funcarg_parametrized_and_used_twice(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(params=[1,2])
+ def arg1(request):
+ l.append(1)
+ return request.param
+
+ @pytest.fixture()
+ def arg2(arg1):
+ return arg1 + 1
+
+ def test_add(arg1, arg2):
+ assert arg2 == arg1 + 1
+ assert len(l) == arg1
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*2 passed*"
+ ])
+
+ def test_factory_uses_unknown_funcarg_as_dependency_error(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture()
+ def fail(missing):
+ return
+
+ @pytest.fixture()
+ def call_fail(fail):
+ return
+
+ def test_missing(call_fail):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("""
+ *pytest.fixture()*
+ *def call_fail(fail)*
+ *pytest.fixture()*
+ *def fail*
+ *fixture*'missing'*not found*
+ """)
+
+ def test_factory_setup_as_classes_fails(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ class arg1:
+ def __init__(self, request):
+ self.x = 1
+ arg1 = pytest.fixture()(arg1)
+
+ """)
+ reprec = testdir.inline_run()
+ l = reprec.getfailedcollections()
+ assert len(l) == 1
+
+ def test_request_can_be_overridden(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture()
+ def request(request):
+ request.a = 1
+ return request
+ def test_request(request):
+ assert request.a == 1
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_usefixtures_marker(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ l = []
+
+ @pytest.fixture(scope="class")
+ def myfix(request):
+ request.cls.hello = "world"
+ l.append(1)
+
+ class TestClass:
+ def test_one(self):
+ assert self.hello == "world"
+ assert len(l) == 1
+ def test_two(self):
+ assert self.hello == "world"
+ assert len(l) == 1
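+            # calling the mark object on the class is equivalent to decorating
+            # TestClass with @pytest.mark.usefixtures("myfix")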
+ pytest.mark.usefixtures("myfix")(TestClass)
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+ def test_usefixtures_ini(self, testdir):
+ testdir.makeini("""
+ [pytest]
+ usefixtures = myfix
+ """)
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture(scope="class")
+ def myfix(request):
+ request.cls.hello = "world"
+
+ """)
+ testdir.makepyfile("""
+ class TestClass:
+ def test_one(self):
+ assert self.hello == "world"
+ def test_two(self):
+ assert self.hello == "world"
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+ def test_usefixtures_seen_in_showmarkers(self, testdir):
+ result = testdir.runpytest("--markers")
+ result.stdout.fnmatch_lines("""
+ *usefixtures(fixturename1*mark tests*fixtures*
+ """)
+
+ def test_request_instance_issue203(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ class TestClass:
+ @pytest.fixture
+ def setup1(self, request):
+ assert self == request.instance
+ self.arg1 = 1
+ def test_hello(self, setup1):
+ assert self.arg1 == 1
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_fixture_parametrized_with_iterator(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ l = []
+ def f():
+ yield 1
+ yield 2
+ dec = pytest.fixture(scope="module", params=f())
+
+ @dec
+ def arg(request):
+ return request.param
+ @dec
+ def arg2(request):
+ return request.param
+
+ def test_1(arg):
+ l.append(arg)
+ def test_2(arg2):
+ l.append(arg2*10)
+ """)
+ reprec = testdir.inline_run("-v")
+ reprec.assertoutcome(passed=4)
+ l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
+ assert l == [1,2, 10,20]
+
+
+class TestFixtureManagerParseFactories:
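+    # The old-style pytest_funcarg__testdir factory below wraps the builtin
+    # testdir fixture and pre-populates a conftest with legacy
+    # pytest_funcarg__ factories used by the tests in this class.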
+ def pytest_funcarg__testdir(self, request):
+ testdir = request.getfuncargvalue("testdir")
+ testdir.makeconftest("""
+ def pytest_funcarg__hello(request):
+ return "conftest"
+
+ def pytest_funcarg__fm(request):
+ return request._fixturemanager
+
+ def pytest_funcarg__item(request):
+ return request._pyfuncitem
+ """)
+ return testdir
+
+ def test_parsefactories_evil_objects_issue214(self, testdir):
+ testdir.makepyfile("""
+ class A:
+ def __call__(self):
+ pass
+ def __getattr__(self, name):
+ raise RuntimeError()
+ a = A()
+ def test_hello():
+ pass
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1, failed=0)
+
+ def test_parsefactories_conftest(self, testdir):
+ testdir.makepyfile("""
+ def test_hello(item, fm):
+ for name in ("fm", "hello", "item"):
+ faclist = fm.getfixturedefs(name, item.nodeid)
+ assert len(faclist) == 1
+ fac = faclist[0]
+ assert fac.func.__name__ == "pytest_funcarg__" + name
+ """)
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(passed=1)
+
+ def test_parsefactories_conftest_and_module_and_class(self, testdir):
+ testdir.makepyfile("""
+ def pytest_funcarg__hello(request):
+ return "module"
+ class TestClass:
+ def pytest_funcarg__hello(self, request):
+ return "class"
+ def test_hello(self, item, fm):
+ faclist = fm.getfixturedefs("hello", item.nodeid)
+ print (faclist)
+ assert len(faclist) == 3
+ assert faclist[0].func(item._request) == "conftest"
+ assert faclist[1].func(item._request) == "module"
+ assert faclist[2].func(item._request) == "class"
+ """)
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(passed=1)
+
+ def test_parsefactories_relative_node_ids(self, testdir):
+ # example mostly taken from:
+ # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
+ runner = testdir.mkdir("runner")
+ package = testdir.mkdir("package")
+ package.join("conftest.py").write(dedent("""\
+ import pytest
+ @pytest.fixture
+ def one():
+ return 1
+ """))
+ package.join("test_x.py").write(dedent("""\
+ def test_x(one):
+ assert one == 1
+ """))
+ sub = package.mkdir("sub")
+ sub.join("__init__.py").ensure()
+ sub.join("conftest.py").write(dedent("""\
+ import pytest
+ @pytest.fixture
+ def one():
+ return 2
+ """))
+ sub.join("test_y.py").write(dedent("""\
+ def test_x(one):
+ assert one == 2
+ """))
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+ with runner.as_cwd():
+ reprec = testdir.inline_run("..")
+ reprec.assertoutcome(passed=2)
+
+
+class TestAutouseDiscovery:
+ def pytest_funcarg__testdir(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ @pytest.fixture(autouse=True)
+ def perfunction(request, tmpdir):
+ pass
+
+ @pytest.fixture()
+ def arg1(tmpdir):
+ pass
+ @pytest.fixture(autouse=True)
+ def perfunction2(arg1):
+ pass
+
+ def pytest_funcarg__fm(request):
+ return request._fixturemanager
+
+ def pytest_funcarg__item(request):
+ return request._pyfuncitem
+ """)
+ return testdir
+
+ def test_parsefactories_conftest(self, testdir):
+ testdir.makepyfile("""
+ from _pytest.pytester import get_public_names
+ def test_check_setup(item, fm):
+ autousenames = fm._getautousenames(item.nodeid)
+ assert len(get_public_names(autousenames)) == 2
+ assert "perfunction2" in autousenames
+ assert "perfunction" in autousenames
+ """)
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(passed=1)
+
+ def test_two_classes_separated_autouse(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ class TestA:
+ l = []
+ @pytest.fixture(autouse=True)
+ def setup1(self):
+ self.l.append(1)
+ def test_setup1(self):
+ assert self.l == [1]
+ class TestB:
+ l = []
+ @pytest.fixture(autouse=True)
+ def setup2(self):
+ self.l.append(1)
+ def test_setup2(self):
+ assert self.l == [1]
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+ def test_setup_at_classlevel(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ class TestClass:
+ @pytest.fixture(autouse=True)
+ def permethod(self, request):
+ request.instance.funcname = request.function.__name__
+ def test_method1(self):
+ assert self.funcname == "test_method1"
+ def test_method2(self):
+ assert self.funcname == "test_method2"
+ """)
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(passed=2)
+
+ @pytest.mark.xfail(reason="'enabled' feature not implemented")
+ def test_setup_enabled_functionnode(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ def enabled(parentnode, markers):
+ return "needsdb" in markers
+
+ @pytest.fixture(params=[1,2])
+ def db(request):
+ return request.param
+
+ @pytest.fixture(enabled=enabled, autouse=True)
+ def createdb(db):
+ pass
+
+ def test_func1(request):
+ assert "db" not in request.fixturenames
+
+ @pytest.mark.needsdb
+ def test_func2(request):
+ assert "db" in request.fixturenames
+ """)
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(passed=2)
+
+ def test_callables_nocode(self, testdir):
+ """
+        an imported mock.call would break setup/factory discovery
+        because it is callable and its __code__ is not a code object
+ """
+ testdir.makepyfile("""
+ class _call(tuple):
+ def __call__(self, *k, **kw):
+ pass
+ def __getattr__(self, k):
+ return self
+
+ call = _call()
+ """)
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(failed=0, passed=0)
+
+ def test_autouse_in_conftests(self, testdir):
+ a = testdir.mkdir("a")
+ b = testdir.mkdir("a1")
+ conftest = testdir.makeconftest("""
+ import pytest
+ @pytest.fixture(autouse=True)
+ def hello():
+ xxx
+ """)
+ conftest.move(a.join(conftest.basename))
+ a.join("test_something.py").write("def test_func(): pass")
+ b.join("test_otherthing.py").write("def test_func(): pass")
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("""
+ *1 passed*1 error*
+ """)
+
+ def test_autouse_in_module_and_two_classes(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(autouse=True)
+ def append1():
+ l.append("module")
+ def test_x():
+ assert l == ["module"]
+
+ class TestA:
+ @pytest.fixture(autouse=True)
+ def append2(self):
+ l.append("A")
+ def test_hello(self):
+ assert l == ["module", "module", "A"], l
+ class TestA2:
+ def test_world(self):
+ assert l == ["module", "module", "A", "module"], l
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=3)
+
+
+class TestAutouseManagement:
+ def test_autouse_conftest_mid_directory(self, testdir):
+ pkgdir = testdir.mkpydir("xyz123")
+ pkgdir.join("conftest.py").write(_pytest._code.Source("""
+ import pytest
+ @pytest.fixture(autouse=True)
+ def app():
+ import sys
+ sys._myapp = "hello"
+ """))
+ t = pkgdir.ensure("tests", "test_app.py")
+ t.write(_pytest._code.Source("""
+ import sys
+ def test_app():
+ assert sys._myapp == "hello"
+ """))
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(passed=1)
+
+ def test_autouse_honored_for_yield(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(autouse=True)
+ def tst():
+ global x
+ x = 3
+ def test_gen():
+ def f(hello):
+ assert x == abs(hello)
+ yield f, 3
+ yield f, -3
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+
+
+ def test_funcarg_and_setup(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(scope="module")
+ def arg():
+ l.append(1)
+ return 0
+ @pytest.fixture(scope="module", autouse=True)
+ def something(arg):
+ l.append(2)
+
+ def test_hello(arg):
+ assert len(l) == 2
+ assert l == [1,2]
+ assert arg == 0
+
+ def test_hello2(arg):
+ assert len(l) == 2
+ assert l == [1,2]
+ assert arg == 0
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+ def test_uses_parametrized_resource(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(params=[1,2])
+ def arg(request):
+ return request.param
+
+ @pytest.fixture(autouse=True)
+ def something(arg):
+ l.append(arg)
+
+ def test_hello():
+ if len(l) == 1:
+ assert l == [1]
+ elif len(l) == 2:
+ assert l == [1, 2]
+ else:
+ 0/0
+
+ """)
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(passed=2)
+
+ def test_session_parametrized_function(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ l = []
+
+ @pytest.fixture(scope="session", params=[1,2])
+ def arg(request):
+ return request.param
+
+ @pytest.fixture(scope="function", autouse=True)
+ def append(request, arg):
+ if request.function.__name__ == "test_some":
+ l.append(arg)
+
+ def test_some():
+ pass
+
+ def test_result(arg):
+ assert len(l) == arg
+ assert l[:arg] == [1,2][:arg]
+ """)
+ reprec = testdir.inline_run("-v", "-s")
+ reprec.assertoutcome(passed=4)
+
+ def test_class_function_parametrization_finalization(self, testdir):
+ p = testdir.makeconftest("""
+ import pytest
+ import pprint
+
+ l = []
+
+ @pytest.fixture(scope="function", params=[1,2])
+ def farg(request):
+ return request.param
+
+ @pytest.fixture(scope="class", params=list("ab"))
+ def carg(request):
+ return request.param
+
+ @pytest.fixture(scope="function", autouse=True)
+ def append(request, farg, carg):
+ def fin():
+ l.append("fin_%s%s" % (carg, farg))
+ request.addfinalizer(fin)
+ """)
+ testdir.makepyfile("""
+ import pytest
+
+ class TestClass:
+ def test_1(self):
+ pass
+ class TestClass2:
+ def test_2(self):
+ pass
+ """)
+ reprec = testdir.inline_run("-v","-s")
+ reprec.assertoutcome(passed=8)
+ config = reprec.getcalls("pytest_unconfigure")[0].config
+ l = config.pluginmanager._getconftestmodules(p)[0].l
+ assert l == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2
+
+ def test_scope_ordering(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(scope="function", autouse=True)
+ def fappend2():
+ l.append(2)
+ @pytest.fixture(scope="class", autouse=True)
+ def classappend3():
+ l.append(3)
+ @pytest.fixture(scope="module", autouse=True)
+ def mappend():
+ l.append(1)
+
+ class TestHallo:
+ def test_method(self):
+ assert l == [1,3,2]
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_parametrization_setup_teardown_ordering(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ def pytest_generate_tests(metafunc):
+ if metafunc.cls is not None:
+ metafunc.parametrize("item", [1,2], scope="class")
+ class TestClass:
+ @pytest.fixture(scope="class", autouse=True)
+ def addteardown(self, item, request):
+ l.append("setup-%d" % item)
+ request.addfinalizer(lambda: l.append("teardown-%d" % item))
+ def test_step1(self, item):
+ l.append("step1-%d" % item)
+ def test_step2(self, item):
+ l.append("step2-%d" % item)
+
+ def test_finish():
+ print (l)
+ assert l == ["setup-1", "step1-1", "step2-1", "teardown-1",
+ "setup-2", "step1-2", "step2-2", "teardown-2",]
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=5)
+
+ def test_ordering_autouse_before_explicit(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ l = []
+ @pytest.fixture(autouse=True)
+ def fix1():
+ l.append(1)
+ @pytest.fixture()
+ def arg1():
+ l.append(2)
+ def test_hello(arg1):
+ assert l == [1,2]
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ @pytest.mark.issue226
+ @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00","p01"])
+ @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10","p11"])
+ def test_ordering_dependencies_torndown_first(self, testdir, param1, param2):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(%(param1)s)
+ def arg1(request):
+ request.addfinalizer(lambda: l.append("fin1"))
+ l.append("new1")
+ @pytest.fixture(%(param2)s)
+ def arg2(request, arg1):
+ request.addfinalizer(lambda: l.append("fin2"))
+ l.append("new2")
+
+ def test_arg(arg2):
+ pass
+ def test_check():
+ assert l == ["new1", "new2", "fin2", "fin1"]
+ """ % locals())
+ reprec = testdir.inline_run("-s")
+ reprec.assertoutcome(passed=2)
+
+
+class TestFixtureMarker:
+ def test_parametrize(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(params=["a", "b", "c"])
+ def arg(request):
+ return request.param
+ l = []
+ def test_param(arg):
+ l.append(arg)
+ def test_result():
+ assert l == list("abc")
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=4)
+
+ def test_multiple_parametrization_issue_736(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(params=[1,2,3])
+ def foo(request):
+ return request.param
+
+ @pytest.mark.parametrize('foobar', [4,5,6])
+ def test_issue(foo, foobar):
+ assert foo in [1,2,3]
+ assert foobar in [4,5,6]
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=9)
+
+ @pytest.mark.parametrize('param_args', ["'fixt, val'", "'fixt,val'", "['fixt', 'val']", "('fixt', 'val')"])
+ def test_override_parametrized_fixture_issue_979(self, testdir, param_args):
+ """Make sure a parametrized argument can override a parametrized fixture.
+
+ This was a regression introduced in the fix for #736.
+ """
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(params=[1, 2])
+ def fixt(request):
+ return request.param
+
+ @pytest.mark.parametrize(%s, [(3, 'x'), (4, 'x')])
+ def test_foo(fixt, val):
+ pass
+ """ % param_args)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+ def test_scope_session(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(scope="module")
+ def arg():
+ l.append(1)
+ return 1
+
+ def test_1(arg):
+ assert arg == 1
+ def test_2(arg):
+ assert arg == 1
+ assert len(l) == 1
+ class TestClass:
+ def test3(self, arg):
+ assert arg == 1
+ assert len(l) == 1
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=3)
+
+ def test_scope_session_exc(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(scope="session")
+ def fix():
+ l.append(1)
+ pytest.skip('skipping')
+
+ def test_1(fix):
+ pass
+ def test_2(fix):
+ pass
+ def test_last():
+ assert l == [1]
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(skipped=2, passed=1)
+
+ def test_scope_session_exc_two_fix(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ m = []
+ @pytest.fixture(scope="session")
+ def a():
+ l.append(1)
+ pytest.skip('skipping')
+ @pytest.fixture(scope="session")
+ def b(a):
+ m.append(1)
+
+ def test_1(b):
+ pass
+ def test_2(b):
+ pass
+ def test_last():
+ assert l == [1]
+ assert m == []
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(skipped=2, passed=1)
+
+ def test_scope_exc(self, testdir):
+ testdir.makepyfile(
+ test_foo="""
+ def test_foo(fix):
+ pass
+ """,
+ test_bar="""
+ def test_bar(fix):
+ pass
+ """,
+ conftest="""
+ import pytest
+ reqs = []
+ @pytest.fixture(scope="session")
+ def fix(request):
+ reqs.append(1)
+ pytest.skip()
+ @pytest.fixture
+ def req_list():
+ return reqs
+ """,
+ test_real="""
+ def test_last(req_list):
+ assert req_list == [1]
+ """
+ )
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(skipped=2, passed=1)
+
+ def test_scope_module_uses_session(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(scope="module")
+ def arg():
+ l.append(1)
+ return 1
+
+ def test_1(arg):
+ assert arg == 1
+ def test_2(arg):
+ assert arg == 1
+ assert len(l) == 1
+ class TestClass:
+ def test3(self, arg):
+ assert arg == 1
+ assert len(l) == 1
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=3)
+
+ def test_scope_module_and_finalizer(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ finalized = []
+ created = []
+ @pytest.fixture(scope="module")
+ def arg(request):
+ created.append(1)
+ assert request.scope == "module"
+ request.addfinalizer(lambda: finalized.append(1))
+ def pytest_funcarg__created(request):
+ return len(created)
+ def pytest_funcarg__finalized(request):
+ return len(finalized)
+ """)
+ testdir.makepyfile(
+ test_mod1="""
+ def test_1(arg, created, finalized):
+ assert created == 1
+ assert finalized == 0
+ def test_2(arg, created, finalized):
+ assert created == 1
+ assert finalized == 0""",
+ test_mod2="""
+ def test_3(arg, created, finalized):
+ assert created == 2
+ assert finalized == 1""",
+ test_mode3="""
+ def test_4(arg, created, finalized):
+ assert created == 3
+ assert finalized == 2
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=4)
+
+ @pytest.mark.parametrize("method", [
+ 'request.getfuncargvalue("arg")',
+ 'request.cached_setup(lambda: None, scope="function")',
+ ], ids=["getfuncargvalue", "cached_setup"])
+ def test_scope_mismatch_various(self, testdir, method):
+ testdir.makeconftest("""
+ import pytest
+ finalized = []
+ created = []
+ @pytest.fixture(scope="function")
+ def arg(request):
+ pass
+ """)
+ testdir.makepyfile(
+ test_mod1="""
+ import pytest
+ @pytest.fixture(scope="session")
+ def arg(request):
+ %s
+ def test_1(arg):
+ pass
+ """ % method)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "*ScopeMismatch*You tried*function*session*request*",
+ ])
+
+ def test_register_only_with_mark(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ @pytest.fixture()
+ def arg():
+ return 1
+ """)
+ testdir.makepyfile(
+ test_mod1="""
+ import pytest
+ @pytest.fixture()
+ def arg(arg):
+ return arg + 1
+ def test_1(arg):
+ assert arg == 2
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_parametrize_and_scope(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope="module", params=["a", "b", "c"])
+ def arg(request):
+ return request.param
+ l = []
+ def test_param(arg):
+ l.append(arg)
+ """)
+ reprec = testdir.inline_run("-v")
+ reprec.assertoutcome(passed=3)
+ l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
+ assert len(l) == 3
+ assert "a" in l
+ assert "b" in l
+ assert "c" in l
+
+ def test_scope_mismatch(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ @pytest.fixture(scope="function")
+ def arg(request):
+ pass
+ """)
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope="session")
+ def arg(arg):
+ pass
+ def test_mismatch(arg):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*ScopeMismatch*",
+ "*1 error*",
+ ])
+
+ def test_parametrize_separated_order(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope="module", params=[1, 2])
+ def arg(request):
+ return request.param
+
+ l = []
+ def test_1(arg):
+ l.append(arg)
+ def test_2(arg):
+ l.append(arg)
+ """)
+ reprec = testdir.inline_run("-v")
+ reprec.assertoutcome(passed=4)
+ l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
+ assert l == [1,1,2,2]
+
+ def test_module_parametrized_ordering(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture(scope="session", params="s1 s2".split())
+ def sarg():
+ pass
+ @pytest.fixture(scope="module", params="m1 m2".split())
+ def marg():
+ pass
+ """)
+ testdir.makepyfile(test_mod1="""
+ def test_func(sarg):
+ pass
+ def test_func1(marg):
+ pass
+ """, test_mod2="""
+ def test_func2(sarg):
+ pass
+ def test_func3(sarg, marg):
+ pass
+ def test_func3b(sarg, marg):
+ pass
+ def test_func4(marg):
+ pass
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines("""
+ test_mod1.py::test_func[s1] PASSED
+ test_mod2.py::test_func2[s1] PASSED
+ test_mod2.py::test_func3[s1-m1] PASSED
+ test_mod2.py::test_func3b[s1-m1] PASSED
+ test_mod2.py::test_func3[s1-m2] PASSED
+ test_mod2.py::test_func3b[s1-m2] PASSED
+ test_mod1.py::test_func[s2] PASSED
+ test_mod2.py::test_func2[s2] PASSED
+ test_mod2.py::test_func3[s2-m1] PASSED
+ test_mod2.py::test_func3b[s2-m1] PASSED
+ test_mod2.py::test_func4[m1] PASSED
+ test_mod2.py::test_func3[s2-m2] PASSED
+ test_mod2.py::test_func3b[s2-m2] PASSED
+ test_mod2.py::test_func4[m2] PASSED
+ test_mod1.py::test_func1[m1] PASSED
+ test_mod1.py::test_func1[m2] PASSED
+ """)
+
+ def test_class_ordering(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+
+ l = []
+
+ @pytest.fixture(scope="function", params=[1,2])
+ def farg(request):
+ return request.param
+
+ @pytest.fixture(scope="class", params=list("ab"))
+ def carg(request):
+ return request.param
+
+ @pytest.fixture(scope="function", autouse=True)
+ def append(request, farg, carg):
+ def fin():
+ l.append("fin_%s%s" % (carg, farg))
+ request.addfinalizer(fin)
+ """)
+ testdir.makepyfile("""
+ import pytest
+
+ class TestClass2:
+ def test_1(self):
+ pass
+ def test_2(self):
+ pass
+ class TestClass:
+ def test_3(self):
+ pass
+ """)
+ result = testdir.runpytest("-vs")
+ result.stdout.fnmatch_lines("""
+ test_class_ordering.py::TestClass2::test_1[1-a] PASSED
+ test_class_ordering.py::TestClass2::test_1[2-a] PASSED
+ test_class_ordering.py::TestClass2::test_2[1-a] PASSED
+ test_class_ordering.py::TestClass2::test_2[2-a] PASSED
+ test_class_ordering.py::TestClass2::test_1[1-b] PASSED
+ test_class_ordering.py::TestClass2::test_1[2-b] PASSED
+ test_class_ordering.py::TestClass2::test_2[1-b] PASSED
+ test_class_ordering.py::TestClass2::test_2[2-b] PASSED
+ test_class_ordering.py::TestClass::test_3[1-a] PASSED
+ test_class_ordering.py::TestClass::test_3[2-a] PASSED
+ test_class_ordering.py::TestClass::test_3[1-b] PASSED
+ test_class_ordering.py::TestClass::test_3[2-b] PASSED
+ """)
+
+ def test_parametrize_separated_order_higher_scope_first(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope="function", params=[1, 2])
+ def arg(request):
+ param = request.param
+ request.addfinalizer(lambda: l.append("fin:%s" % param))
+ l.append("create:%s" % param)
+ return request.param
+
+ @pytest.fixture(scope="module", params=["mod1", "mod2"])
+ def modarg(request):
+ param = request.param
+ request.addfinalizer(lambda: l.append("fin:%s" % param))
+ l.append("create:%s" % param)
+ return request.param
+
+ l = []
+ def test_1(arg):
+ l.append("test1")
+ def test_2(modarg):
+ l.append("test2")
+ def test_3(arg, modarg):
+ l.append("test3")
+ def test_4(modarg, arg):
+ l.append("test4")
+ """)
+ reprec = testdir.inline_run("-v")
+ reprec.assertoutcome(passed=12)
+ l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
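+        # function-scoped arg is created and finalized around each test, while
+        # module-scoped modarg stays alive until the run moves on to the next
+        # module parameter, hence the single fin:mod1/fin:mod2 entries.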
+ expected = [
+ 'create:1', 'test1', 'fin:1', 'create:2', 'test1',
+ 'fin:2', 'create:mod1', 'test2', 'create:1', 'test3',
+ 'fin:1', 'create:2', 'test3', 'fin:2', 'create:1',
+ 'test4', 'fin:1', 'create:2', 'test4', 'fin:2',
+ 'fin:mod1', 'create:mod2', 'test2', 'create:1', 'test3',
+ 'fin:1', 'create:2', 'test3', 'fin:2', 'create:1',
+ 'test4', 'fin:1', 'create:2', 'test4', 'fin:2',
+ 'fin:mod2']
+ import pprint
+ pprint.pprint(list(zip(l, expected)))
+ assert l == expected
+
+ def test_parametrized_fixture_teardown_order(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(params=[1,2], scope="class")
+ def param1(request):
+ return request.param
+
+ l = []
+
+ class TestClass:
+ @classmethod
+ @pytest.fixture(scope="class", autouse=True)
+ def setup1(self, request, param1):
+ l.append(1)
+ request.addfinalizer(self.teardown1)
+ @classmethod
+ def teardown1(self):
+ assert l.pop() == 1
+ @pytest.fixture(scope="class", autouse=True)
+ def setup2(self, request, param1):
+ l.append(2)
+ request.addfinalizer(self.teardown2)
+ @classmethod
+ def teardown2(self):
+ assert l.pop() == 2
+ def test(self):
+ pass
+
+ def test_finish():
+ assert not l
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines("""
+ *3 passed*
+ """)
+ assert "error" not in result.stdout.str()
+
+ def test_fixture_finalizer(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ import sys
+
+ @pytest.fixture
+ def browser(request):
+
+ def finalize():
+ sys.stdout.write('Finalized')
+ request.addfinalizer(finalize)
+ return {}
+ """)
+ b = testdir.mkdir("subdir")
+ b.join("test_overriden_fixture_finalizer.py").write(dedent("""
+ import pytest
+ @pytest.fixture
+ def browser(browser):
+ browser['visited'] = True
+ return browser
+
+ def test_browser(browser):
+ assert browser['visited'] is True
+ """))
+ reprec = testdir.runpytest("-s")
+ for test in ['test_browser']:
+ reprec.stdout.fnmatch_lines('*Finalized*')
+
+ def test_class_scope_with_normal_tests(self, testdir):
+ testpath = testdir.makepyfile("""
+ import pytest
+
+ class Box:
+ value = 0
+
+ @pytest.fixture(scope='class')
+ def a(request):
+ Box.value += 1
+ return Box.value
+
+ def test_a(a):
+ assert a == 1
+
+ class Test1:
+ def test_b(self, a):
+ assert a == 2
+
+ class Test2:
+ def test_c(self, a):
+ assert a == 3""")
+ reprec = testdir.inline_run(testpath)
+ for test in ['test_a', 'test_b', 'test_c']:
+ assert reprec.matchreport(test).passed
+
+ def test_request_is_clean(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(params=[1, 2])
+ def fix(request):
+ request.addfinalizer(lambda: l.append(request.param))
+ def test_fix(fix):
+ pass
+ """)
+ reprec = testdir.inline_run("-s")
+ l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
+ assert l == [1,2]
+
+ def test_parametrize_separated_lifecycle(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ l = []
+ @pytest.fixture(scope="module", params=[1, 2])
+ def arg(request):
+ x = request.param
+ request.addfinalizer(lambda: l.append("fin%s" % x))
+ return request.param
+ def test_1(arg):
+ l.append(arg)
+ def test_2(arg):
+ l.append(arg)
+ """)
+ reprec = testdir.inline_run("-vs")
+ reprec.assertoutcome(passed=4)
+ l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
+ import pprint
+ pprint.pprint(l)
+ #assert len(l) == 6
+ assert l[0] == l[1] == 1
+ assert l[2] == "fin1"
+ assert l[3] == l[4] == 2
+ assert l[5] == "fin2"
+
+ def test_parametrize_function_scoped_finalizers_called(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope="function", params=[1, 2])
+ def arg(request):
+ x = request.param
+ request.addfinalizer(lambda: l.append("fin%s" % x))
+ return request.param
+
+ l = []
+ def test_1(arg):
+ l.append(arg)
+ def test_2(arg):
+ l.append(arg)
+ def test_3():
+ assert len(l) == 8
+ assert l == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"]
+ """)
+ reprec = testdir.inline_run("-v")
+ reprec.assertoutcome(passed=5)
+
+
+ @pytest.mark.issue246
+ @pytest.mark.parametrize("scope", ["session", "function", "module"])
+ def test_finalizer_order_on_parametrization(self, scope, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+
+ @pytest.fixture(scope=%(scope)r, params=["1"])
+ def fix1(request):
+ return request.param
+
+ @pytest.fixture(scope=%(scope)r)
+ def fix2(request, base):
+ def cleanup_fix2():
+ assert not l, "base should not have been finalized"
+ request.addfinalizer(cleanup_fix2)
+
+ @pytest.fixture(scope=%(scope)r)
+ def base(request, fix1):
+ def cleanup_base():
+ l.append("fin_base")
+ print ("finalizing base")
+ request.addfinalizer(cleanup_base)
+
+ def test_begin():
+ pass
+ def test_baz(base, fix2):
+ pass
+ def test_other():
+ pass
+ """ % {"scope": scope})
+ reprec = testdir.inline_run("-lvs")
+ reprec.assertoutcome(passed=3)
+
+ @pytest.mark.issue396
+ def test_class_scope_parametrization_ordering(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ @pytest.fixture(params=["John", "Doe"], scope="class")
+ def human(request):
+ request.addfinalizer(lambda: l.append("fin %s" % request.param))
+ return request.param
+
+ class TestGreetings:
+ def test_hello(self, human):
+ l.append("test_hello")
+
+ class TestMetrics:
+ def test_name(self, human):
+ l.append("test_name")
+
+ def test_population(self, human):
+ l.append("test_population")
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=6)
+ l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
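+        # class-scoped human is finalized once per class for each parameter,
+        # so every class/param block below ends with the matching "fin <name>"
+        # entry.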
+ assert l == ["test_hello", "fin John", "test_hello", "fin Doe",
+ "test_name", "test_population", "fin John",
+ "test_name", "test_population", "fin Doe"]
+
+ def test_parametrize_setup_function(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope="module", params=[1, 2])
+ def arg(request):
+ return request.param
+
+ @pytest.fixture(scope="module", autouse=True)
+ def mysetup(request, arg):
+ request.addfinalizer(lambda: l.append("fin%s" % arg))
+ l.append("setup%s" % arg)
+
+ l = []
+ def test_1(arg):
+ l.append(arg)
+ def test_2(arg):
+ l.append(arg)
+ def test_3():
+ import pprint
+ pprint.pprint(l)
+ if arg == 1:
+ assert l == ["setup1", 1, 1, ]
+ elif arg == 2:
+ assert l == ["setup1", 1, 1, "fin1",
+ "setup2", 2, 2, ]
+
+ """)
+ reprec = testdir.inline_run("-v")
+ reprec.assertoutcome(passed=6)
+
+ def test_fixture_marked_function_not_collected_as_test(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture
+ def test_app():
+ return 1
+
+ def test_something(test_app):
+ assert test_app == 1
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_params_and_ids(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(params=[object(), object()],
+ ids=['alpha', 'beta'])
+ def fix(request):
+ return request.param
+
+ def test_foo(fix):
+ assert 1
+ """)
+ res = testdir.runpytest('-v')
+ res.stdout.fnmatch_lines([
+ '*test_foo*alpha*',
+ '*test_foo*beta*'])
+
+ def test_params_and_ids_yieldfixture(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.yield_fixture(params=[object(), object()],
+ ids=['alpha', 'beta'])
+ def fix(request):
+ yield request.param
+
+ def test_foo(fix):
+ assert 1
+ """)
+ res = testdir.runpytest('-v')
+ res.stdout.fnmatch_lines([
+ '*test_foo*alpha*',
+ '*test_foo*beta*'])
+
+
+class TestRequestScopeAccess:
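+    # Each parametrized row is (scope, request attributes that must exist,
+    # request attributes whose access must raise AttributeError).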
+ pytestmark = pytest.mark.parametrize(("scope", "ok", "error"),[
+ ["session", "", "fspath class function module"],
+ ["module", "module fspath", "cls function"],
+ ["class", "module fspath cls", "function"],
+ ["function", "module fspath cls function", ""]
+ ])
+
+ def test_setup(self, testdir, scope, ok, error):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope=%r, autouse=True)
+ def myscoped(request):
+ for x in %r:
+ assert hasattr(request, x)
+ for x in %r:
+ pytest.raises(AttributeError, lambda:
+ getattr(request, x))
+ assert request.session
+ assert request.config
+ def test_func():
+ pass
+ """ %(scope, ok.split(), error.split()))
+ reprec = testdir.inline_run("-l")
+ reprec.assertoutcome(passed=1)
+
+ def test_funcarg(self, testdir, scope, ok, error):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope=%r)
+ def arg(request):
+ for x in %r:
+ assert hasattr(request, x)
+ for x in %r:
+ pytest.raises(AttributeError, lambda:
+ getattr(request, x))
+ assert request.session
+ assert request.config
+ def test_func(arg):
+ pass
+ """ %(scope, ok.split(), error.split()))
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+class TestErrors:
+ def test_subfactory_missing_funcarg(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture()
+ def gen(qwe123):
+ return 1
+ def test_something(gen):
+ pass
+ """)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "*def gen(qwe123):*",
+ "*fixture*qwe123*not found*",
+ "*1 error*",
+ ])
+
+ def test_issue498_fixture_finalizer_failing(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture
+ def fix1(request):
+ def f():
+ raise KeyError
+ request.addfinalizer(f)
+ return object()
+
+ l = []
+ def test_1(fix1):
+ l.append(fix1)
+ def test_2(fix1):
+ l.append(fix1)
+ def test_3():
+ assert l[0] != l[1]
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("""
+ *ERROR*teardown*test_1*
+ *KeyError*
+ *ERROR*teardown*test_2*
+ *KeyError*
+ *3 pass*2 error*
+ """)
+
+
+
+ def test_setupfunc_missing_funcarg(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(autouse=True)
+ def gen(qwe123):
+ return 1
+ def test_something():
+ pass
+ """)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "*def gen(qwe123):*",
+ "*fixture*qwe123*not found*",
+ "*1 error*",
+ ])
+
+class TestShowFixtures:
+ def test_funcarg_compat(self, testdir):
+ config = testdir.parseconfigure("--funcargs")
+ assert config.option.showfixtures
+
+ def test_show_fixtures(self, testdir):
+ result = testdir.runpytest("--fixtures")
+ result.stdout.fnmatch_lines([
+ "*tmpdir*",
+ "*temporary directory*",
+ ]
+ )
+
+ def test_show_fixtures_verbose(self, testdir):
+ result = testdir.runpytest("--fixtures", "-v")
+ result.stdout.fnmatch_lines([
+ "*tmpdir*--*tmpdir.py*",
+ "*temporary directory*",
+ ]
+ )
+
+ def test_show_fixtures_testmodule(self, testdir):
+ p = testdir.makepyfile('''
+ import pytest
+ @pytest.fixture
+ def _arg0():
+ """ hidden """
+ @pytest.fixture
+ def arg1():
+ """ hello world """
+ ''')
+ result = testdir.runpytest("--fixtures", p)
+ result.stdout.fnmatch_lines("""
+ *tmpdir
+ *fixtures defined from*
+ *arg1*
+ *hello world*
+ """)
+ assert "arg0" not in result.stdout.str()
+
+ @pytest.mark.parametrize("testmod", [True, False])
+ def test_show_fixtures_conftest(self, testdir, testmod):
+ testdir.makeconftest('''
+ import pytest
+ @pytest.fixture
+ def arg1():
+ """ hello world """
+ ''')
+ if testmod:
+ testdir.makepyfile("""
+ def test_hello():
+ pass
+ """)
+ result = testdir.runpytest("--fixtures")
+ result.stdout.fnmatch_lines("""
+ *tmpdir*
+ *fixtures defined from*conftest*
+ *arg1*
+ *hello world*
+ """)
+
+ def test_show_fixtures_trimmed_doc(self, testdir):
+ p = testdir.makepyfile('''
+ import pytest
+ @pytest.fixture
+ def arg1():
+ """
+ line1
+ line2
+
+ """
+ @pytest.fixture
+ def arg2():
+ """
+ line1
+ line2
+
+ """
+ ''')
+ result = testdir.runpytest("--fixtures", p)
+ result.stdout.fnmatch_lines("""
+ * fixtures defined from test_show_fixtures_trimmed_doc *
+ arg2
+ line1
+ line2
+ arg1
+ line1
+ line2
+
+ """)
+
+
+ def test_show_fixtures_different_files(self, testdir):
+ """
+ #833: --fixtures only shows fixtures from first file
+ """
+ testdir.makepyfile(test_a='''
+ import pytest
+
+ @pytest.fixture
+ def fix_a():
+ """Fixture A"""
+ pass
+
+ def test_a(fix_a):
+ pass
+ ''')
+ testdir.makepyfile(test_b='''
+ import pytest
+
+ @pytest.fixture
+ def fix_b():
+ """Fixture B"""
+ pass
+
+ def test_b(fix_b):
+ pass
+ ''')
+ result = testdir.runpytest("--fixtures")
+ result.stdout.fnmatch_lines("""
+ * fixtures defined from test_a *
+ fix_a
+ Fixture A
+
+ * fixtures defined from test_b *
+ fix_b
+ Fixture B
+ """)
+
+
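+# In a yield_fixture, code before the yield is setup, the yielded value is
+# the fixture value, and code after the yield runs as teardown.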
+class TestContextManagerFixtureFuncs:
+ def test_simple(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.yield_fixture
+ def arg1():
+ print ("setup")
+ yield 1
+ print ("teardown")
+ def test_1(arg1):
+ print ("test1 %s" % arg1)
+ def test_2(arg1):
+ print ("test2 %s" % arg1)
+ assert 0
+ """)
+ result = testdir.runpytest("-s")
+ result.stdout.fnmatch_lines("""
+ *setup*
+ *test1 1*
+ *teardown*
+ *setup*
+ *test2 1*
+ *teardown*
+ """)
+
+ def test_scoped(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.yield_fixture(scope="module")
+ def arg1():
+ print ("setup")
+ yield 1
+ print ("teardown")
+ def test_1(arg1):
+ print ("test1 %s" % arg1)
+ def test_2(arg1):
+ print ("test2 %s" % arg1)
+ """)
+ result = testdir.runpytest("-s")
+ result.stdout.fnmatch_lines("""
+ *setup*
+ *test1 1*
+ *test2 1*
+ *teardown*
+ """)
+
+ def test_setup_exception(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.yield_fixture(scope="module")
+ def arg1():
+ pytest.fail("setup")
+ yield 1
+ def test_1(arg1):
+ pass
+ """)
+ result = testdir.runpytest("-s")
+ result.stdout.fnmatch_lines("""
+ *pytest.fail*setup*
+ *1 error*
+ """)
+
+ def test_teardown_exception(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.yield_fixture(scope="module")
+ def arg1():
+ yield 1
+ pytest.fail("teardown")
+ def test_1(arg1):
+ pass
+ """)
+ result = testdir.runpytest("-s")
+ result.stdout.fnmatch_lines("""
+ *pytest.fail*teardown*
+ *1 passed*1 error*
+ """)
+
+ def test_yields_more_than_one(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.yield_fixture(scope="module")
+ def arg1():
+ yield 1
+ yield 2
+ def test_1(arg1):
+ pass
+ """)
+ result = testdir.runpytest("-s")
+ result.stdout.fnmatch_lines("""
+ *fixture function*
+ *test_yields*:2*
+ """)
+
+
+ def test_no_yield(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.yield_fixture(scope="module")
+ def arg1():
+ return 1
+ def test_1(arg1):
+ pass
+ """)
+ result = testdir.runpytest("-s")
+ result.stdout.fnmatch_lines("""
+ *yield_fixture*requires*yield*
+ *yield_fixture*
+ *def arg1*
+ """)
+
+ def test_yield_not_allowed_in_non_yield(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope="module")
+ def arg1():
+ yield 1
+ def test_1(arg1):
+ pass
+ """)
+ result = testdir.runpytest("-s")
+ result.stdout.fnmatch_lines("""
+ *fixture*cannot use*yield*
+ *def arg1*
+ """)
+
diff --git a/testing/web-platform/tests/tools/pytest/testing/python/integration.py b/testing/web-platform/tests/tools/pytest/testing/python/integration.py
new file mode 100644
index 000000000..dea86f942
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/python/integration.py
@@ -0,0 +1,369 @@
+import pytest
+from _pytest import python
+from _pytest import runner
+
+
+class TestOEJSKITSpecials:
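+    # Rough simulation of oejskit-style plugins that create custom collector
+    # nodes and still rely on pytest._fillfuncargs() to resolve funcarg
+    # factories for them.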
+ def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage
+ testdir.makeconftest("""
+ import pytest
+ def pytest_pycollect_makeitem(collector, name, obj):
+ if name == "MyClass":
+ return MyCollector(name, parent=collector)
+ class MyCollector(pytest.Collector):
+ def reportinfo(self):
+ return self.fspath, 3, "xyz"
+ """)
+ modcol = testdir.getmodulecol("""
+ def pytest_funcarg__arg1(request):
+ return 42
+ class MyClass:
+ pass
+ """)
+ # this hook finds funcarg factories
+ rep = runner.collect_one_node(collector=modcol)
+ clscol = rep.result[0]
+ clscol.obj = lambda arg1: None
+ clscol.funcargs = {}
+ pytest._fillfuncargs(clscol)
+ assert clscol.funcargs['arg1'] == 42
+
+ def test_autouse_fixture(self, testdir): # rough jstests usage
+ testdir.makeconftest("""
+ import pytest
+ def pytest_pycollect_makeitem(collector, name, obj):
+ if name == "MyClass":
+ return MyCollector(name, parent=collector)
+ class MyCollector(pytest.Collector):
+ def reportinfo(self):
+ return self.fspath, 3, "xyz"
+ """)
+ modcol = testdir.getmodulecol("""
+ import pytest
+ @pytest.fixture(autouse=True)
+ def hello():
+ pass
+ def pytest_funcarg__arg1(request):
+ return 42
+ class MyClass:
+ pass
+ """)
+ # this hook finds funcarg factories
+ rep = runner.collect_one_node(modcol)
+ clscol = rep.result[0]
+ clscol.obj = lambda: None
+ clscol.funcargs = {}
+ pytest._fillfuncargs(clscol)
+ assert not clscol.funcargs
+
+
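+# getfslineno() is expected to follow the __wrapped__ attribute so that the
+# reported location is that of the original decorated function (defined after
+# wrap), not the wrapper func returned in its place.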
+def test_wrapped_getfslineno():
+ def func():
+ pass
+ def wrap(f):
+ func.__wrapped__ = f
+ func.patchings = ["qwe"]
+ return func
+ @wrap
+ def wrapped_func(x, y, z):
+ pass
+ fs, lineno = python.getfslineno(wrapped_func)
+ fs2, lineno2 = python.getfslineno(wrap)
+ assert lineno > lineno2, "getfslineno does not unwrap correctly"
+
+class TestMockDecoration:
+ def test_wrapped_getfuncargnames(self):
+ from _pytest.python import getfuncargnames
+ def wrap(f):
+ def func():
+ pass
+ func.__wrapped__ = f
+ return func
+ @wrap
+ def f(x):
+ pass
+ l = getfuncargnames(f)
+ assert l == ("x",)
+
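+    # A patchings attribute (as set by mock.patch decorators) should make
+    # getfuncargnames skip the leading arguments that the patches inject.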
+ def test_wrapped_getfuncargnames_patching(self):
+ from _pytest.python import getfuncargnames
+ def wrap(f):
+ def func():
+ pass
+ func.__wrapped__ = f
+ func.patchings = ["qwe"]
+ return func
+ @wrap
+ def f(x, y, z):
+ pass
+ l = getfuncargnames(f)
+ assert l == ("y", "z")
+
+ def test_unittest_mock(self, testdir):
+ pytest.importorskip("unittest.mock")
+ testdir.makepyfile("""
+ import unittest.mock
+ class T(unittest.TestCase):
+ @unittest.mock.patch("os.path.abspath")
+ def test_hello(self, abspath):
+ import os
+ os.path.abspath("hello")
+ abspath.assert_any_call("hello")
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_unittest_mock_and_fixture(self, testdir):
+ pytest.importorskip("unittest.mock")
+ testdir.makepyfile("""
+ import os.path
+ import unittest.mock
+ import pytest
+
+ @pytest.fixture
+ def inject_me():
+ pass
+
+ @unittest.mock.patch.object(os.path, "abspath",
+ new=unittest.mock.MagicMock)
+ def test_hello(inject_me):
+ import os
+ os.path.abspath("hello")
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_mock(self, testdir):
+ pytest.importorskip("mock", "1.0.1")
+ testdir.makepyfile("""
+ import os
+ import unittest
+ import mock
+
+ class T(unittest.TestCase):
+ @mock.patch("os.path.abspath")
+ def test_hello(self, abspath):
+ os.path.abspath("hello")
+ abspath.assert_any_call("hello")
+ def mock_basename(path):
+ return "mock_basename"
+ @mock.patch("os.path.abspath")
+ @mock.patch("os.path.normpath")
+ @mock.patch("os.path.basename", new=mock_basename)
+ def test_someting(normpath, abspath, tmpdir):
+ abspath.return_value = "this"
+ os.path.normpath(os.path.abspath("hello"))
+ normpath.assert_any_call("this")
+ assert os.path.basename("123") == "mock_basename"
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+ calls = reprec.getcalls("pytest_runtest_logreport")
+ funcnames = [call.report.location[2] for call in calls
+ if call.report.when == "call"]
+ assert funcnames == ["T.test_hello", "test_someting"]
+
+ def test_mock_sorting(self, testdir):
+ pytest.importorskip("mock", "1.0.1")
+ testdir.makepyfile("""
+ import os
+ import mock
+
+ @mock.patch("os.path.abspath")
+ def test_one(abspath):
+ pass
+ @mock.patch("os.path.abspath")
+ def test_two(abspath):
+ pass
+ @mock.patch("os.path.abspath")
+ def test_three(abspath):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ calls = reprec.getreports("pytest_runtest_logreport")
+ calls = [x for x in calls if x.when == "call"]
+ names = [x.nodeid.split("::")[-1] for x in calls]
+ assert names == ["test_one", "test_two", "test_three"]
+
+ def test_mock_double_patch_issue473(self, testdir):
+ pytest.importorskip("mock", "1.0.1")
+ testdir.makepyfile("""
+ from mock import patch
+ from pytest import mark
+
+ @patch('os.getcwd')
+ @patch('os.path')
+ @mark.slow
+ class TestSimple:
+ def test_simple_thing(self, mock_path, mock_getcwd):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+
+class TestReRunTests:
+ def test_rerun(self, testdir):
+ testdir.makeconftest("""
+ from _pytest.runner import runtestprotocol
+ def pytest_runtest_protocol(item, nextitem):
+ runtestprotocol(item, log=False, nextitem=nextitem)
+ runtestprotocol(item, log=True, nextitem=nextitem)
+ """)
+ testdir.makepyfile("""
+ import pytest
+ count = 0
+ req = None
+ @pytest.fixture
+ def fix(request):
+ global count, req
+ assert request != req
+ req = request
+ print ("fix count %s" % count)
+ count += 1
+ def test_fix(fix):
+ pass
+ """)
+ result = testdir.runpytest("-s")
+ result.stdout.fnmatch_lines("""
+ *fix count 0*
+ *fix count 1*
+ """)
+ result.stdout.fnmatch_lines("""
+ *2 passed*
+ """)
+
+def test_pytestconfig_is_session_scoped():
+ from _pytest.python import pytestconfig
+ assert pytestconfig._pytestfixturefunction.scope == "session"
+
+
+class TestNoselikeTestAttribute:
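+    # nose-compatible behaviour: objects marked with __test__ = False must
+    # not be collected or run as tests.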
+ def test_module_with_global_test(self, testdir):
+ testdir.makepyfile("""
+ __test__ = False
+ def test_hello():
+ pass
+ """)
+ reprec = testdir.inline_run()
+ assert not reprec.getfailedcollections()
+ calls = reprec.getreports("pytest_runtest_logreport")
+ assert not calls
+
+ def test_class_and_method(self, testdir):
+ testdir.makepyfile("""
+ __test__ = True
+ def test_func():
+ pass
+ test_func.__test__ = False
+
+ class TestSome:
+ __test__ = False
+ def test_method(self):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ assert not reprec.getfailedcollections()
+ calls = reprec.getreports("pytest_runtest_logreport")
+ assert not calls
+
+ def test_unittest_class(self, testdir):
+ testdir.makepyfile("""
+ import unittest
+ class TC(unittest.TestCase):
+ def test_1(self):
+ pass
+ class TC2(unittest.TestCase):
+ __test__ = False
+ def test_2(self):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ assert not reprec.getfailedcollections()
+ call = reprec.getcalls("pytest_collection_modifyitems")[0]
+ assert len(call.items) == 1
+ assert call.items[0].cls.__name__ == "TC"
+
+ def test_class_with_nasty_getattr(self, testdir):
+ """Make sure we handle classes with a custom nasty __getattr__ right.
+
+        With a custom __getattr__ which e.g. returns a function (like with an
+        RPC wrapper), we shouldn't assume this means "__test__ = True".
+ """
+ # https://github.com/pytest-dev/pytest/issues/1204
+ testdir.makepyfile("""
+ class MetaModel(type):
+
+ def __getattr__(cls, key):
+ return lambda: None
+
+
+ BaseModel = MetaModel('Model', (), {})
+
+
+ class Model(BaseModel):
+
+ __metaclass__ = MetaModel
+
+ def test_blah(self):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ assert not reprec.getfailedcollections()
+ call = reprec.getcalls("pytest_collection_modifyitems")[0]
+ assert not call.items
+
+
+@pytest.mark.issue351
+class TestParameterize:
+
+ def test_idfn_marker(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ def idfn(param):
+ if param == 0:
+ return 'spam'
+ elif param == 1:
+ return 'ham'
+ else:
+ return None
+
+ @pytest.mark.parametrize('a,b', [(0, 2), (1, 2)], ids=idfn)
+ def test_params(a, b):
+ pass
+ """)
+ res = testdir.runpytest('--collect-only')
+ res.stdout.fnmatch_lines([
+ "*spam-2*",
+ "*ham-2*",
+ ])
+
+ def test_idfn_fixture(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ def idfn(param):
+ if param == 0:
+ return 'spam'
+ elif param == 1:
+ return 'ham'
+ else:
+ return None
+
+ @pytest.fixture(params=[0, 1], ids=idfn)
+ def a(request):
+ return request.param
+
+ @pytest.fixture(params=[1, 2], ids=idfn)
+ def b(request):
+ return request.param
+
+ def test_params(a, b):
+ pass
+ """)
+ res = testdir.runpytest('--collect-only')
+ res.stdout.fnmatch_lines([
+ "*spam-2*",
+ "*ham-2*",
+ ])
diff --git a/testing/web-platform/tests/tools/pytest/testing/python/metafunc.py b/testing/web-platform/tests/tools/pytest/testing/python/metafunc.py
new file mode 100644
index 000000000..faa687f40
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/python/metafunc.py
@@ -0,0 +1,1094 @@
+# -*- coding: utf-8 -*-
+import re
+
+import _pytest._code
+import py
+import pytest
+from _pytest import python as funcargs
+
+class TestMetafunc:
+ def Metafunc(self, func):
+        # the unit tests of this class check that things work correctly
+        # on the funcarg level, so we don't need a full-blown
+        # initialization
+ class FixtureInfo:
+ name2fixturedefs = None
+ def __init__(self, names):
+ self.names_closure = names
+ names = funcargs.getfuncargnames(func)
+ fixtureinfo = FixtureInfo(names)
+ return funcargs.Metafunc(func, fixtureinfo, None)
+
+ def test_no_funcargs(self, testdir):
+ def function(): pass
+ metafunc = self.Metafunc(function)
+ assert not metafunc.fixturenames
+ repr(metafunc._calls)
+
+ def test_function_basic(self):
+ def func(arg1, arg2="qwe"): pass
+ metafunc = self.Metafunc(func)
+ assert len(metafunc.fixturenames) == 1
+ assert 'arg1' in metafunc.fixturenames
+ assert metafunc.function is func
+ assert metafunc.cls is None
+
+ def test_addcall_no_args(self):
+ def func(arg1): pass
+ metafunc = self.Metafunc(func)
+ metafunc.addcall()
+ assert len(metafunc._calls) == 1
+ call = metafunc._calls[0]
+ assert call.id == "0"
+ assert not hasattr(call, 'param')
+
+ def test_addcall_id(self):
+ def func(arg1): pass
+ metafunc = self.Metafunc(func)
+ pytest.raises(ValueError, "metafunc.addcall(id=None)")
+
+ metafunc.addcall(id=1)
+ pytest.raises(ValueError, "metafunc.addcall(id=1)")
+ pytest.raises(ValueError, "metafunc.addcall(id='1')")
+ metafunc.addcall(id=2)
+ assert len(metafunc._calls) == 2
+ assert metafunc._calls[0].id == "1"
+ assert metafunc._calls[1].id == "2"
+
+ def test_addcall_param(self):
+ def func(arg1): pass
+ metafunc = self.Metafunc(func)
+ class obj: pass
+ metafunc.addcall(param=obj)
+ metafunc.addcall(param=obj)
+ metafunc.addcall(param=1)
+ assert len(metafunc._calls) == 3
+ assert metafunc._calls[0].getparam("arg1") == obj
+ assert metafunc._calls[1].getparam("arg1") == obj
+ assert metafunc._calls[2].getparam("arg1") == 1
+
+ def test_addcall_funcargs(self):
+ def func(x): pass
+ metafunc = self.Metafunc(func)
+ class obj: pass
+ metafunc.addcall(funcargs={"x": 2})
+ metafunc.addcall(funcargs={"x": 3})
+ pytest.raises(pytest.fail.Exception, "metafunc.addcall({'xyz': 0})")
+ assert len(metafunc._calls) == 2
+ assert metafunc._calls[0].funcargs == {'x': 2}
+ assert metafunc._calls[1].funcargs == {'x': 3}
+ assert not hasattr(metafunc._calls[1], 'param')
+
+ def test_parametrize_error(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ metafunc.parametrize("x", [1,2])
+ pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
+ pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
+ metafunc.parametrize("y", [1,2])
+ pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
+ pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
+
+ def test_parametrize_and_id(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+
+ metafunc.parametrize("x", [1,2], ids=['basic', 'advanced'])
+ metafunc.parametrize("y", ["abc", "def"])
+ ids = [x.id for x in metafunc._calls]
+ assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"]
+
+ def test_parametrize_with_wrong_number_of_ids(self, testdir):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+
+ pytest.raises(ValueError, lambda:
+ metafunc.parametrize("x", [1,2], ids=['basic']))
+
+ pytest.raises(ValueError, lambda:
+ metafunc.parametrize(("x","y"), [("abc", "def"),
+ ("ghi", "jkl")], ids=["one"]))
+
+ def test_parametrize_with_userobjects(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ class A:
+ pass
+ metafunc.parametrize("x", [A(), A()])
+ metafunc.parametrize("y", list("ab"))
+ assert metafunc._calls[0].id == "x0-a"
+ assert metafunc._calls[1].id == "x0-b"
+ assert metafunc._calls[2].id == "x1-a"
+ assert metafunc._calls[3].id == "x1-b"
+
+ @pytest.mark.skipif('sys.version_info[0] >= 3')
+ def test_unicode_idval_python2(self):
+ """unittest for the expected behavior to obtain ids for parametrized
+ unicode values in Python 2: if convertible to ascii, they should appear
+ as ascii values, otherwise fallback to hide the value behind the name
+ of the parametrized variable name. #1086
+ """
+ from _pytest.python import _idval
+ values = [
+ (u'', ''),
+ (u'ascii', 'ascii'),
+ (u'ação', 'a6'),
+ (u'josé@blah.com', 'a6'),
+ (u'δοκ.ιμή@παράδειγμα.δοκιμή', 'a6'),
+ ]
+ for val, expected in values:
+ assert _idval(val, 'a', 6, None) == expected
+
+ def test_bytes_idval(self):
+ """unittest for the expected behavior to obtain ids for parametrized
+ bytes values:
+ - python2: non-ascii strings are considered bytes and formatted using
+ "binary escape", where any byte < 127 is escaped into its hex form.
+ - python3: bytes objects are always escaped using "binary escape".
+ """
+ from _pytest.python import _idval
+ values = [
+ (b'', ''),
+ (b'\xc3\xb4\xff\xe4', '\\xc3\\xb4\\xff\\xe4'),
+ (b'ascii', 'ascii'),
+ (u'αρά'.encode('utf-8'), '\\xce\\xb1\\xcf\\x81\\xce\\xac'),
+ ]
+ for val, expected in values:
+ assert _idval(val, 'a', 6, None) == expected
+
+ @pytest.mark.issue250
+ def test_idmaker_autoname(self):
+ from _pytest.python import idmaker
+ result = idmaker(("a", "b"), [("string", 1.0),
+ ("st-ring", 2.0)])
+ assert result == ["string-1.0", "st-ring-2.0"]
+
+ result = idmaker(("a", "b"), [(object(), 1.0),
+ (object(), object())])
+ assert result == ["a0-1.0", "a1-b1"]
+ # unicode mixing, issue250
+ result = idmaker((py.builtin._totext("a"), "b"), [({}, b'\xc3\xb4')])
+ assert result == ['a0-\\xc3\\xb4']
+
+ def test_idmaker_with_bytes_regex(self):
+ from _pytest.python import idmaker
+ result = idmaker(("a"), [(re.compile(b'foo'), 1.0)])
+ assert result == ["foo"]
+
+ def test_idmaker_native_strings(self):
+ from _pytest.python import idmaker
+ totext = py.builtin._totext
+ result = idmaker(("a", "b"), [(1.0, -1.1),
+ (2, -202),
+ ("three", "three hundred"),
+ (True, False),
+ (None, None),
+ (re.compile('foo'), re.compile('bar')),
+ (str, int),
+ (list("six"), [66, 66]),
+ (set([7]), set("seven")),
+ (tuple("eight"), (8, -8, 8)),
+ (b'\xc3\xb4', b"name"),
+ (b'\xc3\xb4', totext("other")),
+ ])
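+ # numbers, strings, bools, None, regex patterns and types keep a readable form;
+ # containers fall back to "<argname><index>" ids and non-ascii bytes are hex-escaped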
+ assert result == ["1.0--1.1",
+ "2--202",
+ "three-three hundred",
+ "True-False",
+ "None-None",
+ "foo-bar",
+ "str-int",
+ "a7-b7",
+ "a8-b8",
+ "a9-b9",
+ "\\xc3\\xb4-name",
+ "\\xc3\\xb4-other",
+ ]
+
+ def test_idmaker_enum(self):
+ from _pytest.python import idmaker
+ enum = pytest.importorskip("enum")
+ e = enum.Enum("Foo", "one, two")
+ result = idmaker(("a", "b"), [(e.one, e.two)])
+ assert result == ["Foo.one-Foo.two"]
+
+ @pytest.mark.issue351
+ def test_idmaker_idfn(self):
+ from _pytest.python import idmaker
+ def ids(val):
+ if isinstance(val, Exception):
+ return repr(val)
+
+ result = idmaker(("a", "b"), [(10.0, IndexError()),
+ (20, KeyError()),
+ ("three", [1, 2, 3]),
+ ], idfn=ids)
+ assert result == ["10.0-IndexError()",
+ "20-KeyError()",
+ "three-b2",
+ ]
+
+ @pytest.mark.issue351
+ def test_idmaker_idfn_unique_names(self):
+ from _pytest.python import idmaker
+ def ids(val):
+ return 'a'
+
+ result = idmaker(("a", "b"), [(10.0, IndexError()),
+ (20, KeyError()),
+ ("three", [1, 2, 3]),
+ ], idfn=ids)
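+ # ids that are not unique are disambiguated by prefixing the index of the parameter set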
+ assert result == ["0a-a",
+ "1a-a",
+ "2a-a",
+ ]
+
+ @pytest.mark.issue351
+ def test_idmaker_idfn_exception(self):
+ from _pytest.python import idmaker
+ def ids(val):
+ raise Exception("bad code")
+
+ result = idmaker(("a", "b"), [(10.0, IndexError()),
+ (20, KeyError()),
+ ("three", [1, 2, 3]),
+ ], idfn=ids)
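+ # if the id callback raises, idmaker falls back to the default autogenerated ids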
+ assert result == ["10.0-b0",
+ "20-b1",
+ "three-b2",
+ ]
+
+ def test_addcall_and_parametrize(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ metafunc.addcall({'x': 1})
+ metafunc.parametrize('y', [2,3])
+ assert len(metafunc._calls) == 2
+ assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2}
+ assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3}
+ assert metafunc._calls[0].id == "0-2"
+ assert metafunc._calls[1].id == "0-3"
+
+ @pytest.mark.issue714
+ def test_parametrize_indirect(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ metafunc.parametrize('x', [1], indirect=True)
+ metafunc.parametrize('y', [2,3], indirect=True)
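+ # with indirect=True the values are recorded as params (read by fixtures via request.param) rather than funcargs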
+ assert len(metafunc._calls) == 2
+ assert metafunc._calls[0].funcargs == {}
+ assert metafunc._calls[1].funcargs == {}
+ assert metafunc._calls[0].params == dict(x=1,y=2)
+ assert metafunc._calls[1].params == dict(x=1,y=3)
+
+ @pytest.mark.issue714
+ def test_parametrize_indirect_list(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ metafunc.parametrize('x, y', [('a', 'b')], indirect=['x'])
+ assert metafunc._calls[0].funcargs == dict(y='b')
+ assert metafunc._calls[0].params == dict(x='a')
+
+ @pytest.mark.issue714
+ def test_parametrize_indirect_list_all(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'y'])
+ assert metafunc._calls[0].funcargs == {}
+ assert metafunc._calls[0].params == dict(x='a', y='b')
+
+ @pytest.mark.issue714
+ def test_parametrize_indirect_list_empty(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ metafunc.parametrize('x, y', [('a', 'b')], indirect=[])
+ assert metafunc._calls[0].funcargs == dict(x='a', y='b')
+ assert metafunc._calls[0].params == {}
+
+ @pytest.mark.issue714
+ def test_parametrize_indirect_list_functional(self, testdir):
+ """
+ Test parametrization with the 'indirect' parameter applied to
+ particular arguments. As y is direct, its value should be used
+ directly rather than being passed to the y fixture.
+
+ :param testdir: the instance of Testdir class, a temporary
+ test directory.
+ """
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope='function')
+ def x(request):
+ return request.param * 3
+ @pytest.fixture(scope='function')
+ def y(request):
+ return request.param * 2
+ @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])
+ def test_simple(x,y):
+ assert len(x) == 3
+ assert len(y) == 1
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "*test_simple*a-b*",
+ "*1 passed*",
+ ])
+
+ @pytest.mark.issue714
+ def test_parametrize_indirect_list_error(self, testdir):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ with pytest.raises(ValueError):
+ metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'z'])
+
+ @pytest.mark.issue714
+ def test_parametrize_uses_no_fixture_error_indirect_false(self, testdir):
+ """The 'uses no fixture' error tells the user at collection time
+ that the parametrize data they've set up doesn't correspond to the
+ fixtures in their test function, rather than silently ignoring this
+ and letting the test potentially pass.
+ """
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=False)
+ def test_simple(x):
+ assert len(x) == 3
+ """)
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*uses no fixture 'y'*",
+ ])
+
+ @pytest.mark.issue714
+ def test_parametrize_uses_no_fixture_error_indirect_true(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope='function')
+ def x(request):
+ return request.param * 3
+ @pytest.fixture(scope='function')
+ def y(request):
+ return request.param * 2
+
+ @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=True)
+ def test_simple(x):
+ assert len(x) == 3
+ """)
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*uses no fixture 'y'*",
+ ])
+
+ @pytest.mark.issue714
+ def test_parametrize_indirect_uses_no_fixture_error_indirect_list(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope='function')
+ def x(request):
+ return request.param * 3
+
+ @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])
+ def test_simple(x):
+ assert len(x) == 3
+ """)
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*uses no fixture 'y'*",
+ ])
+
+ def test_addcalls_and_parametrize_indirect(self):
+ def func(x, y): pass
+ metafunc = self.Metafunc(func)
+ metafunc.addcall(param="123")
+ metafunc.parametrize('x', [1], indirect=True)
+ metafunc.parametrize('y', [2,3], indirect=True)
+ assert len(metafunc._calls) == 2
+ assert metafunc._calls[0].funcargs == {}
+ assert metafunc._calls[1].funcargs == {}
+ assert metafunc._calls[0].params == dict(x=1,y=2)
+ assert metafunc._calls[1].params == dict(x=1,y=3)
+
+ def test_parametrize_functional(self, testdir):
+ testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.parametrize('x', [1,2], indirect=True)
+ metafunc.parametrize('y', [2])
+ def pytest_funcarg__x(request):
+ return request.param * 10
+ #def pytest_funcarg__y(request):
+ # return request.param
+
+ def test_simple(x,y):
+ assert x in (10,20)
+ assert y == 2
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "*test_simple*1-2*",
+ "*test_simple*2-2*",
+ "*2 passed*",
+ ])
+
+ def test_parametrize_onearg(self):
+ metafunc = self.Metafunc(lambda x: None)
+ metafunc.parametrize("x", [1,2])
+ assert len(metafunc._calls) == 2
+ assert metafunc._calls[0].funcargs == dict(x=1)
+ assert metafunc._calls[0].id == "1"
+ assert metafunc._calls[1].funcargs == dict(x=2)
+ assert metafunc._calls[1].id == "2"
+
+ def test_parametrize_onearg_indirect(self):
+ metafunc = self.Metafunc(lambda x: None)
+ metafunc.parametrize("x", [1,2], indirect=True)
+ assert metafunc._calls[0].params == dict(x=1)
+ assert metafunc._calls[0].id == "1"
+ assert metafunc._calls[1].params == dict(x=2)
+ assert metafunc._calls[1].id == "2"
+
+ def test_parametrize_twoargs(self):
+ metafunc = self.Metafunc(lambda x,y: None)
+ metafunc.parametrize(("x", "y"), [(1,2), (3,4)])
+ assert len(metafunc._calls) == 2
+ assert metafunc._calls[0].funcargs == dict(x=1, y=2)
+ assert metafunc._calls[0].id == "1-2"
+ assert metafunc._calls[1].funcargs == dict(x=3, y=4)
+ assert metafunc._calls[1].id == "3-4"
+
+ def test_parametrize_multiple_times(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ pytestmark = pytest.mark.parametrize("x", [1,2])
+ def test_func(x):
+ assert 0, x
+ class TestClass:
+ pytestmark = pytest.mark.parametrize("y", [3,4])
+ def test_meth(self, x, y):
+ assert 0, x
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ result.assert_outcomes(failed=6)
+
+ def test_parametrize_CSV(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize("x, y,", [(1,2), (2,3)])
+ def test_func(x, y):
+ assert x+1 == y
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
+
+ def test_parametrize_class_scenarios(self, testdir):
+ testdir.makepyfile("""
+ # same as doc/en/example/parametrize scenario example
+ def pytest_generate_tests(metafunc):
+ idlist = []
+ argvalues = []
+ for scenario in metafunc.cls.scenarios:
+ idlist.append(scenario[0])
+ items = scenario[1].items()
+ argnames = [x[0] for x in items]
+ argvalues.append(([x[1] for x in items]))
+ metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
+
+ class Test(object):
+ scenarios = [['1', {'arg': {1: 2}, "arg2": "value2"}],
+ ['2', {'arg':'value2', "arg2": "value2"}]]
+
+ def test_1(self, arg, arg2):
+ pass
+
+ def test_2(self, arg2, arg):
+ pass
+
+ def test_3(self, arg, arg2):
+ pass
+ """)
+ result = testdir.runpytest("-v")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines("""
+ *test_1*1*
+ *test_2*1*
+ *test_3*1*
+ *test_1*2*
+ *test_2*2*
+ *test_3*2*
+ *6 passed*
+ """)
+
+ def test_format_args(self):
+ def function1(): pass
+ assert funcargs._format_args(function1) == '()'
+
+ def function2(arg1): pass
+ assert funcargs._format_args(function2) == "(arg1)"
+
+ def function3(arg1, arg2="qwe"): pass
+ assert funcargs._format_args(function3) == "(arg1, arg2='qwe')"
+
+ def function4(arg1, *args, **kwargs): pass
+ assert funcargs._format_args(function4) == "(arg1, *args, **kwargs)"
+
+
+class TestMetafuncFunctional:
+ def test_attributes(self, testdir):
+ p = testdir.makepyfile("""
+ # assumes that generate/provide runs in the same process
+ import py, pytest
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall(param=metafunc)
+
+ def pytest_funcarg__metafunc(request):
+ assert request._pyfuncitem._genid == "0"
+ return request.param
+
+ def test_function(metafunc, pytestconfig):
+ assert metafunc.config == pytestconfig
+ assert metafunc.module.__name__ == __name__
+ assert metafunc.function == test_function
+ assert metafunc.cls is None
+
+ class TestClass:
+ def test_method(self, metafunc, pytestconfig):
+ assert metafunc.config == pytestconfig
+ assert metafunc.module.__name__ == __name__
+ if py.std.sys.version_info > (3, 0):
+ unbound = TestClass.test_method
+ else:
+ unbound = TestClass.test_method.im_func
+ # XXX actually have an unbound test function here?
+ assert metafunc.function == unbound
+ assert metafunc.cls == TestClass
+ """)
+ result = testdir.runpytest(p, "-v")
+ result.assert_outcomes(passed=2)
+
+ def test_addcall_with_two_funcargs_generators(self, testdir):
+ testdir.makeconftest("""
+ def pytest_generate_tests(metafunc):
+ assert "arg1" in metafunc.fixturenames
+ metafunc.addcall(funcargs=dict(arg1=1, arg2=2))
+ """)
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall(funcargs=dict(arg1=1, arg2=1))
+
+ class TestClass:
+ def test_myfunc(self, arg1, arg2):
+ assert arg1 == arg2
+ """)
+ result = testdir.runpytest("-v", p)
+ result.stdout.fnmatch_lines([
+ "*test_myfunc*0*PASS*",
+ "*test_myfunc*1*FAIL*",
+ "*1 failed, 1 passed*"
+ ])
+
+ def test_two_functions(self, testdir):
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall(param=10)
+ metafunc.addcall(param=20)
+
+ def pytest_funcarg__arg1(request):
+ return request.param
+
+ def test_func1(arg1):
+ assert arg1 == 10
+ def test_func2(arg1):
+ assert arg1 in (10, 20)
+ """)
+ result = testdir.runpytest("-v", p)
+ result.stdout.fnmatch_lines([
+ "*test_func1*0*PASS*",
+ "*test_func1*1*FAIL*",
+ "*test_func2*PASS*",
+ "*1 failed, 3 passed*"
+ ])
+
+ def test_noself_in_method(self, testdir):
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ assert 'xyz' not in metafunc.fixturenames
+
+ class TestHello:
+ def test_hello(xyz):
+ pass
+ """)
+ result = testdir.runpytest(p)
+ result.assert_outcomes(passed=1)
+
+
+ def test_generate_plugin_and_module(self, testdir):
+ testdir.makeconftest("""
+ def pytest_generate_tests(metafunc):
+ assert "arg1" in metafunc.fixturenames
+ metafunc.addcall(id="world", param=(2,100))
+ """)
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall(param=(1,1), id="hello")
+
+ def pytest_funcarg__arg1(request):
+ return request.param[0]
+ def pytest_funcarg__arg2(request):
+ return request.param[1]
+
+ class TestClass:
+ def test_myfunc(self, arg1, arg2):
+ assert arg1 == arg2
+ """)
+ result = testdir.runpytest("-v", p)
+ result.stdout.fnmatch_lines([
+ "*test_myfunc*hello*PASS*",
+ "*test_myfunc*world*FAIL*",
+ "*1 failed, 1 passed*"
+ ])
+
+ def test_generate_tests_in_class(self, testdir):
+ p = testdir.makepyfile("""
+ class TestClass:
+ def pytest_generate_tests(self, metafunc):
+ metafunc.addcall(funcargs={'hello': 'world'}, id="hello")
+
+ def test_myfunc(self, hello):
+ assert hello == "world"
+ """)
+ result = testdir.runpytest("-v", p)
+ result.stdout.fnmatch_lines([
+ "*test_myfunc*hello*PASS*",
+ "*1 passed*"
+ ])
+
+ def test_two_functions_not_same_instance(self, testdir):
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall({'arg1': 10})
+ metafunc.addcall({'arg1': 20})
+
+ class TestClass:
+ def test_func(self, arg1):
+ assert not hasattr(self, 'x')
+ self.x = 1
+ """)
+ result = testdir.runpytest("-v", p)
+ result.stdout.fnmatch_lines([
+ "*test_func*0*PASS*",
+ "*test_func*1*PASS*",
+ "*2 pass*",
+ ])
+
+ def test_issue28_setup_method_in_generate_tests(self, testdir):
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall({'arg1': 1})
+
+ class TestClass:
+ def test_method(self, arg1):
+ assert arg1 == self.val
+ def setup_method(self, func):
+ self.val = 1
+ """)
+ result = testdir.runpytest(p)
+ result.assert_outcomes(passed=1)
+
+ def test_parametrize_functional2(self, testdir):
+ testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.parametrize("arg1", [1,2])
+ metafunc.parametrize("arg2", [4,5])
+ def test_hello(arg1, arg2):
+ assert 0, (arg1, arg2)
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*(1, 4)*",
+ "*(1, 5)*",
+ "*(2, 4)*",
+ "*(2, 5)*",
+ "*4 failed*",
+ ])
+
+ def test_parametrize_and_inner_getfuncargvalue(self, testdir):
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.parametrize("arg1", [1], indirect=True)
+ metafunc.parametrize("arg2", [10], indirect=True)
+
+ def pytest_funcarg__arg1(request):
+ x = request.getfuncargvalue("arg2")
+ return x + request.param
+
+ def pytest_funcarg__arg2(request):
+ return request.param
+
+ def test_func1(arg1, arg2):
+ assert arg1 == 11
+ """)
+ result = testdir.runpytest("-v", p)
+ result.stdout.fnmatch_lines([
+ "*test_func1*1*PASS*",
+ "*1 passed*"
+ ])
+
+ def test_parametrize_on_setup_arg(self, testdir):
+ p = testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ assert "arg1" in metafunc.fixturenames
+ metafunc.parametrize("arg1", [1], indirect=True)
+
+ def pytest_funcarg__arg1(request):
+ return request.param
+
+ def pytest_funcarg__arg2(request, arg1):
+ return 10 * arg1
+
+ def test_func(arg2):
+ assert arg2 == 10
+ """)
+ result = testdir.runpytest("-v", p)
+ result.stdout.fnmatch_lines([
+ "*test_func*1*PASS*",
+ "*1 passed*"
+ ])
+
+ def test_parametrize_with_ids(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def pytest_generate_tests(metafunc):
+ metafunc.parametrize(("a", "b"), [(1,1), (1,2)],
+ ids=["basic", "advanced"])
+
+ def test_function(a, b):
+ assert a == b
+ """)
+ result = testdir.runpytest("-v")
+ assert result.ret == 1
+ result.stdout.fnmatch_lines_random([
+ "*test_function*basic*PASSED",
+ "*test_function*advanced*FAILED",
+ ])
+
+ def test_parametrize_without_ids(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def pytest_generate_tests(metafunc):
+ metafunc.parametrize(("a", "b"),
+ [(1,object()), (1.3,object())])
+
+ def test_function(a, b):
+ assert 1
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines("""
+ *test_function*1-b0*
+ *test_function*1.3-b1*
+ """)
+
+ @pytest.mark.parametrize(("scope", "length"),
+ [("module", 2), ("function", 4)])
+ def test_parametrize_scope_overrides(self, testdir, scope, length):
+ testdir.makepyfile("""
+ import pytest
+ l = []
+ def pytest_generate_tests(metafunc):
+ if "arg" in metafunc.funcargnames:
+ metafunc.parametrize("arg", [1,2], indirect=True,
+ scope=%r)
+ def pytest_funcarg__arg(request):
+ l.append(request.param)
+ return request.param
+ def test_hello(arg):
+ assert arg in (1,2)
+ def test_world(arg):
+ assert arg in (1,2)
+ def test_checklength():
+ assert len(l) == %d
+ """ % (scope, length))
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=5)
+
+ def test_parametrize_issue323(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope='module', params=range(966))
+ def foo(request):
+ return request.param
+
+ def test_it(foo):
+ pass
+ def test_it2(foo):
+ pass
+ """)
+ reprec = testdir.inline_run("--collect-only")
+ assert not reprec.getcalls("pytest_internalerror")
+
+ def test_usefixtures_seen_in_generate_tests(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def pytest_generate_tests(metafunc):
+ assert "abc" in metafunc.fixturenames
+ metafunc.parametrize("abc", [1])
+
+ @pytest.mark.usefixtures("abc")
+ def test_function():
+ pass
+ """)
+ reprec = testdir.runpytest()
+ reprec.assert_outcomes(passed=1)
+
+ def test_generate_tests_only_done_in_subdir(self, testdir):
+ sub1 = testdir.mkpydir("sub1")
+ sub2 = testdir.mkpydir("sub2")
+ sub1.join("conftest.py").write(_pytest._code.Source("""
+ def pytest_generate_tests(metafunc):
+ assert metafunc.function.__name__ == "test_1"
+ """))
+ sub2.join("conftest.py").write(_pytest._code.Source("""
+ def pytest_generate_tests(metafunc):
+ assert metafunc.function.__name__ == "test_2"
+ """))
+ sub1.join("test_in_sub1.py").write("def test_1(): pass")
+ sub2.join("test_in_sub2.py").write("def test_2(): pass")
+ result = testdir.runpytest("-v", "-s", sub1, sub2, sub1)
+ result.assert_outcomes(passed=3)
+
+ def test_generate_same_function_names_issue403(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ def make_tests():
+ @pytest.mark.parametrize("x", range(2))
+ def test_foo(x):
+ pass
+ return test_foo
+
+ test_x = make_tests()
+ test_y = make_tests()
+ """)
+ reprec = testdir.runpytest()
+ reprec.assert_outcomes(passed=4)
+
+ @pytest.mark.issue463
+ @pytest.mark.parametrize('attr', ['parametrise', 'parameterize',
+ 'parameterise'])
+ def test_parametrize_misspelling(self, testdir, attr):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.mark.{0}("x", range(2))
+ def test_foo(x):
+ pass
+ """.format(attr))
+ reprec = testdir.inline_run('--collectonly')
+ failures = reprec.getfailures()
+ assert len(failures) == 1
+ expectederror = "MarkerError: test_foo has '{0}', spelling should be 'parametrize'".format(attr)
+ assert expectederror in failures[0].longrepr.reprcrash.message
+
+
+class TestMarkersWithParametrization:
+ pytestmark = pytest.mark.issue308
+ def test_simple_mark(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.foo
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.bar((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ items = testdir.getitems(s)
+ assert len(items) == 3
+ for item in items:
+ assert 'foo' in item.keywords
+ assert 'bar' not in items[0].keywords
+ assert 'bar' in items[1].keywords
+ assert 'bar' not in items[2].keywords
+
+ def test_select_based_on_mark(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.foo((2, 3)),
+ (3, 4),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ rec = testdir.inline_run("-m", 'foo')
+ passed, skipped, fail = rec.listoutcomes()
+ assert len(passed) == 1
+ assert len(skipped) == 0
+ assert len(fail) == 0
+
+ @pytest.mark.xfail(reason="is this important to support??")
+ def test_nested_marks(self, testdir):
+ s = """
+ import pytest
+ mastermark = pytest.mark.foo(pytest.mark.bar)
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ mastermark((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ items = testdir.getitems(s)
+ assert len(items) == 3
+ for mark in ['foo', 'bar']:
+ assert mark not in items[0].keywords
+ assert mark in items[1].keywords
+ assert mark not in items[2].keywords
+
+ def test_simple_xfail(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ # xfail is skip??
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_simple_xfail_single_argname(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize("n", [
+ 2,
+ pytest.mark.xfail(3),
+ 4,
+ ])
+ def test_isEven(n):
+ assert n % 2 == 0
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_with_arg(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("True")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_with_kwarg(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail(reason="some bug")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_with_arg_and_kwarg(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("True", reason="some bug")((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=1)
+
+ def test_xfail_passing_is_xpass(self, testdir):
+ s = """
+ import pytest
+
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
+ (3, 4),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ # xpass is fail, obviously :)
+ reprec.assertoutcome(passed=2, failed=1)
+
+ def test_parametrize_called_in_generate_tests(self, testdir):
+ s = """
+ import pytest
+
+
+ def pytest_generate_tests(metafunc):
+ passingTestData = [(1, 2),
+ (2, 3)]
+ failingTestData = [(1, 3),
+ (2, 2)]
+
+ testData = passingTestData + [pytest.mark.xfail(d)
+ for d in failingTestData]
+ metafunc.parametrize(("n", "expected"), testData)
+
+
+ def test_increment(n, expected):
+ assert n + 1 == expected
+ """
+ testdir.makepyfile(s)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2, skipped=2)
+
+
+ @pytest.mark.issue290
+ def test_parametrize_ID_generation_string_int_works(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture
+ def myfixture():
+ return 'example'
+ @pytest.mark.parametrize(
+ 'limit', (0, '0'))
+ def test_limit(limit, myfixture):
+ return
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=2)
diff --git a/testing/web-platform/tests/tools/pytest/testing/python/raises.py b/testing/web-platform/tests/tools/pytest/testing/python/raises.py
new file mode 100644
index 000000000..0ea7f9bee
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/python/raises.py
@@ -0,0 +1,78 @@
+import pytest
+
+class TestRaises:
+ def test_raises(self):
+ source = "int('qwe')"
+ excinfo = pytest.raises(ValueError, source)
+ code = excinfo.traceback[-1].frame.code
+ s = str(code.fullsource)
+ assert s == source
+
+ def test_raises_exec(self):
+ pytest.raises(ValueError, "a,x = []")
+
+ def test_raises_syntax_error(self):
+ pytest.raises(SyntaxError, "qwe qwe qwe")
+
+ def test_raises_function(self):
+ pytest.raises(ValueError, int, 'hello')
+
+ def test_raises_callable_no_exception(self):
+ class A:
+ def __call__(self):
+ pass
+ try:
+ pytest.raises(ValueError, A())
+ except pytest.raises.Exception:
+ pass
+
+ def test_raises_flip_builtin_AssertionError(self):
+ # we replace AssertionError at the Python level,
+ # but C code might still raise the builtin one
+ from _pytest.assertion.util import BuiltinAssertionError # noqa
+ pytest.raises(AssertionError,"""
+ raise BuiltinAssertionError
+ """)
+
+ def test_raises_as_contextmanager(self, testdir):
+ testdir.makepyfile("""
+ from __future__ import with_statement
+ import py, pytest
+ import _pytest._code
+
+ def test_simple():
+ with pytest.raises(ZeroDivisionError) as excinfo:
+ assert isinstance(excinfo, _pytest._code.ExceptionInfo)
+ 1/0
+ print (excinfo)
+ assert excinfo.type == ZeroDivisionError
+ assert isinstance(excinfo.value, ZeroDivisionError)
+
+ def test_noraise():
+ with pytest.raises(pytest.raises.Exception):
+ with pytest.raises(ValueError):
+ int()
+
+ def test_raise_wrong_exception_passes_by():
+ with pytest.raises(ZeroDivisionError):
+ with pytest.raises(ValueError):
+ 1/0
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ '*3 passed*',
+ ])
+
+ def test_noclass(self):
+ with pytest.raises(TypeError):
+ pytest.raises('wrong', lambda: None)
+
+ def test_tuple(self):
+ with pytest.raises((KeyError, ValueError)):
+ raise KeyError('oops')
+
+ def test_no_raise_message(self):
+ try:
+ pytest.raises(ValueError, int, '0')
+ except pytest.raises.Exception as e:
+ assert e.msg == "DID NOT RAISE {0}".format(repr(ValueError))
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_argcomplete.py b/testing/web-platform/tests/tools/pytest/testing/test_argcomplete.py
new file mode 100644
index 000000000..ace7d8ceb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_argcomplete.py
@@ -0,0 +1,90 @@
+from __future__ import with_statement
+import py, pytest
+
+ # tests for _argcomplete, not specific to any particular application
+
+def equal_with_bash(prefix, ffc, fc, out=None):
+ res = ffc(prefix)
+ res_bash = set(fc(prefix))
+ retval = set(res) == res_bash
+ if out:
+ out.write('equal_with_bash %s %s\n' % (retval, res))
+ if not retval:
+ out.write(' python - bash: %s\n' % (set(res) - res_bash))
+ out.write(' bash - python: %s\n' % (res_bash - set(res)))
+ return retval
+
+ # copied from argcomplete.completers, because importing from there
+ # also pulls in argcomplete.__init__, which opens file descriptor 9;
+ # this causes an IOError at the end of the test run
+def _wrapcall(*args, **kargs):
+ try:
+ if py.std.sys.version_info > (2,7):
+ return py.std.subprocess.check_output(*args,**kargs).decode().splitlines()
+ if 'stdout' in kargs:
+ raise ValueError('stdout argument not allowed, it will be overridden.')
+ process = py.std.subprocess.Popen(
+ stdout=py.std.subprocess.PIPE, *args, **kargs)
+ output, unused_err = process.communicate()
+ retcode = process.poll()
+ if retcode:
+ cmd = kargs.get("args")
+ if cmd is None:
+ cmd = args[0]
+ raise py.std.subprocess.CalledProcessError(retcode, cmd)
+ return output.decode().splitlines()
+ except py.std.subprocess.CalledProcessError:
+ return []
+
+class FilesCompleter(object):
+ 'File completer class, optionally takes a list of allowed extensions'
+ def __init__(self,allowednames=(),directories=True):
+ # Fix if someone passes in a string instead of a list
+ if type(allowednames) is str:
+ allowednames = [allowednames]
+
+ self.allowednames = [x.lstrip('*').lstrip('.') for x in allowednames]
+ self.directories = directories
+
+ def __call__(self, prefix, **kwargs):
+ completion = []
+ if self.allowednames:
+ if self.directories:
+ files = _wrapcall(['bash','-c',
+ "compgen -A directory -- '{p}'".format(p=prefix)])
+ completion += [ f + '/' for f in files]
+ for x in self.allowednames:
+ completion += _wrapcall(['bash', '-c',
+ "compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)])
+ else:
+ completion += _wrapcall(['bash', '-c',
+ "compgen -A file -- '{p}'".format(p=prefix)])
+
+ anticomp = _wrapcall(['bash', '-c',
+ "compgen -A directory -- '{p}'".format(p=prefix)])
+
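+ # directories are subtracted from the plain file matches here and re-added
+ # below with a trailing '/' when directory completion is enabled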
+ completion = list( set(completion) - set(anticomp))
+
+ if self.directories:
+ completion += [f + '/' for f in anticomp]
+ return completion
+
+class TestArgComplete:
+ @pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
+ def test_compare_with_compgen(self):
+ from _pytest._argcomplete import FastFilesCompleter
+ ffc = FastFilesCompleter()
+ fc = FilesCompleter()
+ for x in '/ /d /data qqq'.split():
+ assert equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
+
+ @pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
+ def test_remove_dir_prefix(self):
+ """this is not compatible with compgen but it is with bash itself:
+ ls /usr/<TAB>
+ """
+ from _pytest._argcomplete import FastFilesCompleter
+ ffc = FastFilesCompleter()
+ fc = FilesCompleter()
+ for x in '/usr/'.split():
+ assert not equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_assertinterpret.py b/testing/web-platform/tests/tools/pytest/testing/test_assertinterpret.py
new file mode 100644
index 000000000..67a352ce7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_assertinterpret.py
@@ -0,0 +1,274 @@
+"PYTEST_DONT_REWRITE"
+import py
+import pytest
+from _pytest.assertion import util
+
+
+def exvalue():
+ return py.std.sys.exc_info()[1]
+
+def f():
+ return 2
+
+def test_not_being_rewritten():
+ assert "@py_builtins" not in globals()
+
+def test_assert():
+ try:
+ assert f() == 3
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 == 3\n')
+
+def test_assert_with_explicit_message():
+ try:
+ assert f() == 3, "hello"
+ except AssertionError:
+ e = exvalue()
+ assert e.msg == 'hello'
+
+def test_assert_within_finally():
+ excinfo = pytest.raises(ZeroDivisionError, """
+ try:
+ 1/0
+ finally:
+ i = 42
+ """)
+ s = excinfo.exconly()
+ assert py.std.re.search("division.+by zero", s) is not None
+
+ #def g():
+ # A.f()
+ #excinfo = getexcinfo(TypeError, g)
+ #msg = getmsg(excinfo)
+ #assert msg.find("must be called with A") != -1
+
+
+def test_assert_multiline_1():
+ try:
+ assert (f() ==
+ 3)
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 == 3\n')
+
+def test_assert_multiline_2():
+ try:
+ assert (f() == (4,
+ 3)[-1])
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith('assert 2 ==')
+
+def test_in():
+ try:
+ assert "hi" in [1, 2]
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 'hi' in")
+
+def test_is():
+ try:
+ assert 1 is 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 is 2")
+
+
+def test_attrib():
+ class Foo(object):
+ b = 1
+ i = Foo()
+ try:
+ assert i.b == 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 == 2")
+
+def test_attrib_inst():
+ class Foo(object):
+ b = 1
+ try:
+ assert Foo().b == 2
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 1 == 2")
+
+def test_len():
+ l = list(range(42))
+ try:
+ assert len(l) == 100
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert s.startswith("assert 42 == 100")
+ assert "where 42 = len([" in s
+
+def test_assert_non_string_message():
+ class A:
+ def __str__(self):
+ return "hello"
+ try:
+ assert 0 == 1, A()
+ except AssertionError:
+ e = exvalue()
+ assert e.msg == "hello"
+
+def test_assert_keyword_arg():
+ def f(x=3):
+ return False
+ try:
+ assert f(x=5)
+ except AssertionError:
+ e = exvalue()
+ assert "x=5" in e.msg
+
+def test_private_class_variable():
+ class X:
+ def __init__(self):
+ self.__v = 41
+ def m(self):
+ assert self.__v == 42
+ try:
+ X().m()
+ except AssertionError:
+ e = exvalue()
+ assert "== 42" in e.msg
+
+# These tests should both fail, but should fail nicely...
+class WeirdRepr:
+ def __repr__(self):
+ return '<WeirdRepr\nsecond line>'
+
+def bug_test_assert_repr():
+ v = WeirdRepr()
+ try:
+ assert v == 1
+ except AssertionError:
+ e = exvalue()
+ assert e.msg.find('WeirdRepr') != -1
+ assert e.msg.find('second line') != -1
+ assert 0
+
+def test_assert_non_string():
+ try:
+ assert 0, ['list']
+ except AssertionError:
+ e = exvalue()
+ assert e.msg.find("list") != -1
+
+def test_assert_implicit_multiline():
+ try:
+ x = [1,2,3]
+ assert x != [1,
+ 2, 3]
+ except AssertionError:
+ e = exvalue()
+ assert e.msg.find('assert [1, 2, 3] !=') != -1
+
+
+def test_assert_with_brokenrepr_arg():
+ class BrokenRepr:
+ def __repr__(self): 0 / 0
+ e = AssertionError(BrokenRepr())
+ if e.msg.find("broken __repr__") == -1:
+ pytest.fail("broken __repr__ not handle correctly")
+
+def test_multiple_statements_per_line():
+ try:
+ a = 1; assert a == 2
+ except AssertionError:
+ e = exvalue()
+ assert "assert 1 == 2" in e.msg
+
+def test_power():
+ try:
+ assert 2**3 == 7
+ except AssertionError:
+ e = exvalue()
+ assert "assert (2 ** 3) == 7" in e.msg
+
+
+def test_assert_customizable_reprcompare(monkeypatch):
+ monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
+ try:
+ assert 3 == 4
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert "hello" in s
+
+def test_assert_long_source_1():
+ try:
+ assert len == [
+ (None, ['somet text', 'more text']),
+ ]
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert 're-run' not in s
+ assert 'somet text' in s
+
+def test_assert_long_source_2():
+ try:
+ assert(len == [
+ (None, ['somet text', 'more text']),
+ ])
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert 're-run' not in s
+ assert 'somet text' in s
+
+def test_assert_raise_alias(testdir):
+ testdir.makepyfile("""
+ "PYTEST_DONT_REWRITE"
+ import sys
+ EX = AssertionError
+ def test_hello():
+ raise EX("hello"
+ "multi"
+ "line")
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*def test_hello*",
+ "*raise EX*",
+ "*1 failed*",
+ ])
+
+
+def test_assert_raise_subclass():
+ class SomeEx(AssertionError):
+ def __init__(self, *args):
+ super(SomeEx, self).__init__()
+ try:
+ raise SomeEx("hello")
+ except AssertionError:
+ s = str(exvalue())
+ assert 're-run' not in s
+ assert 'could not determine' in s
+
+def test_assert_raises_in_nonzero_of_object_pytest_issue10():
+ class A(object):
+ def __nonzero__(self):
+ raise ValueError(42)
+ def __lt__(self, other):
+ return A()
+ def __repr__(self):
+ return "<MY42 object>"
+ def myany(x):
+ return True
+ try:
+ assert not(myany(A() < 0))
+ except AssertionError:
+ e = exvalue()
+ s = str(e)
+ assert "<MY42 object> < 0" in s
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_assertion.py b/testing/web-platform/tests/tools/pytest/testing/test_assertion.py
new file mode 100644
index 000000000..347278e19
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_assertion.py
@@ -0,0 +1,628 @@
+# -*- coding: utf-8 -*-
+import sys
+import textwrap
+
+import _pytest.assertion as plugin
+import _pytest._code
+import py
+import pytest
+from _pytest.assertion import reinterpret
+from _pytest.assertion import util
+
+PY3 = sys.version_info >= (3, 0)
+
+
+@pytest.fixture
+def mock_config():
+ class Config(object):
+ verbose = False
+ def getoption(self, name):
+ if name == 'verbose':
+ return self.verbose
+ raise KeyError('Not mocked out: %s' % name)
+ return Config()
+
+
+def interpret(expr):
+ return reinterpret.reinterpret(expr, _pytest._code.Frame(sys._getframe(1)))
+
+class TestBinReprIntegration:
+
+ def test_pytest_assertrepr_compare_called(self, testdir):
+ testdir.makeconftest("""
+ l = []
+ def pytest_assertrepr_compare(op, left, right):
+ l.append((op, left, right))
+ def pytest_funcarg__l(request):
+ return l
+ """)
+ testdir.makepyfile("""
+ def test_hello():
+ assert 0 == 1
+ def test_check(l):
+ assert l == [("==", 0, 1)]
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "*test_hello*FAIL*",
+ "*test_check*PASS*",
+ ])
+
+def callequal(left, right, verbose=False):
+ config = mock_config()
+ config.verbose = verbose
+ return plugin.pytest_assertrepr_compare(config, '==', left, right)
+
+
+class TestAssert_reprcompare:
+ def test_different_types(self):
+ assert callequal([0, 1], 'foo') is None
+
+ def test_summary(self):
+ summary = callequal([0, 1], [0, 2])[0]
+ assert len(summary) < 65
+
+ def test_text_diff(self):
+ diff = callequal('spam', 'eggs')[1:]
+ assert '- spam' in diff
+ assert '+ eggs' in diff
+
+ def test_text_skipping(self):
+ lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs')
+ assert 'Skipping' in lines[1]
+ for line in lines:
+ assert 'a'*50 not in line
+
+ def test_text_skipping_verbose(self):
+ lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs', verbose=True)
+ assert '- ' + 'a'*50 + 'spam' in lines
+ assert '+ ' + 'a'*50 + 'eggs' in lines
+
+ def test_multiline_text_diff(self):
+ left = 'foo\nspam\nbar'
+ right = 'foo\neggs\nbar'
+ diff = callequal(left, right)
+ assert '- spam' in diff
+ assert '+ eggs' in diff
+
+ def test_list(self):
+ expl = callequal([0, 1], [0, 2])
+ assert len(expl) > 1
+
+ @pytest.mark.parametrize(
+ ['left', 'right', 'expected'], [
+ ([0, 1], [0, 2], """
+ Full diff:
+ - [0, 1]
+ ? ^
+ + [0, 2]
+ ? ^
+ """),
+ ({0: 1}, {0: 2}, """
+ Full diff:
+ - {0: 1}
+ ? ^
+ + {0: 2}
+ ? ^
+ """),
+ (set([0, 1]), set([0, 2]), """
+ Full diff:
+ - set([0, 1])
+ ? ^
+ + set([0, 2])
+ ? ^
+ """ if not PY3 else """
+ Full diff:
+ - {0, 1}
+ ? ^
+ + {0, 2}
+ ? ^
+ """)
+ ]
+ )
+ def test_iterable_full_diff(self, left, right, expected):
+ """Test the full diff assertion failure explanation.
+
+ When verbose is False, only a notice to re-run with -v for the full diff is rendered;
+ when verbose is True, the ndiff of the pprint output is returned.
+ """
+ expl = callequal(left, right, verbose=False)
+ assert expl[-1] == 'Use -v to get the full diff'
+ expl = '\n'.join(callequal(left, right, verbose=True))
+ assert expl.endswith(textwrap.dedent(expected).strip())
+
+ def test_list_different_lengths(self):
+ expl = callequal([0, 1], [0, 1, 2])
+ assert len(expl) > 1
+ expl = callequal([0, 1, 2], [0, 1])
+ assert len(expl) > 1
+
+ def test_dict(self):
+ expl = callequal({'a': 0}, {'a': 1})
+ assert len(expl) > 1
+
+ def test_dict_omitting(self):
+ lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1})
+ assert lines[1].startswith('Omitting 1 identical item')
+ assert 'Common items' not in lines
+ for line in lines[1:]:
+ assert 'b' not in line
+
+ def test_dict_omitting_verbose(self):
+ lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=True)
+ assert lines[1].startswith('Common items:')
+ assert 'Omitting' not in lines[1]
+ assert lines[2] == "{'b': 1}"
+
+ def test_set(self):
+ expl = callequal(set([0, 1]), set([0, 2]))
+ assert len(expl) > 1
+
+ def test_frozenset(self):
+ expl = callequal(frozenset([0, 1]), set([0, 2]))
+ assert len(expl) > 1
+
+ def test_Sequence(self):
+ col = py.builtin._tryimport(
+ "collections.abc",
+ "collections",
+ "sys")
+ if not hasattr(col, "MutableSequence"):
+ pytest.skip("cannot import MutableSequence")
+ MutableSequence = col.MutableSequence
+
+ class TestSequence(MutableSequence): # works with a Sequence subclass
+ def __init__(self, iterable):
+ self.elements = list(iterable)
+
+ def __getitem__(self, item):
+ return self.elements[item]
+
+ def __len__(self):
+ return len(self.elements)
+
+ def __setitem__(self, item, value):
+ pass
+
+ def __delitem__(self, item):
+ pass
+
+ def insert(self, item, index):
+ pass
+
+ expl = callequal(TestSequence([0, 1]), list([0, 2]))
+ assert len(expl) > 1
+
+ def test_list_tuples(self):
+ expl = callequal([], [(1,2)])
+ assert len(expl) > 1
+ expl = callequal([(1,2)], [])
+ assert len(expl) > 1
+
+ def test_list_bad_repr(self):
+ class A:
+ def __repr__(self):
+ raise ValueError(42)
+ expl = callequal([], [A()])
+ assert 'ValueError' in "".join(expl)
+ expl = callequal({}, {'1': A()})
+ assert 'faulty' in "".join(expl)
+
+ def test_one_repr_empty(self):
+ """
+ the faulty empty string repr used to trigger
+ an unbound local error in _diff_text
+ """
+ class A(str):
+ def __repr__(self):
+ return ''
+ expl = callequal(A(), '')
+ assert not expl
+
+ def test_repr_no_exc(self):
+ expl = ' '.join(callequal('foo', 'bar'))
+ assert 'raised in repr()' not in expl
+
+ def test_unicode(self):
+ left = py.builtin._totext('£€', 'utf-8')
+ right = py.builtin._totext('£', 'utf-8')
+ expl = callequal(left, right)
+ assert expl[0] == py.builtin._totext("'£€' == '£'", 'utf-8')
+ assert expl[1] == py.builtin._totext('- £€', 'utf-8')
+ assert expl[2] == py.builtin._totext('+ £', 'utf-8')
+
+ def test_nonascii_text(self):
+ """
+ :issue: 877
+ a non-ascii python2 str caused a UnicodeDecodeError
+ """
+ class A(str):
+ def __repr__(self):
+ return '\xff'
+ expl = callequal(A(), '1')
+ assert expl
+
+ def test_format_nonascii_explanation(self):
+ assert util.format_explanation('λ')
+
+ def test_mojibake(self):
+ # issue 429
+ left = 'e'
+ right = '\xc3\xa9'
+ if not isinstance(left, py.builtin.bytes):
+ left = py.builtin.bytes(left, 'utf-8')
+ right = py.builtin.bytes(right, 'utf-8')
+ expl = callequal(left, right)
+ for line in expl:
+ assert isinstance(line, py.builtin.text)
+ msg = py.builtin._totext('\n').join(expl)
+ assert msg
+
+
+class TestFormatExplanation:
+
+ def test_special_chars_full(self, testdir):
+ # Issue 453, for the bug this would raise IndexError
+ testdir.makepyfile("""
+ def test_foo():
+ assert '\\n}' == ''
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*AssertionError*",
+ ])
+
+ def test_fmt_simple(self):
+ expl = 'assert foo'
+ assert util.format_explanation(expl) == 'assert foo'
+
+ def test_fmt_where(self):
+ expl = '\n'.join(['assert 1',
+ '{1 = foo',
+ '} == 2'])
+ res = '\n'.join(['assert 1 == 2',
+ ' + where 1 = foo'])
+ assert util.format_explanation(expl) == res
+
+ def test_fmt_and(self):
+ expl = '\n'.join(['assert 1',
+ '{1 = foo',
+ '} == 2',
+ '{2 = bar',
+ '}'])
+ res = '\n'.join(['assert 1 == 2',
+ ' + where 1 = foo',
+ ' + and 2 = bar'])
+ assert util.format_explanation(expl) == res
+
+ def test_fmt_where_nested(self):
+ expl = '\n'.join(['assert 1',
+ '{1 = foo',
+ '{foo = bar',
+ '}',
+ '} == 2'])
+ res = '\n'.join(['assert 1 == 2',
+ ' + where 1 = foo',
+ ' + where foo = bar'])
+ assert util.format_explanation(expl) == res
+
+ def test_fmt_newline(self):
+ expl = '\n'.join(['assert "foo" == "bar"',
+ '~- foo',
+ '~+ bar'])
+ res = '\n'.join(['assert "foo" == "bar"',
+ ' - foo',
+ ' + bar'])
+ assert util.format_explanation(expl) == res
+
+ def test_fmt_newline_escaped(self):
+ expl = '\n'.join(['assert foo == bar',
+ 'baz'])
+ res = 'assert foo == bar\\nbaz'
+ assert util.format_explanation(expl) == res
+
+ def test_fmt_newline_before_where(self):
+ expl = '\n'.join(['the assertion message here',
+ '>assert 1',
+ '{1 = foo',
+ '} == 2',
+ '{2 = bar',
+ '}'])
+ res = '\n'.join(['the assertion message here',
+ 'assert 1 == 2',
+ ' + where 1 = foo',
+ ' + and 2 = bar'])
+ assert util.format_explanation(expl) == res
+
+ def test_fmt_multi_newline_before_where(self):
+ expl = '\n'.join(['the assertion',
+ '~message here',
+ '>assert 1',
+ '{1 = foo',
+ '} == 2',
+ '{2 = bar',
+ '}'])
+ res = '\n'.join(['the assertion',
+ ' message here',
+ 'assert 1 == 2',
+ ' + where 1 = foo',
+ ' + and 2 = bar'])
+ assert util.format_explanation(expl) == res
+
+
+def test_python25_compile_issue257(testdir):
+ testdir.makepyfile("""
+ def test_rewritten():
+ assert 1 == 2
+ # some comment
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ result.stdout.fnmatch_lines("""
+ *E*assert 1 == 2*
+ *1 failed*
+ """)
+
+def test_rewritten(testdir):
+ testdir.makepyfile("""
+ def test_rewritten():
+ assert "@py_builtins" in globals()
+ """)
+ assert testdir.runpytest().ret == 0
+
+def test_reprcompare_notin(mock_config):
+ detail = plugin.pytest_assertrepr_compare(
+ mock_config, 'not in', 'foo', 'aaafoobbb')[1:]
+ assert detail == ["'foo' is contained here:", ' aaafoobbb', '? +++']
+
+def test_pytest_assertrepr_compare_integration(testdir):
+ testdir.makepyfile("""
+ def test_hello():
+ x = set(range(100))
+ y = x.copy()
+ y.remove(50)
+ assert x == y
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*def test_hello():*",
+ "*assert x == y*",
+ "*E*Extra items*left*",
+ "*E*50*",
+ ])
+
+def test_sequence_comparison_uses_repr(testdir):
+ testdir.makepyfile("""
+ def test_hello():
+ x = set("hello x")
+ y = set("hello y")
+ assert x == y
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*def test_hello():*",
+ "*assert x == y*",
+ "*E*Extra items*left*",
+ "*E*'x'*",
+ "*E*Extra items*right*",
+ "*E*'y'*",
+ ])
+
+
+def test_assert_compare_truncate_longmessage(monkeypatch, testdir):
+ testdir.makepyfile(r"""
+ def test_long():
+ a = list(range(200))
+ b = a[::2]
+ a = '\n'.join(map(str, a))
+ b = '\n'.join(map(str, b))
+ assert a == b
+ """)
+ monkeypatch.delenv('CI', raising=False)
+
+ result = testdir.runpytest()
+ # without -vv, truncate the message showing a few diff lines only
+ result.stdout.fnmatch_lines([
+ "*- 1",
+ "*- 3",
+ "*- 5",
+ "*- 7",
+ "*truncated (191 more lines)*use*-vv*",
+ ])
+
+
+ result = testdir.runpytest('-vv')
+ result.stdout.fnmatch_lines([
+ "*- 197",
+ ])
+
+ monkeypatch.setenv('CI', '1')
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*- 197",
+ ])
+
+
+def test_assertrepr_loaded_per_dir(testdir):
+ testdir.makepyfile(test_base=['def test_base(): assert 1 == 2'])
+ a = testdir.mkdir('a')
+ a_test = a.join('test_a.py')
+ a_test.write('def test_a(): assert 1 == 2')
+ a_conftest = a.join('conftest.py')
+ a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]')
+ b = testdir.mkdir('b')
+ b_test = b.join('test_b.py')
+ b_test.write('def test_b(): assert 1 == 2')
+ b_conftest = b.join('conftest.py')
+ b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]')
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ '*def test_base():*',
+ '*E*assert 1 == 2*',
+ '*def test_a():*',
+ '*E*assert summary a*',
+ '*def test_b():*',
+ '*E*assert summary b*'])
+
+
+def test_assertion_options(testdir):
+ testdir.makepyfile("""
+ def test_hello():
+ x = 3
+ assert x == 4
+ """)
+ result = testdir.runpytest()
+ assert "3 == 4" in result.stdout.str()
+ off_options = (("--no-assert",),
+ ("--nomagic",),
+ ("--no-assert", "--nomagic"),
+ ("--assert=plain",),
+ ("--assert=plain", "--no-assert"),
+ ("--assert=plain", "--nomagic"),
+ ("--assert=plain", "--no-assert", "--nomagic"))
+ for opt in off_options:
+ result = testdir.runpytest_subprocess(*opt)
+ assert "3 == 4" not in result.stdout.str()
+
+def test_old_assert_mode(testdir):
+ testdir.makepyfile("""
+ def test_in_old_mode():
+ assert "@py_builtins" not in globals()
+ """)
+ result = testdir.runpytest_subprocess("--assert=reinterp")
+ assert result.ret == 0
+
+def test_triple_quoted_string_issue113(testdir):
+ testdir.makepyfile("""
+ def test_hello():
+ assert "" == '''
+ '''""")
+ result = testdir.runpytest("--fulltrace")
+ result.stdout.fnmatch_lines([
+ "*1 failed*",
+ ])
+ assert 'SyntaxError' not in result.stdout.str()
+
+def test_traceback_failure(testdir):
+ p1 = testdir.makepyfile("""
+ def g():
+ return 2
+ def f(x):
+ assert x == g()
+ def test_onefails():
+ f(3)
+ """)
+ result = testdir.runpytest(p1, "--tb=long")
+ result.stdout.fnmatch_lines([
+ "*test_traceback_failure.py F",
+ "====* FAILURES *====",
+ "____*____",
+ "",
+ " def test_onefails():",
+ "> f(3)",
+ "",
+ "*test_*.py:6: ",
+ "_ _ _ *",
+ #"",
+ " def f(x):",
+ "> assert x == g()",
+ "E assert 3 == 2",
+ "E + where 2 = g()",
+ "",
+ "*test_traceback_failure.py:4: AssertionError"
+ ])
+
+ result = testdir.runpytest(p1) # "auto"
+ result.stdout.fnmatch_lines([
+ "*test_traceback_failure.py F",
+ "====* FAILURES *====",
+ "____*____",
+ "",
+ " def test_onefails():",
+ "> f(3)",
+ "",
+ "*test_*.py:6: ",
+ "",
+ " def f(x):",
+ "> assert x == g()",
+ "E assert 3 == 2",
+ "E + where 2 = g()",
+ "",
+ "*test_traceback_failure.py:4: AssertionError"
+ ])
+
+@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" )
+def test_warn_missing(testdir):
+ testdir.makepyfile("")
+ result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h")
+ result.stderr.fnmatch_lines([
+ "*WARNING*assert statements are not executed*",
+ ])
+ result = testdir.run(sys.executable, "-OO", "-m", "pytest", "--no-assert")
+ result.stderr.fnmatch_lines([
+ "*WARNING*assert statements are not executed*",
+ ])
+
+def test_recursion_source_decode(testdir):
+ testdir.makepyfile("""
+ def test_something():
+ pass
+ """)
+ testdir.makeini("""
+ [pytest]
+ python_files = *.py
+ """)
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines("""
+ <Module*>
+ """)
+
+def test_AssertionError_message(testdir):
+ testdir.makepyfile("""
+ def test_hello():
+ x,y = 1,2
+ assert 0, (x,y)
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("""
+ *def test_hello*
+ *assert 0, (x,y)*
+ *AssertionError: (1, 2)*
+ """)
+
+@pytest.mark.skipif(PY3, reason='This bug does not exist on PY3')
+def test_set_with_unsortable_elements():
+ # issue #718
+ class UnsortableKey(object):
+ def __init__(self, name):
+ self.name = name
+
+ def __lt__(self, other):
+ raise RuntimeError()
+
+ def __repr__(self):
+ return 'repr({0})'.format(self.name)
+
+ def __eq__(self, other):
+ return self.name == other.name
+
+ def __hash__(self):
+ return hash(self.name)
+
+ left_set = set(UnsortableKey(str(i)) for i in range(1, 3))
+ right_set = set(UnsortableKey(str(i)) for i in range(2, 4))
+ expl = callequal(left_set, right_set, verbose=True)
+ # skip first line because it contains the "construction" of the set, which does not have a guaranteed order
+ expl = expl[1:]
+ dedent = textwrap.dedent("""
+ Extra items in the left set:
+ repr(1)
+ Extra items in the right set:
+ repr(3)
+ Full diff (fallback to calling repr on each item):
+ - repr(1)
+ repr(2)
+ + repr(3)
+ """).strip()
+ assert '\n'.join(expl) == dedent
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_assertrewrite.py b/testing/web-platform/tests/tools/pytest/testing/test_assertrewrite.py
new file mode 100644
index 000000000..f43c424ca
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_assertrewrite.py
@@ -0,0 +1,716 @@
+import os
+import stat
+import sys
+import zipfile
+import py
+import pytest
+
+ast = pytest.importorskip("ast")
+if sys.platform.startswith("java"):
+ # XXX should be xfail
+ pytest.skip("assert rewrite does currently not work on jython")
+
+import _pytest._code
+from _pytest.assertion import util
+from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+
+
+def setup_module(mod):
+ mod._old_reprcompare = util._reprcompare
+ _pytest._code._reprcompare = None
+
+def teardown_module(mod):
+ util._reprcompare = mod._old_reprcompare
+ del mod._old_reprcompare
+
+
+def rewrite(src):
+ tree = ast.parse(src)
+ rewrite_asserts(tree)
+ return tree
+
+def getmsg(f, extra_ns=None, must_pass=False):
+ """Rewrite the assertions in f, run it, and get the failure message."""
+ src = '\n'.join(_pytest._code.Code(f).source().lines)
+ mod = rewrite(src)
+ code = compile(mod, "<test>", "exec")
+ ns = {}
+ if extra_ns is not None:
+ ns.update(extra_ns)
+ py.builtin.exec_(code, ns)
+ func = ns[f.__name__]
+ try:
+ func()
+ except AssertionError:
+ if must_pass:
+ pytest.fail("shouldn't have raised")
+ s = str(sys.exc_info()[1])
+ if not s.startswith("assert"):
+ return "AssertionError: " + s
+ return s
+ else:
+ if not must_pass:
+ pytest.fail("function didn't raise at all")
+
+
+class TestAssertionRewrite:
+
+ def test_place_initial_imports(self):
+ s = """'Doc string'\nother = stuff"""
+ m = rewrite(s)
+ assert isinstance(m.body[0], ast.Expr)
+ assert isinstance(m.body[0].value, ast.Str)
+ for imp in m.body[1:3]:
+ assert isinstance(imp, ast.Import)
+ assert imp.lineno == 2
+ assert imp.col_offset == 0
+ assert isinstance(m.body[3], ast.Assign)
+ s = """from __future__ import with_statement\nother_stuff"""
+ m = rewrite(s)
+ assert isinstance(m.body[0], ast.ImportFrom)
+ for imp in m.body[1:3]:
+ assert isinstance(imp, ast.Import)
+ assert imp.lineno == 2
+ assert imp.col_offset == 0
+ assert isinstance(m.body[3], ast.Expr)
+ s = """'doc string'\nfrom __future__ import with_statement\nother"""
+ m = rewrite(s)
+ assert isinstance(m.body[0], ast.Expr)
+ assert isinstance(m.body[0].value, ast.Str)
+ assert isinstance(m.body[1], ast.ImportFrom)
+ for imp in m.body[2:4]:
+ assert isinstance(imp, ast.Import)
+ assert imp.lineno == 3
+ assert imp.col_offset == 0
+ assert isinstance(m.body[4], ast.Expr)
+ s = """from . import relative\nother_stuff"""
+ m = rewrite(s)
+ for imp in m.body[0:2]:
+ assert isinstance(imp, ast.Import)
+ assert imp.lineno == 1
+ assert imp.col_offset == 0
+ assert isinstance(m.body[3], ast.Expr)
+
+ def test_dont_rewrite(self):
+ s = """'PYTEST_DONT_REWRITE'\nassert 14"""
+ m = rewrite(s)
+ assert len(m.body) == 2
+ assert isinstance(m.body[0].value, ast.Str)
+ assert isinstance(m.body[1], ast.Assert)
+ assert m.body[1].msg is None
+
+ def test_name(self):
+ def f():
+ assert False
+ assert getmsg(f) == "assert False"
+ def f():
+ f = False
+ assert f
+ assert getmsg(f) == "assert False"
+ def f():
+ assert a_global # noqa
+ assert getmsg(f, {"a_global" : False}) == "assert False"
+ def f():
+ assert sys == 42
+ assert getmsg(f, {"sys" : sys}) == "assert sys == 42"
+ def f():
+ assert cls == 42 # noqa
+ class X(object):
+ pass
+ assert getmsg(f, {"cls" : X}) == "assert cls == 42"
+
+ def test_assert_already_has_message(self):
+ def f():
+ assert False, "something bad!"
+ assert getmsg(f) == "AssertionError: something bad!\nassert False"
+
+ def test_assertion_message(self, testdir):
+ testdir.makepyfile("""
+ def test_foo():
+ assert 1 == 2, "The failure message"
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*AssertionError*The failure message*",
+ "*assert 1 == 2*",
+ ])
+
+ def test_assertion_message_multiline(self, testdir):
+ testdir.makepyfile("""
+ def test_foo():
+ assert 1 == 2, "A multiline\\nfailure message"
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*AssertionError*A multiline*",
+ "*failure message*",
+ "*assert 1 == 2*",
+ ])
+
+ def test_assertion_message_tuple(self, testdir):
+ testdir.makepyfile("""
+ def test_foo():
+ assert 1 == 2, (1, 2)
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*AssertionError*%s*" % repr((1, 2)),
+ "*assert 1 == 2*",
+ ])
+
+ def test_assertion_message_expr(self, testdir):
+ testdir.makepyfile("""
+ def test_foo():
+ assert 1 == 2, 1 + 2
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*AssertionError*3*",
+ "*assert 1 == 2*",
+ ])
+
+ def test_assertion_message_escape(self, testdir):
+ testdir.makepyfile("""
+ def test_foo():
+ assert 1 == 2, 'To be escaped: %'
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*AssertionError: To be escaped: %",
+ "*assert 1 == 2",
+ ])
+
+ def test_boolop(self):
+ def f():
+ f = g = False
+ assert f and g
+ assert getmsg(f) == "assert (False)"
+ def f():
+ f = True
+ g = False
+ assert f and g
+ assert getmsg(f) == "assert (True and False)"
+ def f():
+ f = False
+ g = True
+ assert f and g
+ assert getmsg(f) == "assert (False)"
+ def f():
+ f = g = False
+ assert f or g
+ assert getmsg(f) == "assert (False or False)"
+ def f():
+ f = g = False
+ assert not f and not g
+ getmsg(f, must_pass=True)
+ def x():
+ return False
+ def f():
+ assert x() and x()
+ assert getmsg(f, {"x" : x}) == "assert (x())"
+ def f():
+ assert False or x()
+ assert getmsg(f, {"x" : x}) == "assert (False or x())"
+ def f():
+ assert 1 in {} and 2 in {}
+ assert getmsg(f) == "assert (1 in {})"
+ def f():
+ x = 1
+ y = 2
+ assert x in {1 : None} and y in {}
+ assert getmsg(f) == "assert (1 in {1: None} and 2 in {})"
+ def f():
+ f = True
+ g = False
+ assert f or g
+ getmsg(f, must_pass=True)
+ def f():
+ f = g = h = lambda: True
+ assert f() and g() and h()
+ getmsg(f, must_pass=True)
+
+ def test_short_circut_evaluation(self):
+ def f():
+ assert True or explode # noqa
+ getmsg(f, must_pass=True)
+ def f():
+ x = 1
+ assert x == 1 or x == 2
+ getmsg(f, must_pass=True)
+
+ def test_unary_op(self):
+ def f():
+ x = True
+ assert not x
+ assert getmsg(f) == "assert not True"
+ def f():
+ x = 0
+ assert ~x + 1
+ assert getmsg(f) == "assert (~0 + 1)"
+ def f():
+ x = 3
+ assert -x + x
+ assert getmsg(f) == "assert (-3 + 3)"
+ def f():
+ x = 0
+ assert +x + x
+ assert getmsg(f) == "assert (+0 + 0)"
+
+ def test_binary_op(self):
+ def f():
+ x = 1
+ y = -1
+ assert x + y
+ assert getmsg(f) == "assert (1 + -1)"
+ def f():
+ assert not 5 % 4
+ assert getmsg(f) == "assert not (5 % 4)"
+
+ def test_boolop_percent(self):
+ def f():
+ assert 3 % 2 and False
+ assert getmsg(f) == "assert ((3 % 2) and False)"
+ def f():
+ assert False or 4 % 2
+ assert getmsg(f) == "assert (False or (4 % 2))"
+
+ @pytest.mark.skipif("sys.version_info < (3,5)")
+ def test_at_operator_issue1290(self, testdir):
+ testdir.makepyfile("""
+ class Matrix:
+ def __init__(self, num):
+ self.num = num
+ def __matmul__(self, other):
+ return self.num * other.num
+
+ def test_multmat_operator():
+ assert Matrix(2) @ Matrix(3) == 6""")
+ testdir.runpytest().assert_outcomes(passed=1)
+
+ def test_call(self):
+ def g(a=42, *args, **kwargs):
+ return False
+ ns = {"g" : g}
+ def f():
+ assert g()
+ assert getmsg(f, ns) == """assert g()"""
+ def f():
+ assert g(1)
+ assert getmsg(f, ns) == """assert g(1)"""
+ def f():
+ assert g(1, 2)
+ assert getmsg(f, ns) == """assert g(1, 2)"""
+ def f():
+ assert g(1, g=42)
+ assert getmsg(f, ns) == """assert g(1, g=42)"""
+ def f():
+ assert g(1, 3, g=23)
+ assert getmsg(f, ns) == """assert g(1, 3, g=23)"""
+ def f():
+ seq = [1, 2, 3]
+ assert g(*seq)
+ assert getmsg(f, ns) == """assert g(*[1, 2, 3])"""
+ def f():
+ x = "a"
+ assert g(**{x : 2})
+ assert getmsg(f, ns) == """assert g(**{'a': 2})"""
+
+ def test_attribute(self):
+ class X(object):
+ g = 3
+ ns = {"x" : X}
+ def f():
+ assert not x.g # noqa
+ assert getmsg(f, ns) == """assert not 3
+ + where 3 = x.g"""
+ def f():
+ x.a = False # noqa
+ assert x.a # noqa
+ assert getmsg(f, ns) == """assert x.a"""
+
+ def test_comparisons(self):
+ def f():
+ a, b = range(2)
+ assert b < a
+ assert getmsg(f) == """assert 1 < 0"""
+ def f():
+ a, b, c = range(3)
+ assert a > b > c
+ assert getmsg(f) == """assert 0 > 1"""
+ def f():
+ a, b, c = range(3)
+ assert a < b > c
+ assert getmsg(f) == """assert 1 > 2"""
+ def f():
+ a, b, c = range(3)
+ assert a < b <= c
+ getmsg(f, must_pass=True)
+ def f():
+ a, b, c = range(3)
+ assert a < b
+ assert b < c
+ getmsg(f, must_pass=True)
+
+ def test_len(self):
+ def f():
+ l = list(range(10))
+ assert len(l) == 11
+ assert getmsg(f).startswith("""assert 10 == 11
+ + where 10 = len([""")
+
+ def test_custom_reprcompare(self, monkeypatch):
+ def my_reprcompare(op, left, right):
+ return "42"
+ monkeypatch.setattr(util, "_reprcompare", my_reprcompare)
+ def f():
+ assert 42 < 3
+ assert getmsg(f) == "assert 42"
+ def my_reprcompare(op, left, right):
+ return "%s %s %s" % (left, op, right)
+ monkeypatch.setattr(util, "_reprcompare", my_reprcompare)
+ def f():
+ assert 1 < 3 < 5 <= 4 < 7
+ assert getmsg(f) == "assert 5 <= 4"
+
+ def test_assert_raising_nonzero_in_comparison(self):
+ def f():
+ class A(object):
+ def __nonzero__(self):
+ raise ValueError(42)
+ def __lt__(self, other):
+ return A()
+ def __repr__(self):
+ return "<MY42 object>"
+ def myany(x):
+ return False
+ assert myany(A() < 0)
+ assert "<MY42 object> < 0" in getmsg(f)
+
+ def test_formatchar(self):
+ def f():
+ assert "%test" == "test"
+ assert getmsg(f).startswith("assert '%test' == 'test'")
+
+ def test_custom_repr(self):
+ def f():
+ class Foo(object):
+ a = 1
+
+ def __repr__(self):
+ return "\n{ \n~ \n}"
+ f = Foo()
+ assert 0 == f.a
+ assert r"where 1 = \n{ \n~ \n}.a" in util._format_lines([getmsg(f)])[0]
+
+
+class TestRewriteOnImport:
+
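+ # Each of these tests imports a module through pytest and then checks for
+ # the "@py_builtins" global, which the assertion rewriter injects; its
+ # presence proves the module really was rewritten on import.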
+ def test_pycache_is_a_file(self, testdir):
+ testdir.tmpdir.join("__pycache__").write("Hello")
+ testdir.makepyfile("""
+ def test_rewritten():
+ assert "@py_builtins" in globals()""")
+ assert testdir.runpytest().ret == 0
+
+ def test_pycache_is_readonly(self, testdir):
+ cache = testdir.tmpdir.mkdir("__pycache__")
+ old_mode = cache.stat().mode
+ cache.chmod(old_mode ^ stat.S_IWRITE)
+ testdir.makepyfile("""
+ def test_rewritten():
+ assert "@py_builtins" in globals()""")
+ try:
+ assert testdir.runpytest().ret == 0
+ finally:
+ cache.chmod(old_mode)
+
+ def test_zipfile(self, testdir):
+ z = testdir.tmpdir.join("myzip.zip")
+ z_fn = str(z)
+ f = zipfile.ZipFile(z_fn, "w")
+ try:
+ f.writestr("test_gum/__init__.py", "")
+ f.writestr("test_gum/test_lizard.py", "")
+ finally:
+ f.close()
+ z.chmod(256)
+ testdir.makepyfile("""
+ import sys
+ sys.path.append(%r)
+ import test_gum.test_lizard""" % (z_fn,))
+ assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED
+
+ def test_readonly(self, testdir):
+ sub = testdir.mkdir("testing")
+ sub.join("test_readonly.py").write(
+ py.builtin._totext("""
+def test_rewritten():
+ assert "@py_builtins" in globals()
+ """).encode("utf-8"), "wb")
+ old_mode = sub.stat().mode
+ sub.chmod(320)
+ try:
+ assert testdir.runpytest().ret == 0
+ finally:
+ sub.chmod(old_mode)
+
+ def test_dont_write_bytecode(self, testdir, monkeypatch):
+ testdir.makepyfile("""
+ import os
+ def test_no_bytecode():
+ assert "__pycache__" in __cached__
+ assert not os.path.exists(__cached__)
+ assert not os.path.exists(os.path.dirname(__cached__))""")
+ monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
+ assert testdir.runpytest_subprocess().ret == 0
+
+ @pytest.mark.skipif('"__pypy__" in sys.modules')
+ def test_pyc_vs_pyo(self, testdir, monkeypatch):
+ testdir.makepyfile("""
+ import pytest
+ def test_optimized():
+ "hello"
+ assert test_optimized.__doc__ is None"""
+ )
+ p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None,
+ rootdir=testdir.tmpdir)
+ tmp = "--basetemp=%s" % p
+ monkeypatch.setenv("PYTHONOPTIMIZE", "2")
+ monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
+ assert testdir.runpytest_subprocess(tmp).ret == 0
+ tagged = "test_pyc_vs_pyo." + PYTEST_TAG
+ assert tagged + ".pyo" in os.listdir("__pycache__")
+ monkeypatch.undo()
+ monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
+ assert testdir.runpytest_subprocess(tmp).ret == 1
+ assert tagged + ".pyc" in os.listdir("__pycache__")
+
+ def test_package(self, testdir):
+ pkg = testdir.tmpdir.join("pkg")
+ pkg.mkdir()
+ pkg.join("__init__.py").ensure()
+ pkg.join("test_blah.py").write("""
+def test_rewritten():
+ assert "@py_builtins" in globals()""")
+ assert testdir.runpytest().ret == 0
+
+ def test_translate_newlines(self, testdir):
+ content = "def test_rewritten():\r\n assert '@py_builtins' in globals()"
+ b = content.encode("utf-8")
+ testdir.tmpdir.join("test_newlines.py").write(b, "wb")
+ assert testdir.runpytest().ret == 0
+
+ @pytest.mark.skipif(sys.version_info < (3,3),
+ reason='packages without __init__.py not supported on python 2')
+ def test_package_without__init__py(self, testdir):
+ pkg = testdir.mkdir('a_package_without_init_py')
+ pkg.join('module.py').ensure()
+ testdir.makepyfile("import a_package_without_init_py.module")
+ assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED
+
+class TestAssertionRewriteHookDetails(object):
+ def test_loader_is_package_false_for_module(self, testdir):
+ testdir.makepyfile(test_fun="""
+ def test_loader():
+ assert not __loader__.is_package(__name__)
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "* 1 passed*",
+ ])
+
+ def test_loader_is_package_true_for_package(self, testdir):
+ testdir.makepyfile(test_fun="""
+ def test_loader():
+ assert not __loader__.is_package(__name__)
+
+ def test_fun():
+ assert __loader__.is_package('fun')
+
+ def test_missing():
+ assert not __loader__.is_package('pytest_not_there')
+ """)
+ testdir.mkpydir('fun')
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ '* 3 passed*',
+ ])
+
+ @pytest.mark.skipif("sys.version_info[0] >= 3")
+ @pytest.mark.xfail("hasattr(sys, 'pypy_translation_info')")
+ def test_assume_ascii(self, testdir):
+ content = "u'\xe2\x99\xa5\x01\xfe'"
+ testdir.tmpdir.join("test_encoding.py").write(content, "wb")
+ res = testdir.runpytest()
+ assert res.ret != 0
+ assert "SyntaxError: Non-ASCII character" in res.stdout.str()
+
+ @pytest.mark.skipif("sys.version_info[0] >= 3")
+ def test_detect_coding_cookie(self, testdir):
+ testdir.makepyfile(test_cookie="""
+ # -*- coding: utf-8 -*-
+ u"St\xc3\xa4d"
+ def test_rewritten():
+ assert "@py_builtins" in globals()""")
+ assert testdir.runpytest().ret == 0
+
+ @pytest.mark.skipif("sys.version_info[0] >= 3")
+ def test_detect_coding_cookie_second_line(self, testdir):
+ testdir.makepyfile(test_cookie="""
+ # -*- coding: utf-8 -*-
+ u"St\xc3\xa4d"
+ def test_rewritten():
+ assert "@py_builtins" in globals()""")
+ assert testdir.runpytest().ret == 0
+
+ @pytest.mark.skipif("sys.version_info[0] >= 3")
+ def test_detect_coding_cookie_crlf(self, testdir):
+ testdir.makepyfile(test_cookie="""
+ # -*- coding: utf-8 -*-
+ u"St\xc3\xa4d"
+ def test_rewritten():
+ assert "@py_builtins" in globals()""")
+ assert testdir.runpytest().ret == 0
+
+ def test_sys_meta_path_munged(self, testdir):
+ testdir.makepyfile("""
+ def test_meta_path():
+ import sys; sys.meta_path = []""")
+ assert testdir.runpytest().ret == 0
+
+ def test_write_pyc(self, testdir, tmpdir, monkeypatch):
+ from _pytest.assertion.rewrite import _write_pyc
+ from _pytest.assertion import AssertionState
+ try:
+ import __builtin__ as b
+ except ImportError:
+ import builtins as b
+ config = testdir.parseconfig([])
+ state = AssertionState(config, "rewrite")
+ source_path = tmpdir.ensure("source.py")
+ pycpath = tmpdir.join("pyc").strpath
+ assert _write_pyc(state, [1], source_path.stat(), pycpath)
+ def open(*args):
+ e = IOError()
+ e.errno = 10
+ raise e
+ monkeypatch.setattr(b, "open", open)
+ assert not _write_pyc(state, [1], source_path.stat(), pycpath)
+
+ def test_resources_provider_for_loader(self, testdir):
+ """
+ Attempts to load resources from a package should succeed normally,
+ even when the AssertionRewriteHook is used to load the modules.
+
+ See #366 for details.
+ """
+ pytest.importorskip("pkg_resources")
+
+ testdir.mkpydir('testpkg')
+ contents = {
+ 'testpkg/test_pkg': """
+ import pkg_resources
+
+ import pytest
+ from _pytest.assertion.rewrite import AssertionRewritingHook
+
+ def test_load_resource():
+ assert isinstance(__loader__, AssertionRewritingHook)
+ res = pkg_resources.resource_string(__name__, 'resource.txt')
+ res = res.decode('ascii')
+ assert res == 'Load me please.'
+ """,
+ }
+ testdir.makepyfile(**contents)
+ testdir.maketxtfile(**{'testpkg/resource': "Load me please."})
+
+ result = testdir.runpytest_subprocess()
+ result.assert_outcomes(passed=1)
+
+ def test_read_pyc(self, tmpdir):
+ """
+ Ensure that `_read_pyc` can properly deal with corrupted pyc files.
+ In those circumstances it should just give up instead of generating
+ an exception that is propagated to the caller.
+ """
+ import py_compile
+ from _pytest.assertion.rewrite import _read_pyc
+
+ source = tmpdir.join('source.py')
+ pyc = source + 'c'
+
+ source.write('def test(): pass')
+ py_compile.compile(str(source), str(pyc))
+
+ contents = pyc.read(mode='rb')
+ strip_bytes = 20 # header is around 8 bytes, strip a little more
+ assert len(contents) > strip_bytes
+ pyc.write(contents[:strip_bytes], mode='wb')
+
+ assert _read_pyc(source, str(pyc)) is None # no error
+
+ def test_reload_is_same(self, testdir):
+ # A file that will be picked up during collecting.
+ testdir.tmpdir.join("file.py").ensure()
+ testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
+ [pytest]
+ python_files = *.py
+ """))
+
+ testdir.makepyfile(test_fun="""
+ import sys
+ try:
+ from imp import reload
+ except ImportError:
+ pass
+
+ def test_loader():
+ import file
+ assert sys.modules["file"] is reload(file)
+ """)
+ result = testdir.runpytest('-s')
+ result.stdout.fnmatch_lines([
+ "* 1 passed*",
+ ])
+
+ def test_get_data_support(self, testdir):
+ """Implement optional PEP302 api (#808).
+ """
+ path = testdir.mkpydir("foo")
+ path.join("test_foo.py").write(_pytest._code.Source("""
+ class Test:
+ def test_foo(self):
+ import pkgutil
+ data = pkgutil.get_data('foo.test_foo', 'data.txt')
+ assert data == b'Hey'
+ """))
+ path.join('data.txt').write('Hey')
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('*1 passed*')
+
+
+def test_issue731(testdir):
+ testdir.makepyfile("""
+ class LongReprWithBraces(object):
+ def __repr__(self):
+ return 'LongReprWithBraces({' + ('a' * 80) + '}' + ('a' * 120) + ')'
+
+ def some_method(self):
+ return False
+
+ def test_long_repr():
+ obj = LongReprWithBraces()
+ assert obj.some_method()
+ """)
+ result = testdir.runpytest()
+ assert 'unbalanced braces' not in result.stdout.str()
+
+
+def test_collapse_false_unbalanced_braces():
+ util._collapse_false('some text{ False\n{False = some more text\n}')
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_cache.py b/testing/web-platform/tests/tools/pytest/testing/test_cache.py
new file mode 100755
index 000000000..98053f869
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_cache.py
@@ -0,0 +1,386 @@
+import sys
+
+import _pytest
+import pytest
+import os
+import shutil
+
+pytest_plugins = "pytester",
+
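+# A quick sketch of the config.cache API exercised below (illustrative only;
+# every call shown here also appears in the tests):
+#
+#     config.cache.set("some/key", [1, 2, 3])   # unserializable values raise TypeError
+#     config.cache.get("some/key", None)        # default returned for missing/corrupt data
+#     config.cache.makedir("name")              # "/" is not allowed in directory names
+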
+class TestNewAPI:
+ def test_config_cache_makedir(self, testdir):
+ testdir.makeini("[pytest]")
+ config = testdir.parseconfigure()
+ with pytest.raises(ValueError):
+ config.cache.makedir("key/name")
+
+ p = config.cache.makedir("name")
+ assert p.check()
+
+ def test_config_cache_dataerror(self, testdir):
+ testdir.makeini("[pytest]")
+ config = testdir.parseconfigure()
+ cache = config.cache
+ pytest.raises(TypeError, lambda: cache.set("key/name", cache))
+ config.cache.set("key/name", 0)
+ config.cache._getvaluepath("key/name").write("123invalid")
+ val = config.cache.get("key/name", -2)
+ assert val == -2
+
+ def test_cache_writefail_cachfile_silent(self, testdir):
+ testdir.makeini("[pytest]")
+ testdir.tmpdir.join('.cache').write('gone wrong')
+ config = testdir.parseconfigure()
+ cache = config.cache
+ cache.set('test/broken', [])
+
+ @pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows')
+ def test_cache_writefail_permissions(self, testdir):
+ testdir.makeini("[pytest]")
+ testdir.tmpdir.ensure_dir('.cache').chmod(0)
+ config = testdir.parseconfigure()
+ cache = config.cache
+ cache.set('test/broken', [])
+
+ @pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows')
+ def test_cache_failure_warns(self, testdir):
+ testdir.tmpdir.ensure_dir('.cache').chmod(0)
+ testdir.makepyfile("""
+ def test_error():
+ raise Exception
+
+ """)
+ result = testdir.runpytest('-rw')
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*could not create cache path*",
+ "*1 pytest-warnings*",
+ ])
+
+ def test_config_cache(self, testdir):
+ testdir.makeconftest("""
+ def pytest_configure(config):
+ # see that we get cache information early on
+ assert hasattr(config, "cache")
+ """)
+ testdir.makepyfile("""
+ def test_session(pytestconfig):
+ assert hasattr(pytestconfig, "cache")
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 0
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_cachefuncarg(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_cachefuncarg(cache):
+ val = cache.get("some/thing", None)
+ assert val is None
+ cache.set("some/thing", [1])
+ pytest.raises(TypeError, lambda: cache.get("some/thing"))
+ val = cache.get("some/thing", [])
+ assert val == [1]
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 0
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+
+
+def test_cache_reportheader(testdir):
+ testdir.makepyfile("""
+ def test_hello():
+ pass
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "cachedir: .cache"
+ ])
+
+
+def test_cache_show(testdir):
+ result = testdir.runpytest("--cache-show")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*cache is empty*"
+ ])
+ testdir.makeconftest("""
+ def pytest_configure(config):
+ config.cache.set("my/name", [1,2,3])
+ config.cache.set("other/some", {1:2})
+ dp = config.cache.makedir("mydb")
+ dp.ensure("hello")
+ dp.ensure("world")
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 5 # no tests executed
+ result = testdir.runpytest("--cache-show")
+ result.stdout.fnmatch_lines_random([
+ "*cachedir:*",
+ "-*cache values*-",
+ "*my/name contains:",
+ " [1, 2, 3]",
+ "*other/some contains*",
+ " {*1*: 2}",
+ "-*cache directories*-",
+ "*mydb/hello*length 0*",
+ "*mydb/world*length 0*",
+ ])
+
+
+class TestLastFailed:
+
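+ # Options exercised in this class (their behaviour is asserted through the
+ # expected output below):
+ #   --lf / --last-failed  : rerun only the tests that failed on the last run
+ #   --ff / --failed-first : run previously failed tests first, then the rest
+ #   --cache-clear         : drop all cached state before the run
+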
+ def test_lastfailed_usecase(self, testdir, monkeypatch):
+ monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
+ p = testdir.makepyfile("""
+ def test_1():
+ assert 0
+ def test_2():
+ assert 0
+ def test_3():
+ assert 1
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*2 failed*",
+ ])
+ p.write(_pytest._code.Source("""
+ def test_1():
+ assert 1
+
+ def test_2():
+ assert 1
+
+ def test_3():
+ assert 0
+ """))
+ result = testdir.runpytest("--lf")
+ result.stdout.fnmatch_lines([
+ "*2 passed*1 desel*",
+ ])
+ result = testdir.runpytest("--lf")
+ result.stdout.fnmatch_lines([
+ "*1 failed*2 passed*",
+ ])
+ result = testdir.runpytest("--lf", "--cache-clear")
+ result.stdout.fnmatch_lines([
+ "*1 failed*2 passed*",
+ ])
+
+ # Run this again to make sure clear-cache is robust
+ if os.path.isdir('.cache'):
+ shutil.rmtree('.cache')
+ result = testdir.runpytest("--lf", "--cache-clear")
+ result.stdout.fnmatch_lines([
+ "*1 failed*2 passed*",
+ ])
+
+ def test_failedfirst_order(self, testdir):
+ testdir.tmpdir.join('test_a.py').write(_pytest._code.Source("""
+ def test_always_passes():
+ assert 1
+ """))
+ testdir.tmpdir.join('test_b.py').write(_pytest._code.Source("""
+ def test_always_fails():
+ assert 0
+ """))
+ result = testdir.runpytest()
+ # Test order will be collection order; alphabetical
+ result.stdout.fnmatch_lines([
+ "test_a.py*",
+ "test_b.py*",
+ ])
+ result = testdir.runpytest("--lf", "--ff")
+ # Test order will be failing tests first
+ result.stdout.fnmatch_lines([
+ "test_b.py*",
+ "test_a.py*",
+ ])
+
+ def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
+ monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
+ testdir.makepyfile(test_a="""
+ def test_a1():
+ assert 0
+ def test_a2():
+ assert 1
+ """, test_b="""
+ def test_b1():
+ assert 0
+ """)
+ p = testdir.tmpdir.join("test_a.py")
+ p2 = testdir.tmpdir.join("test_b.py")
+
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*2 failed*",
+ ])
+ result = testdir.runpytest("--lf", p2)
+ result.stdout.fnmatch_lines([
+ "*1 failed*",
+ ])
+ p2.write(_pytest._code.Source("""
+ def test_b1():
+ assert 1
+ """))
+ result = testdir.runpytest("--lf", p2)
+ result.stdout.fnmatch_lines([
+ "*1 passed*",
+ ])
+ result = testdir.runpytest("--lf", p)
+ result.stdout.fnmatch_lines([
+ "*1 failed*1 desel*",
+ ])
+
+ def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
+ monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
+ testdir.makepyfile("""
+ def test_1():
+ assert 0
+ """)
+ p2 = testdir.tmpdir.join("test_something.py")
+ p2.write(_pytest._code.Source("""
+ def test_2():
+ assert 0
+ """))
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*2 failed*",
+ ])
+ result = testdir.runpytest("--lf", p2)
+ result.stdout.fnmatch_lines([
+ "*1 failed*",
+ ])
+ result = testdir.runpytest("--lf")
+ result.stdout.fnmatch_lines([
+ "*2 failed*",
+ ])
+
+ def test_lastfailed_xpass(self, testdir):
+ testdir.inline_runsource("""
+ import pytest
+ @pytest.mark.xfail
+ def test_hello():
+ assert 1
+ """)
+ config = testdir.parseconfigure()
+ lastfailed = config.cache.get("cache/lastfailed", -1)
+ assert lastfailed == -1
+
+ def test_non_serializable_parametrize(self, testdir):
+ """Test that failed parametrized tests with unmarshable parameters
+ don't break pytest-cache.
+ """
+ testdir.makepyfile(r"""
+ import pytest
+
+ @pytest.mark.parametrize('val', [
+ b'\xac\x10\x02G',
+ ])
+ def test_fail(val):
+ assert False
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('*1 failed in*')
+
+ def test_lastfailed_collectfailure(self, testdir, monkeypatch):
+
+ testdir.makepyfile(test_maybe="""
+ import py
+ env = py.std.os.environ
+ if '1' == env['FAILIMPORT']:
+ raise ImportError('fail')
+ def test_hello():
+ assert '0' == env['FAILTEST']
+ """)
+
+ def rlf(fail_import, fail_run):
+ monkeypatch.setenv('FAILIMPORT', fail_import)
+ monkeypatch.setenv('FAILTEST', fail_run)
+
+ testdir.runpytest('-q')
+ config = testdir.parseconfigure()
+ lastfailed = config.cache.get("cache/lastfailed", -1)
+ return lastfailed
+
+ lastfailed = rlf(fail_import=0, fail_run=0)
+ assert lastfailed == -1
+
+ lastfailed = rlf(fail_import=1, fail_run=0)
+ assert list(lastfailed) == ['test_maybe.py']
+
+ lastfailed = rlf(fail_import=0, fail_run=1)
+ assert list(lastfailed) == ['test_maybe.py::test_hello']
+
+
+ def test_lastfailed_failure_subset(self, testdir, monkeypatch):
+
+ testdir.makepyfile(test_maybe="""
+ import py
+ env = py.std.os.environ
+ if '1' == env['FAILIMPORT']:
+ raise ImportError('fail')
+ def test_hello():
+ assert '0' == env['FAILTEST']
+ """)
+
+ testdir.makepyfile(test_maybe2="""
+ import py
+ env = py.std.os.environ
+ if '1' == env['FAILIMPORT']:
+ raise ImportError('fail')
+ def test_hello():
+ assert '0' == env['FAILTEST']
+
+ def test_pass():
+ pass
+ """)
+
+ def rlf(fail_import, fail_run, args=()):
+ monkeypatch.setenv('FAILIMPORT', fail_import)
+ monkeypatch.setenv('FAILTEST', fail_run)
+
+ result = testdir.runpytest('-q', '--lf', *args)
+ config = testdir.parseconfigure()
+ lastfailed = config.cache.get("cache/lastfailed", -1)
+ return result, lastfailed
+
+ result, lastfailed = rlf(fail_import=0, fail_run=0)
+ assert lastfailed == -1
+ result.stdout.fnmatch_lines([
+ '*3 passed*',
+ ])
+
+ result, lastfailed = rlf(fail_import=1, fail_run=0)
+ assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py']
+
+
+ result, lastfailed = rlf(fail_import=0, fail_run=0,
+ args=('test_maybe2.py',))
+ assert list(lastfailed) == ['test_maybe.py']
+
+
+ # edge case of test selection - even if we remember failures
+ # from other tests we still need to run all tests if no test
+ # matches the failures
+ result, lastfailed = rlf(fail_import=0, fail_run=0,
+ args=('test_maybe2.py',))
+ assert list(lastfailed) == ['test_maybe.py']
+ result.stdout.fnmatch_lines([
+ '*2 passed*',
+ ])
+
+ def test_lastfailed_creates_cache_when_needed(self, testdir):
+ # Issue #1342
+ testdir.makepyfile(test_empty='')
+ testdir.runpytest('-q', '--lf')
+ assert not os.path.exists('.cache')
+
+ testdir.makepyfile(test_successful='def test_success():\n assert True')
+ testdir.runpytest('-q', '--lf')
+ assert not os.path.exists('.cache')
+
+ testdir.makepyfile(test_errored='def test_error():\n assert False')
+ testdir.runpytest('-q', '--lf')
+ assert os.path.exists('.cache')
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_capture.py b/testing/web-platform/tests/tools/pytest/testing/test_capture.py
new file mode 100644
index 000000000..73660692b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_capture.py
@@ -0,0 +1,1068 @@
+# note: py.io capture tests were copied from
+# pylib 1.4.20.dev2 (rev 13d9af95547e)
+from __future__ import with_statement
+import pickle
+import os
+import sys
+
+import _pytest._code
+import py
+import pytest
+import contextlib
+
+from _pytest import capture
+from _pytest.capture import CaptureManager
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+from py.builtin import print_
+
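+# Tests marked with needsosdup are expected to fail (xfail) where os.dup() is
+# unavailable.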
+needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
+
+if sys.version_info >= (3, 0):
+ def tobytes(obj):
+ if isinstance(obj, str):
+ obj = obj.encode('UTF-8')
+ assert isinstance(obj, bytes)
+ return obj
+
+ def totext(obj):
+ if isinstance(obj, bytes):
+ obj = str(obj, 'UTF-8')
+ assert isinstance(obj, str)
+ return obj
+else:
+ def tobytes(obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode('UTF-8')
+ assert isinstance(obj, str)
+ return obj
+
+ def totext(obj):
+ if isinstance(obj, str):
+ obj = unicode(obj, 'UTF-8')
+ assert isinstance(obj, unicode)
+ return obj
+
+
+def oswritebytes(fd, obj):
+ os.write(fd, tobytes(obj))
+
+
+
+def StdCaptureFD(out=True, err=True, in_=True):
+ return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
+
+def StdCapture(out=True, err=True, in_=True):
+ return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
+
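+# A rough usage sketch for the two factories above (illustrative only; the
+# same calls are exercised by TestStdCapture further down):
+#
+#     cap = StdCapture()            # or StdCaptureFD() where os.dup() exists
+#     cap.start_capturing()
+#     print ("hello world")
+#     out, err = cap.readouterr()   # out == "hello world\n"
+#     cap.stop_capturing()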
+
+class TestCaptureManager:
+ def test_getmethod_default_no_fd(self, monkeypatch):
+ from _pytest.capture import pytest_addoption
+ from _pytest.config import Parser
+ parser = Parser()
+ pytest_addoption(parser)
+ default = parser._groups[0].options[0].default
+ assert default == "fd" if hasattr(os, "dup") else "sys"
+ parser = Parser()
+ monkeypatch.delattr(os, 'dup', raising=False)
+ pytest_addoption(parser)
+ assert parser._groups[0].options[0].default == "sys"
+
+ @needsosdup
+ @pytest.mark.parametrize("method",
+ ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
+ def test_capturing_basic_api(self, method):
+ capouter = StdCaptureFD()
+ old = sys.stdout, sys.stderr, sys.stdin
+ try:
+ capman = CaptureManager(method)
+ capman.init_capturings()
+ outerr = capman.suspendcapture()
+ assert outerr == ("", "")
+ outerr = capman.suspendcapture()
+ assert outerr == ("", "")
+ print ("hello")
+ out, err = capman.suspendcapture()
+ if method == "no":
+ assert old == (sys.stdout, sys.stderr, sys.stdin)
+ else:
+ assert not out
+ capman.resumecapture()
+ print ("hello")
+ out, err = capman.suspendcapture()
+ if method != "no":
+ assert out == "hello\n"
+ capman.reset_capturings()
+ finally:
+ capouter.stop_capturing()
+
+ @needsosdup
+ def test_init_capturing(self):
+ capouter = StdCaptureFD()
+ try:
+ capman = CaptureManager("fd")
+ capman.init_capturings()
+ pytest.raises(AssertionError, "capman.init_capturings()")
+ capman.reset_capturings()
+ finally:
+ capouter.stop_capturing()
+
+
+@pytest.mark.parametrize("method", ['fd', 'sys'])
+def test_capturing_unicode(testdir, method):
+ if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2,2):
+ pytest.xfail("does not work on pypy < 2.2")
+ if sys.version_info >= (3, 0):
+ obj = "'b\u00f6y'"
+ else:
+ obj = "u'\u00f6y'"
+ testdir.makepyfile("""
+ # coding=utf8
+ # taken from issue 227 from nosetests
+ def test_unicode():
+ import sys
+ print (sys.stdout)
+ print (%s)
+ """ % obj)
+ result = testdir.runpytest("--capture=%s" % method)
+ result.stdout.fnmatch_lines([
+ "*1 passed*"
+ ])
+
+
+@pytest.mark.parametrize("method", ['fd', 'sys'])
+def test_capturing_bytes_in_utf8_encoding(testdir, method):
+ testdir.makepyfile("""
+ def test_unicode():
+ print ('b\\u00f6y')
+ """)
+ result = testdir.runpytest("--capture=%s" % method)
+ result.stdout.fnmatch_lines([
+ "*1 passed*"
+ ])
+
+
+def test_collect_capturing(testdir):
+ p = testdir.makepyfile("""
+ print ("collect %s failure" % 13)
+ import xyz42123
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*Captured stdout*",
+ "*collect 13 failure*",
+ ])
+
+
+class TestPerTestCapturing:
+ def test_capture_and_fixtures(self, testdir):
+ p = testdir.makepyfile("""
+ def setup_module(mod):
+ print ("setup module")
+ def setup_function(function):
+ print ("setup " + function.__name__)
+ def test_func1():
+ print ("in func1")
+ assert 0
+ def test_func2():
+ print ("in func2")
+ assert 0
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "setup module*",
+ "setup test_func1*",
+ "in func1*",
+ "setup test_func2*",
+ "in func2*",
+ ])
+
+ @pytest.mark.xfail(reason="unimplemented feature")
+ def test_capture_scope_cache(self, testdir):
+ p = testdir.makepyfile("""
+ import sys
+ def setup_module(func):
+ print ("module-setup")
+ def setup_function(func):
+ print ("function-setup")
+ def test_func():
+ print ("in function")
+ assert 0
+ def teardown_function(func):
+ print ("in teardown")
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*test_func():*",
+ "*Captured stdout during setup*",
+ "module-setup*",
+ "function-setup*",
+ "*Captured stdout*",
+ "in teardown*",
+ ])
+
+ def test_no_carry_over(self, testdir):
+ p = testdir.makepyfile("""
+ def test_func1():
+ print ("in func1")
+ def test_func2():
+ print ("in func2")
+ assert 0
+ """)
+ result = testdir.runpytest(p)
+ s = result.stdout.str()
+ assert "in func1" not in s
+ assert "in func2" in s
+
+ def test_teardown_capturing(self, testdir):
+ p = testdir.makepyfile("""
+ def setup_function(function):
+ print ("setup func1")
+ def teardown_function(function):
+ print ("teardown func1")
+ assert 0
+ def test_func1():
+ print ("in func1")
+ pass
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ '*teardown_function*',
+ '*Captured stdout*',
+ "setup func1*",
+ "in func1*",
+ "teardown func1*",
+ #"*1 fixture failure*"
+ ])
+
+ def test_teardown_capturing_final(self, testdir):
+ p = testdir.makepyfile("""
+ def teardown_module(mod):
+ print ("teardown module")
+ assert 0
+ def test_func():
+ pass
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*def teardown_module(mod):*",
+ "*Captured stdout*",
+ "*teardown module*",
+ "*1 error*",
+ ])
+
+ def test_capturing_outerr(self, testdir):
+ p1 = testdir.makepyfile("""
+ import sys
+ def test_capturing():
+ print (42)
+ sys.stderr.write(str(23))
+ def test_capturing_error():
+ print (1)
+ sys.stderr.write(str(2))
+ raise ValueError
+ """)
+ result = testdir.runpytest(p1)
+ result.stdout.fnmatch_lines([
+ "*test_capturing_outerr.py .F",
+ "====* FAILURES *====",
+ "____*____",
+ "*test_capturing_outerr.py:8: ValueError",
+ "*--- Captured stdout *call*",
+ "1",
+ "*--- Captured stderr *call*",
+ "2",
+ ])
+
+
+class TestLoggingInteraction:
+ def test_logging_stream_ownership(self, testdir):
+ p = testdir.makepyfile("""
+ def test_logging():
+ import logging
+ import pytest
+ stream = capture.TextIO()
+ logging.basicConfig(stream=stream)
+ stream.close() # to free memory/release resources
+ """)
+ result = testdir.runpytest_subprocess(p)
+ assert "atexit" not in result.stderr.str()
+
+ def test_logging_and_immediate_setupteardown(self, testdir):
+ p = testdir.makepyfile("""
+ import logging
+ def setup_function(function):
+ logging.warn("hello1")
+
+ def test_logging():
+ logging.warn("hello2")
+ assert 0
+
+ def teardown_function(function):
+ logging.warn("hello3")
+ assert 0
+ """)
+ for optargs in (('--capture=sys',), ('--capture=fd',)):
+ print (optargs)
+ result = testdir.runpytest_subprocess(p, *optargs)
+ s = result.stdout.str()
+ result.stdout.fnmatch_lines([
+ "*WARN*hello3", # errors show first!
+ "*WARN*hello1",
+ "*WARN*hello2",
+ ])
+ # verify proper termination
+ assert "closed" not in s
+
+ def test_logging_and_crossscope_fixtures(self, testdir):
+ p = testdir.makepyfile("""
+ import logging
+ def setup_module(function):
+ logging.warn("hello1")
+
+ def test_logging():
+ logging.warn("hello2")
+ assert 0
+
+ def teardown_module(function):
+ logging.warn("hello3")
+ assert 0
+ """)
+ for optargs in (('--capture=sys',), ('--capture=fd',)):
+ print (optargs)
+ result = testdir.runpytest_subprocess(p, *optargs)
+ s = result.stdout.str()
+ result.stdout.fnmatch_lines([
+ "*WARN*hello3", # errors come first
+ "*WARN*hello1",
+ "*WARN*hello2",
+ ])
+ # verify proper termination
+ assert "closed" not in s
+
+ def test_logging_initialized_in_test(self, testdir):
+ p = testdir.makepyfile("""
+ import sys
+ def test_something():
+ # pytest does not import logging
+ assert 'logging' not in sys.modules
+ import logging
+ logging.basicConfig()
+ logging.warn("hello432")
+ assert 0
+ """)
+ result = testdir.runpytest_subprocess(
+ p, "--traceconfig",
+ "-p", "no:capturelog")
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "*hello432*",
+ ])
+ assert 'operation on closed file' not in result.stderr.str()
+
+ def test_conftestlogging_is_shown(self, testdir):
+ testdir.makeconftest("""
+ import logging
+ logging.basicConfig()
+ logging.warn("hello435")
+ """)
+ # make sure that logging is still captured in tests
+ result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ result.stderr.fnmatch_lines([
+ "WARNING*hello435*",
+ ])
+ assert 'operation on closed file' not in result.stderr.str()
+
+ def test_conftestlogging_and_test_logging(self, testdir):
+ testdir.makeconftest("""
+ import logging
+ logging.basicConfig()
+ """)
+ # make sure that logging is still captured in tests
+ p = testdir.makepyfile("""
+ def test_hello():
+ import logging
+ logging.warn("hello433")
+ assert 0
+ """)
+ result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "WARNING*hello433*",
+ ])
+ assert 'something' not in result.stderr.str()
+ assert 'operation on closed file' not in result.stderr.str()
+
+
+class TestCaptureFixture:
+ @pytest.mark.parametrize("opt", [[], ["-s"]])
+ def test_std_functional(self, testdir, opt):
+ reprec = testdir.inline_runsource("""
+ def test_hello(capsys):
+ print (42)
+ out, err = capsys.readouterr()
+ assert out.startswith("42")
+ """, *opt)
+ reprec.assertoutcome(passed=1)
+
+ def test_capsyscapfd(self, testdir):
+ p = testdir.makepyfile("""
+ def test_one(capsys, capfd):
+ pass
+ def test_two(capfd, capsys):
+ pass
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*ERROR*setup*test_one*",
+ "*capsys*capfd*same*time*",
+ "*ERROR*setup*test_two*",
+ "*capsys*capfd*same*time*",
+ "*2 error*"])
+
+ @pytest.mark.parametrize("method", ["sys", "fd"])
+ def test_capture_is_represented_on_failure_issue128(self, testdir, method):
+ p = testdir.makepyfile("""
+ def test_hello(cap%s):
+ print ("xxx42xxx")
+ assert 0
+ """ % method)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "xxx42xxx",
+ ])
+
+ @needsosdup
+ def test_stdfd_functional(self, testdir):
+ reprec = testdir.inline_runsource("""
+ def test_hello(capfd):
+ import os
+ os.write(1, "42".encode('ascii'))
+ out, err = capfd.readouterr()
+ assert out.startswith("42")
+ capfd.close()
+ """)
+ reprec.assertoutcome(passed=1)
+
+ def test_partial_setup_failure(self, testdir):
+ p = testdir.makepyfile("""
+ def test_hello(capsys, missingarg):
+ pass
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*test_partial_setup_failure*",
+ "*1 error*",
+ ])
+
+ @needsosdup
+ def test_keyboardinterrupt_disables_capturing(self, testdir):
+ p = testdir.makepyfile("""
+ def test_hello(capfd):
+ import os
+ os.write(1, str(42).encode('ascii'))
+ raise KeyboardInterrupt()
+ """)
+ result = testdir.runpytest_subprocess(p)
+ result.stdout.fnmatch_lines([
+ "*KeyboardInterrupt*"
+ ])
+ assert result.ret == 2
+
+ @pytest.mark.issue14
+ def test_capture_and_logging(self, testdir):
+ p = testdir.makepyfile("""
+ import logging
+ def test_log(capsys):
+ logging.error('x')
+ """)
+ result = testdir.runpytest_subprocess(p)
+ assert 'closed' not in result.stderr.str()
+
+
+def test_setup_failure_does_not_kill_capturing(testdir):
+ sub1 = testdir.mkpydir("sub1")
+ sub1.join("conftest.py").write(_pytest._code.Source("""
+ def pytest_runtest_setup(item):
+ raise ValueError(42)
+ """))
+ sub1.join("test_mod.py").write("def test_func1(): pass")
+ result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
+ result.stdout.fnmatch_lines([
+ "*ValueError(42)*",
+ "*1 error*"
+ ])
+
+
+def test_fdfuncarg_skips_on_no_osdup(testdir):
+ testdir.makepyfile("""
+ import os
+ if hasattr(os, 'dup'):
+ del os.dup
+ def test_hello(capfd):
+ pass
+ """)
+ result = testdir.runpytest_subprocess("--capture=no")
+ result.stdout.fnmatch_lines([
+ "*1 skipped*"
+ ])
+
+
+def test_capture_conftest_runtest_setup(testdir):
+ testdir.makeconftest("""
+ def pytest_runtest_setup():
+ print ("hello19")
+ """)
+ testdir.makepyfile("def test_func(): pass")
+ result = testdir.runpytest()
+ assert result.ret == 0
+ assert 'hello19' not in result.stdout.str()
+
+
+def test_capture_badoutput_issue412(testdir):
+ testdir.makepyfile("""
+ import os
+
+ def test_func():
+ omg = bytearray([1,129,1])
+ os.write(1, omg)
+ assert 0
+ """)
+ result = testdir.runpytest('--cap=fd')
+ result.stdout.fnmatch_lines('''
+ *def test_func*
+ *assert 0*
+ *Captured*
+ *1 failed*
+ ''')
+
+
+def test_capture_early_option_parsing(testdir):
+ testdir.makeconftest("""
+ def pytest_runtest_setup():
+ print ("hello19")
+ """)
+ testdir.makepyfile("def test_func(): pass")
+ result = testdir.runpytest("-vs")
+ assert result.ret == 0
+ assert 'hello19' in result.stdout.str()
+
+
+def test_capture_binary_output(testdir):
+ testdir.makepyfile(r"""
+ import pytest
+
+ def test_a():
+ import sys
+ import subprocess
+ subprocess.call([sys.executable, __file__])
+
+ def test_foo():
+ import os;os.write(1, b'\xc3')
+
+ if __name__ == '__main__':
+ test_foo()
+ """)
+ result = testdir.runpytest('--assert=plain')
+ result.assert_outcomes(passed=2)
+
+
+def test_error_during_readouterr(testdir):
+ """Make sure we suspend capturing if errors occurr during readouterr"""
+ testdir.makepyfile(pytest_xyz="""
+ from _pytest.capture import FDCapture
+ def bad_snap(self):
+ raise Exception('boom')
+ assert FDCapture.snap
+ FDCapture.snap = bad_snap
+ """)
+ result = testdir.runpytest_subprocess(
+ "-p", "pytest_xyz", "--version", syspathinsert=True
+ )
+ result.stderr.fnmatch_lines([
+ "*in bad_snap",
+ " raise Exception('boom')",
+ "Exception: boom",
+ ])
+
+
+class TestTextIO:
+ def test_text(self):
+ f = capture.TextIO()
+ f.write("hello")
+ s = f.getvalue()
+ assert s == "hello"
+ f.close()
+
+ def test_unicode_and_str_mixture(self):
+ f = capture.TextIO()
+ if sys.version_info >= (3, 0):
+ f.write("\u00f6")
+ pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
+ else:
+ f.write(unicode("\u00f6", 'UTF-8'))
+ f.write("hello") # bytes
+ s = f.getvalue()
+ f.close()
+ assert isinstance(s, unicode)
+
+
+def test_bytes_io():
+ f = py.io.BytesIO()
+ f.write(tobytes("hello"))
+ pytest.raises(TypeError, "f.write(totext('hello'))")
+ s = f.getvalue()
+ assert s == tobytes("hello")
+
+
+def test_dontreadfrominput():
+ from _pytest.capture import DontReadFromInput
+ f = DontReadFromInput()
+ assert not f.isatty()
+ pytest.raises(IOError, f.read)
+ pytest.raises(IOError, f.readlines)
+ pytest.raises(IOError, iter, f)
+ pytest.raises(ValueError, f.fileno)
+ f.close() # just for completeness
+
+
+@pytest.yield_fixture
+def tmpfile(testdir):
+ f = testdir.makepyfile("").open('wb+')
+ yield f
+ if not f.closed:
+ f.close()
+
+@needsosdup
+def test_dupfile(tmpfile):
+ flist = []
+ for i in range(5):
+ nf = capture.safe_text_dupfile(tmpfile, "wb")
+ assert nf != tmpfile
+ assert nf.fileno() != tmpfile.fileno()
+ assert nf not in flist
+ print_(i, end="", file=nf)
+ flist.append(nf)
+ for i in range(5):
+ f = flist[i]
+ f.close()
+ tmpfile.seek(0)
+ s = tmpfile.read()
+ assert "01234" in repr(s)
+ tmpfile.close()
+
+def test_dupfile_on_bytesio():
+ io = py.io.BytesIO()
+ f = capture.safe_text_dupfile(io, "wb")
+ f.write("hello")
+ assert io.getvalue() == b"hello"
+
+def test_dupfile_on_textio():
+ io = py.io.TextIO()
+ f = capture.safe_text_dupfile(io, "wb")
+ f.write("hello")
+ assert io.getvalue() == "hello"
+
+
+@contextlib.contextmanager
+def lsof_check():
+ pid = os.getpid()
+ try:
+ out = py.process.cmdexec("lsof -p %d" % pid)
+ except (py.process.cmdexec.Error, UnicodeDecodeError):
+ # about UnicodeDecodeError, see note on pytester
+ pytest.skip("could not run 'lsof'")
+ yield
+ out2 = py.process.cmdexec("lsof -p %d" % pid)
+ len1 = len([x for x in out.split("\n") if "REG" in x])
+ len2 = len([x for x in out2.split("\n") if "REG" in x])
+ assert len2 < len1 + 3, out2
+
+
+class TestFDCapture:
+ pytestmark = needsosdup
+
+ def test_simple(self, tmpfile):
+ fd = tmpfile.fileno()
+ cap = capture.FDCapture(fd)
+ data = tobytes("hello")
+ os.write(fd, data)
+ s = cap.snap()
+ cap.done()
+ assert not s
+ cap = capture.FDCapture(fd)
+ cap.start()
+ os.write(fd, data)
+ s = cap.snap()
+ cap.done()
+ assert s == "hello"
+
+ def test_simple_many(self, tmpfile):
+ for i in range(10):
+ self.test_simple(tmpfile)
+
+ def test_simple_many_check_open_files(self, testdir):
+ with lsof_check():
+ with testdir.makepyfile("").open('wb+') as tmpfile:
+ self.test_simple_many(tmpfile)
+
+ def test_simple_fail_second_start(self, tmpfile):
+ fd = tmpfile.fileno()
+ cap = capture.FDCapture(fd)
+ cap.done()
+ pytest.raises(ValueError, cap.start)
+
+ def test_stderr(self):
+ cap = capture.FDCapture(2)
+ cap.start()
+ print_("hello", file=sys.stderr)
+ s = cap.snap()
+ cap.done()
+ assert s == "hello\n"
+
+ def test_stdin(self, tmpfile):
+ cap = capture.FDCapture(0)
+ cap.start()
+ x = os.read(0, 100).strip()
+ cap.done()
+ assert x == tobytes('')
+
+ def test_writeorg(self, tmpfile):
+ data1, data2 = tobytes("foo"), tobytes("bar")
+ cap = capture.FDCapture(tmpfile.fileno())
+ cap.start()
+ tmpfile.write(data1)
+ tmpfile.flush()
+ cap.writeorg(data2)
+ scap = cap.snap()
+ cap.done()
+ assert scap == totext(data1)
+ with open(tmpfile.name, 'rb') as stmp_file:
+ stmp = stmp_file.read()
+ assert stmp == data2
+
+ def test_simple_resume_suspend(self, tmpfile):
+ with saved_fd(1):
+ cap = capture.FDCapture(1)
+ cap.start()
+ data = tobytes("hello")
+ os.write(1, data)
+ sys.stdout.write("whatever")
+ s = cap.snap()
+ assert s == "hellowhatever"
+ cap.suspend()
+ os.write(1, tobytes("world"))
+ sys.stdout.write("qlwkej")
+ assert not cap.snap()
+ cap.resume()
+ os.write(1, tobytes("but now"))
+ sys.stdout.write(" yes\n")
+ s = cap.snap()
+ assert s == "but now yes\n"
+ cap.suspend()
+ cap.done()
+ pytest.raises(AttributeError, cap.suspend)
+
+
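+# Helper: duplicate the real file descriptor and restore it on exit, so the
+# FDCapture tests below can redirect fd 1 without losing the original stream.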
+@contextlib.contextmanager
+def saved_fd(fd):
+ new_fd = os.dup(fd)
+ try:
+ yield
+ finally:
+ os.dup2(new_fd, fd)
+ os.close(new_fd)
+
+
+class TestStdCapture:
+ captureclass = staticmethod(StdCapture)
+
+ @contextlib.contextmanager
+ def getcapture(self, **kw):
+ cap = self.__class__.captureclass(**kw)
+ cap.start_capturing()
+ try:
+ yield cap
+ finally:
+ cap.stop_capturing()
+
+ def test_capturing_done_simple(self):
+ with self.getcapture() as cap:
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ out, err = cap.readouterr()
+ assert out == "hello"
+ assert err == "world"
+
+ def test_capturing_reset_simple(self):
+ with self.getcapture() as cap:
+ print("hello world")
+ sys.stderr.write("hello error\n")
+ out, err = cap.readouterr()
+ assert out == "hello world\n"
+ assert err == "hello error\n"
+
+ def test_capturing_readouterr(self):
+ with self.getcapture() as cap:
+ print ("hello world")
+ sys.stderr.write("hello error\n")
+ out, err = cap.readouterr()
+ assert out == "hello world\n"
+ assert err == "hello error\n"
+ sys.stderr.write("error2")
+ out, err = cap.readouterr()
+ assert err == "error2"
+
+ def test_capturing_readouterr_unicode(self):
+ with self.getcapture() as cap:
+ print ("hx\xc4\x85\xc4\x87")
+ out, err = cap.readouterr()
+ assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
+
+ @pytest.mark.skipif('sys.version_info >= (3,)',
+ reason='text output different for bytes on python3')
+ def test_capturing_readouterr_decode_error_handling(self):
+ with self.getcapture() as cap:
+ # triggered an internal error in pytest
+ print('\xa6')
+ out, err = cap.readouterr()
+ assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
+
+ def test_reset_twice_error(self):
+ with self.getcapture() as cap:
+ print ("hello")
+ out, err = cap.readouterr()
+ pytest.raises(ValueError, cap.stop_capturing)
+ assert out == "hello\n"
+ assert not err
+
+ def test_capturing_modify_sysouterr_in_between(self):
+ oldout = sys.stdout
+ olderr = sys.stderr
+ with self.getcapture() as cap:
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ sys.stdout = capture.TextIO()
+ sys.stderr = capture.TextIO()
+ print ("not seen")
+ sys.stderr.write("not seen\n")
+ out, err = cap.readouterr()
+ assert out == "hello"
+ assert err == "world"
+ assert sys.stdout == oldout
+ assert sys.stderr == olderr
+
+ def test_capturing_error_recursive(self):
+ with self.getcapture() as cap1:
+ print ("cap1")
+ with self.getcapture() as cap2:
+ print ("cap2")
+ out2, err2 = cap2.readouterr()
+ out1, err1 = cap1.readouterr()
+ assert out1 == "cap1\n"
+ assert out2 == "cap2\n"
+
+ def test_just_out_capture(self):
+ with self.getcapture(out=True, err=False) as cap:
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ out, err = cap.readouterr()
+ assert out == "hello"
+ assert not err
+
+ def test_just_err_capture(self):
+ with self.getcapture(out=False, err=True) as cap:
+ sys.stdout.write("hello")
+ sys.stderr.write("world")
+ out, err = cap.readouterr()
+ assert err == "world"
+ assert not out
+
+ def test_stdin_restored(self):
+ old = sys.stdin
+ with self.getcapture(in_=True):
+ newstdin = sys.stdin
+ assert newstdin != sys.stdin
+ assert sys.stdin is old
+
+ def test_stdin_nulled_by_default(self):
+ print ("XXX this test may well hang instead of crashing")
+ print ("XXX which indicates an error in the underlying capturing")
+ print ("XXX mechanisms")
+ with self.getcapture():
+ pytest.raises(IOError, "sys.stdin.read()")
+
+
+class TestStdCaptureFD(TestStdCapture):
+ pytestmark = needsosdup
+ captureclass = staticmethod(StdCaptureFD)
+
+ def test_simple_only_fd(self, testdir):
+ testdir.makepyfile("""
+ import os
+ def test_x():
+ os.write(1, "hello\\n".encode("ascii"))
+ assert 0
+ """)
+ result = testdir.runpytest_subprocess()
+ result.stdout.fnmatch_lines("""
+ *test_x*
+ *assert 0*
+ *Captured stdout*
+ """)
+
+ def test_intermingling(self):
+ with self.getcapture() as cap:
+ oswritebytes(1, "1")
+ sys.stdout.write(str(2))
+ sys.stdout.flush()
+ oswritebytes(1, "3")
+ oswritebytes(2, "a")
+ sys.stderr.write("b")
+ sys.stderr.flush()
+ oswritebytes(2, "c")
+ out, err = cap.readouterr()
+ assert out == "123"
+ assert err == "abc"
+
+ def test_many(self, capfd):
+ with lsof_check():
+ for i in range(10):
+ cap = StdCaptureFD()
+ cap.stop_capturing()
+
+
+class TestStdCaptureFDinvalidFD:
+ pytestmark = needsosdup
+
+ def test_stdcapture_fd_invalid_fd(self, testdir):
+ testdir.makepyfile("""
+ import os
+ from _pytest import capture
+ def StdCaptureFD(out=True, err=True, in_=True):
+ return capture.MultiCapture(out, err, in_,
+ Capture=capture.FDCapture)
+ def test_stdout():
+ os.close(1)
+ cap = StdCaptureFD(out=True, err=False, in_=False)
+ cap.stop_capturing()
+ def test_stderr():
+ os.close(2)
+ cap = StdCaptureFD(out=False, err=True, in_=False)
+ cap.stop_capturing()
+ def test_stdin():
+ os.close(0)
+ cap = StdCaptureFD(out=False, err=False, in_=True)
+ cap.stop_capturing()
+ """)
+ result = testdir.runpytest_subprocess("--capture=fd")
+ assert result.ret == 0
+ assert result.parseoutcomes()['passed'] == 3
+
+
+def test_capture_not_started_but_reset():
+ capsys = StdCapture()
+ capsys.stop_capturing()
+
+
+@needsosdup
+@pytest.mark.parametrize('use', [True, False])
+def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
+ if not use:
+ tmpfile = True
+ cap = StdCaptureFD(out=False, err=tmpfile)
+ try:
+ cap.start_capturing()
+ capfile = cap.err.tmpfile
+ cap.readouterr()
+ finally:
+ cap.stop_capturing()
+ capfile2 = cap.err.tmpfile
+ assert capfile2 == capfile
+
+@needsosdup
+def test_close_and_capture_again(testdir):
+ testdir.makepyfile("""
+ import os
+ def test_close():
+ os.close(1)
+ def test_capture_again():
+ os.write(1, b"hello\\n")
+ assert 0
+ """)
+ result = testdir.runpytest_subprocess()
+ result.stdout.fnmatch_lines("""
+ *test_capture_again*
+ *assert 0*
+ *stdout*
+ *hello*
+ """)
+
+
+
+@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
+def test_capturing_and_logging_fundamentals(testdir, method):
+ if method == "FDCapture" and not hasattr(os, 'dup'):
+ pytest.skip("need os.dup")
+ # here we check a fundamental feature
+ p = testdir.makepyfile("""
+ import sys, os
+ import py, logging
+ from _pytest import capture
+ cap = capture.MultiCapture(out=False, in_=False,
+ Capture=capture.%s)
+ cap.start_capturing()
+
+ logging.warn("hello1")
+ outerr = cap.readouterr()
+ print ("suspend, captured %%s" %%(outerr,))
+ logging.warn("hello2")
+
+ cap.pop_outerr_to_orig()
+ logging.warn("hello3")
+
+ outerr = cap.readouterr()
+ print ("suspend2, captured %%s" %% (outerr,))
+ """ % (method,))
+ result = testdir.runpython(p)
+ result.stdout.fnmatch_lines("""
+ suspend, captured*hello1*
+ suspend2, captured*WARNING:root:hello3*
+ """)
+ result.stderr.fnmatch_lines("""
+ WARNING:root:hello2
+ """)
+ assert "atexit" not in result.stderr.str()
+
+
+def test_error_attribute_issue555(testdir):
+ testdir.makepyfile("""
+ import sys
+ def test_capattr():
+ assert sys.stdout.errors == "strict"
+ assert sys.stderr.errors == "strict"
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+
+def test_dontreadfrominput_has_encoding(testdir):
+ testdir.makepyfile("""
+ import sys
+ def test_capattr():
+ # should not raise AttributeError
+ assert sys.stdout.encoding
+ assert sys.stderr.encoding
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+
+def test_pickling_and_unpickling_enocded_file():
+ # See https://bitbucket.org/pytest-dev/pytest/pull-request/194
+ # pickle.loads() raises infinite recursion if
+ # EncodedFile.__getattr__ is not implemented properly
+ ef = capture.EncodedFile(None, None)
+ ef_as_str = pickle.dumps(ef)
+ pickle.loads(ef_as_str)
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_collection.py b/testing/web-platform/tests/tools/pytest/testing/test_collection.py
new file mode 100644
index 000000000..749c5b7ce
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_collection.py
@@ -0,0 +1,641 @@
+import pytest, py
+
+from _pytest.main import Session, EXIT_NOTESTSCOLLECTED
+
+class TestCollector:
+ def test_collect_versus_item(self):
+ from pytest import Collector, Item
+ assert not issubclass(Collector, Item)
+ assert not issubclass(Item, Collector)
+
+ def test_compat_attributes(self, testdir, recwarn):
+ modcol = testdir.getmodulecol("""
+ def test_pass(): pass
+ def test_fail(): assert 0
+ """)
+ recwarn.clear()
+ assert modcol.Module == pytest.Module
+ assert modcol.Class == pytest.Class
+ assert modcol.Item == pytest.Item
+ assert modcol.File == pytest.File
+ assert modcol.Function == pytest.Function
+
+ def test_check_equality(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def test_pass(): pass
+ def test_fail(): assert 0
+ """)
+ fn1 = testdir.collect_by_name(modcol, "test_pass")
+ assert isinstance(fn1, pytest.Function)
+ fn2 = testdir.collect_by_name(modcol, "test_pass")
+ assert isinstance(fn2, pytest.Function)
+
+ assert fn1 == fn2
+ assert fn1 != modcol
+ if py.std.sys.version_info < (3, 0):
+ assert cmp(fn1, fn2) == 0
+ assert hash(fn1) == hash(fn2)
+
+ fn3 = testdir.collect_by_name(modcol, "test_fail")
+ assert isinstance(fn3, pytest.Function)
+ assert not (fn1 == fn3)
+ assert fn1 != fn3
+
+ for fn in fn1,fn2,fn3:
+ assert fn != 3
+ assert fn != modcol
+ assert fn != [1,2,3]
+ assert [1,2,3] != fn
+ assert modcol != fn
+
+ def test_getparent(self, testdir):
+ modcol = testdir.getmodulecol("""
+ class TestClass:
+ def test_foo():
+ pass
+ """)
+ cls = testdir.collect_by_name(modcol, "TestClass")
+ fn = testdir.collect_by_name(
+ testdir.collect_by_name(cls, "()"), "test_foo")
+
+ parent = fn.getparent(pytest.Module)
+ assert parent is modcol
+
+ parent = fn.getparent(pytest.Function)
+ assert parent is fn
+
+ parent = fn.getparent(pytest.Class)
+ assert parent is cls
+
+
+ def test_getcustomfile_roundtrip(self, testdir):
+ hello = testdir.makefile(".xxx", hello="world")
+ testdir.makepyfile(conftest="""
+ import pytest
+ class CustomFile(pytest.File):
+ pass
+ def pytest_collect_file(path, parent):
+ if path.ext == ".xxx":
+ return CustomFile(path, parent=parent)
+ """)
+ node = testdir.getpathnode(hello)
+ assert isinstance(node, pytest.File)
+ assert node.name == "hello.xxx"
+ nodes = node.session.perform_collect([node.nodeid], genitems=False)
+ assert len(nodes) == 1
+ assert isinstance(nodes[0], pytest.File)
+
+class TestCollectFS:
+ def test_ignored_certain_directories(self, testdir):
+ tmpdir = testdir.tmpdir
+ tmpdir.ensure("_darcs", 'test_notfound.py')
+ tmpdir.ensure("CVS", 'test_notfound.py')
+ tmpdir.ensure("{arch}", 'test_notfound.py')
+ tmpdir.ensure(".whatever", 'test_notfound.py')
+ tmpdir.ensure(".bzr", 'test_notfound.py')
+ tmpdir.ensure("normal", 'test_found.py')
+ for x in tmpdir.visit("test_*.py"):
+ x.write("def test_hello(): pass")
+
+ result = testdir.runpytest("--collect-only")
+ s = result.stdout.str()
+ assert "test_notfound" not in s
+ assert "test_found" in s
+
+ def test_custom_norecursedirs(self, testdir):
+ testdir.makeini("""
+ [pytest]
+ norecursedirs = mydir xyz*
+ """)
+ tmpdir = testdir.tmpdir
+ tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
+ tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
+ tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
+ rec = testdir.inline_run()
+ rec.assertoutcome(passed=1)
+ rec = testdir.inline_run("xyz123/test_2.py")
+ rec.assertoutcome(failed=1)
+
+ def test_testpaths_ini(self, testdir, monkeypatch):
+ testdir.makeini("""
+ [pytest]
+ testpaths = gui uts
+ """)
+ tmpdir = testdir.tmpdir
+ tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
+ tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
+ tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
+
+ # when executing from the rootdir, only tests from the `testpaths`
+ # directories are collected
+ items, reprec = testdir.inline_genitems('-v')
+ assert [x.name for x in items] == ['test_gui', 'test_uts']
+
+ # check that explicitly passing directories in the command-line
+ # collects the tests
+ for dirname in ('env', 'gui', 'uts'):
+ items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
+ assert [x.name for x in items] == ['test_%s' % dirname]
+
+ # changing cwd to each subdirectory and running pytest without
+ # arguments collects the tests in that directory normally
+ for dirname in ('env', 'gui', 'uts'):
+ monkeypatch.chdir(testdir.tmpdir.join(dirname))
+ items, reprec = testdir.inline_genitems()
+ assert [x.name for x in items] == ['test_%s' % dirname]
+
+
+class TestCollectPluginHookRelay:
+ def test_pytest_collect_file(self, testdir):
+ wascalled = []
+ class Plugin:
+ def pytest_collect_file(self, path, parent):
+ wascalled.append(path)
+ testdir.makefile(".abc", "xyz")
+ pytest.main([testdir.tmpdir], plugins=[Plugin()])
+ assert len(wascalled) == 1
+ assert wascalled[0].ext == '.abc'
+
+ def test_pytest_collect_directory(self, testdir):
+ wascalled = []
+ class Plugin:
+ def pytest_collect_directory(self, path, parent):
+ wascalled.append(path.basename)
+ testdir.mkdir("hello")
+ testdir.mkdir("world")
+ pytest.main(testdir.tmpdir, plugins=[Plugin()])
+ assert "hello" in wascalled
+ assert "world" in wascalled
+
+class TestPrunetraceback:
+ def test_collection_error(self, testdir):
+ p = testdir.makepyfile("""
+ import not_exists
+ """)
+ result = testdir.runpytest(p)
+ assert "__import__" not in result.stdout.str(), "too long traceback"
+ result.stdout.fnmatch_lines([
+ "*ERROR collecting*",
+ "*mport*not_exists*"
+ ])
+
+ def test_custom_repr_failure(self, testdir):
+ p = testdir.makepyfile("""
+ import not_exists
+ """)
+ testdir.makeconftest("""
+ import pytest
+ def pytest_collect_file(path, parent):
+ return MyFile(path, parent)
+ class MyError(Exception):
+ pass
+ class MyFile(pytest.File):
+ def collect(self):
+ raise MyError()
+ def repr_failure(self, excinfo):
+ if excinfo.errisinstance(MyError):
+ return "hello world"
+ return pytest.File.repr_failure(self, excinfo)
+ """)
+
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*ERROR collecting*",
+ "*hello world*",
+ ])
+
+ @pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
+ def test_collect_report_postprocessing(self, testdir):
+ p = testdir.makepyfile("""
+ import not_exists
+ """)
+ testdir.makeconftest("""
+ import pytest
+ def pytest_make_collect_report(__multicall__):
+ rep = __multicall__.execute()
+ rep.headerlines += ["header1"]
+ return rep
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*ERROR collecting*",
+ "*header1*",
+ ])
+
+
+class TestCustomConftests:
+ def test_ignore_collect_path(self, testdir):
+ testdir.makeconftest("""
+ def pytest_ignore_collect(path, config):
+ return path.basename.startswith("x") or \
+ path.basename == "test_one.py"
+ """)
+ sub = testdir.mkdir("xy123")
+ sub.ensure("test_hello.py").write("syntax error")
+ sub.join("conftest.py").write("syntax error")
+ testdir.makepyfile("def test_hello(): pass")
+ testdir.makepyfile(test_one="syntax error")
+ result = testdir.runpytest("--fulltrace")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_ignore_collect_not_called_on_argument(self, testdir):
+ testdir.makeconftest("""
+ def pytest_ignore_collect(path, config):
+ return True
+ """)
+ p = testdir.makepyfile("def test_hello(): pass")
+ result = testdir.runpytest(p)
+ assert result.ret == 0
+ result.stdout.fnmatch_lines("*1 passed*")
+ result = testdir.runpytest()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ result.stdout.fnmatch_lines("*collected 0 items*")
+
+ def test_collectignore_exclude_on_option(self, testdir):
+ testdir.makeconftest("""
+ collect_ignore = ['hello', 'test_world.py']
+ def pytest_addoption(parser):
+ parser.addoption("--XX", action="store_true", default=False)
+ def pytest_configure(config):
+ if config.getvalue("XX"):
+ collect_ignore[:] = []
+ """)
+ testdir.mkdir("hello")
+ testdir.makepyfile(test_world="def test_hello(): pass")
+ result = testdir.runpytest()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ assert "passed" not in result.stdout.str()
+ result = testdir.runpytest("--XX")
+ assert result.ret == 0
+ assert "passed" in result.stdout.str()
+
+ def test_pytest_fs_collect_hooks_are_seen(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ class MyModule(pytest.Module):
+ pass
+ def pytest_collect_file(path, parent):
+ if path.ext == ".py":
+ return MyModule(path, parent)
+ """)
+ testdir.mkdir("sub")
+ testdir.makepyfile("def test_x(): pass")
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*MyModule*",
+ "*test_x*"
+ ])
+
+ def test_pytest_collect_file_from_sister_dir(self, testdir):
+ sub1 = testdir.mkpydir("sub1")
+ sub2 = testdir.mkpydir("sub2")
+ conf1 = testdir.makeconftest("""
+ import pytest
+ class MyModule1(pytest.Module):
+ pass
+ def pytest_collect_file(path, parent):
+ if path.ext == ".py":
+ return MyModule1(path, parent)
+ """)
+ conf1.move(sub1.join(conf1.basename))
+ conf2 = testdir.makeconftest("""
+ import pytest
+ class MyModule2(pytest.Module):
+ pass
+ def pytest_collect_file(path, parent):
+ if path.ext == ".py":
+ return MyModule2(path, parent)
+ """)
+ conf2.move(sub2.join(conf2.basename))
+ p = testdir.makepyfile("def test_x(): pass")
+ p.copy(sub1.join(p.basename))
+ p.copy(sub2.join(p.basename))
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*MyModule1*",
+ "*MyModule2*",
+ "*test_x*"
+ ])
+
+class TestSession:
+ def test_parsearg(self, testdir):
+ p = testdir.makepyfile("def test_func(): pass")
+ subdir = testdir.mkdir("sub")
+ subdir.ensure("__init__.py")
+ target = subdir.join(p.basename)
+ p.move(target)
+ subdir.chdir()
+ config = testdir.parseconfig(p.basename)
+ rcol = Session(config=config)
+ assert rcol.fspath == subdir
+ parts = rcol._parsearg(p.basename)
+
+ assert parts[0] == target
+ assert len(parts) == 1
+ parts = rcol._parsearg(p.basename + "::test_func")
+ assert parts[0] == target
+ assert parts[1] == "test_func"
+ assert len(parts) == 2
+
+ def test_collect_topdir(self, testdir):
+ p = testdir.makepyfile("def test_func(): pass")
+ id = "::".join([p.basename, "test_func"])
+ # XXX migrate to collectonly? (see below)
+ config = testdir.parseconfig(id)
+ topdir = testdir.tmpdir
+ rcol = Session(config)
+ assert topdir == rcol.fspath
+ #rootid = rcol.nodeid
+ #root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
+ #assert root2 == rcol, rootid
+ colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
+ assert len(colitems) == 1
+ assert colitems[0].fspath == p
+
+
+ def test_collect_protocol_single_function(self, testdir):
+ p = testdir.makepyfile("def test_func(): pass")
+ id = "::".join([p.basename, "test_func"])
+ items, hookrec = testdir.inline_genitems(id)
+ item, = items
+ assert item.name == "test_func"
+ newid = item.nodeid
+ assert newid == id
+ py.std.pprint.pprint(hookrec.calls)
+ topdir = testdir.tmpdir # noqa
+ hookrec.assert_contains([
+ ("pytest_collectstart", "collector.fspath == topdir"),
+ ("pytest_make_collect_report", "collector.fspath == topdir"),
+ ("pytest_collectstart", "collector.fspath == p"),
+ ("pytest_make_collect_report", "collector.fspath == p"),
+ ("pytest_pycollect_makeitem", "name == 'test_func'"),
+ ("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
+ ("pytest_collectreport", "report.nodeid == ''")
+ ])
+
+ def test_collect_protocol_method(self, testdir):
+ p = testdir.makepyfile("""
+ class TestClass:
+ def test_method(self):
+ pass
+ """)
+ normid = p.basename + "::TestClass::()::test_method"
+ for id in [p.basename,
+ p.basename + "::TestClass",
+ p.basename + "::TestClass::()",
+ normid,
+ ]:
+ items, hookrec = testdir.inline_genitems(id)
+ assert len(items) == 1
+ assert items[0].name == "test_method"
+ newid = items[0].nodeid
+ assert newid == normid
+
+ def test_collect_custom_nodes_multi_id(self, testdir):
+ p = testdir.makepyfile("def test_func(): pass")
+ testdir.makeconftest("""
+ import pytest
+ class SpecialItem(pytest.Item):
+ def runtest(self):
+ return # ok
+ class SpecialFile(pytest.File):
+ def collect(self):
+ return [SpecialItem(name="check", parent=self)]
+ def pytest_collect_file(path, parent):
+ if path.basename == %r:
+ return SpecialFile(fspath=path, parent=parent)
+ """ % p.basename)
+ id = p.basename
+
+ items, hookrec = testdir.inline_genitems(id)
+ py.std.pprint.pprint(hookrec.calls)
+ assert len(items) == 2
+ hookrec.assert_contains([
+ ("pytest_collectstart",
+ "collector.fspath == collector.session.fspath"),
+ ("pytest_collectstart",
+ "collector.__class__.__name__ == 'SpecialFile'"),
+ ("pytest_collectstart",
+ "collector.__class__.__name__ == 'Module'"),
+ ("pytest_pycollect_makeitem", "name == 'test_func'"),
+ ("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
+ #("pytest_collectreport",
+ # "report.fspath == %r" % str(rcol.fspath)),
+ ])
+
+ def test_collect_subdir_event_ordering(self, testdir):
+ p = testdir.makepyfile("def test_func(): pass")
+ aaa = testdir.mkpydir("aaa")
+ test_aaa = aaa.join("test_aaa.py")
+ p.move(test_aaa)
+
+ items, hookrec = testdir.inline_genitems()
+ assert len(items) == 1
+ py.std.pprint.pprint(hookrec.calls)
+ hookrec.assert_contains([
+ ("pytest_collectstart", "collector.fspath == test_aaa"),
+ ("pytest_pycollect_makeitem", "name == 'test_func'"),
+ ("pytest_collectreport",
+ "report.nodeid.startswith('aaa/test_aaa.py')"),
+ ])
+
+ def test_collect_two_commandline_args(self, testdir):
+ p = testdir.makepyfile("def test_func(): pass")
+ aaa = testdir.mkpydir("aaa")
+ bbb = testdir.mkpydir("bbb")
+ test_aaa = aaa.join("test_aaa.py")
+ p.copy(test_aaa)
+ test_bbb = bbb.join("test_bbb.py")
+ p.move(test_bbb)
+
+ id = "."
+
+ items, hookrec = testdir.inline_genitems(id)
+ assert len(items) == 2
+ py.std.pprint.pprint(hookrec.calls)
+ hookrec.assert_contains([
+ ("pytest_collectstart", "collector.fspath == test_aaa"),
+ ("pytest_pycollect_makeitem", "name == 'test_func'"),
+ ("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
+ ("pytest_collectstart", "collector.fspath == test_bbb"),
+ ("pytest_pycollect_makeitem", "name == 'test_func'"),
+ ("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
+ ])
+
+ def test_serialization_byid(self, testdir):
+ testdir.makepyfile("def test_func(): pass")
+ items, hookrec = testdir.inline_genitems()
+ assert len(items) == 1
+ item, = items
+ items2, hookrec = testdir.inline_genitems(item.nodeid)
+ item2, = items2
+ assert item2.name == item.name
+ assert item2.fspath == item.fspath
+
+ def test_find_byid_without_instance_parents(self, testdir):
+ p = testdir.makepyfile("""
+ class TestClass:
+ def test_method(self):
+ pass
+ """)
+        arg = p.basename + "::TestClass::test_method"
+ items, hookrec = testdir.inline_genitems(arg)
+ assert len(items) == 1
+ item, = items
+ assert item.nodeid.endswith("TestClass::()::test_method")
+
+class Test_getinitialnodes:
+ def test_global_file(self, testdir, tmpdir):
+ x = tmpdir.ensure("x.py")
+ config = testdir.parseconfigure(x)
+ col = testdir.getnode(config, x)
+ assert isinstance(col, pytest.Module)
+ assert col.name == 'x.py'
+ assert col.parent.name == testdir.tmpdir.basename
+ assert col.parent.parent is None
+ for col in col.listchain():
+ assert col.config is config
+
+ def test_pkgfile(self, testdir):
+ tmpdir = testdir.tmpdir
+ subdir = tmpdir.join("subdir")
+ x = subdir.ensure("x.py")
+ subdir.ensure("__init__.py")
+ config = testdir.parseconfigure(x)
+ col = testdir.getnode(config, x)
+ assert isinstance(col, pytest.Module)
+ assert col.name == 'x.py'
+ assert col.parent.parent is None
+ for col in col.listchain():
+ assert col.config is config
+
+class Test_genitems:
+ def test_check_collect_hashes(self, testdir):
+ p = testdir.makepyfile("""
+ def test_1():
+ pass
+
+ def test_2():
+ pass
+ """)
+ p.copy(p.dirpath(p.purebasename + "2" + ".py"))
+ items, reprec = testdir.inline_genitems(p.dirpath())
+ assert len(items) == 4
+ for numi, i in enumerate(items):
+ for numj, j in enumerate(items):
+ if numj != numi:
+ assert hash(i) != hash(j)
+ assert i != j
+
+ def test_example_items1(self, testdir):
+ p = testdir.makepyfile('''
+ def testone():
+ pass
+
+ class TestX:
+ def testmethod_one(self):
+ pass
+
+ class TestY(TestX):
+ pass
+ ''')
+ items, reprec = testdir.inline_genitems(p)
+ assert len(items) == 3
+ assert items[0].name == 'testone'
+ assert items[1].name == 'testmethod_one'
+ assert items[2].name == 'testmethod_one'
+
+ # let's also test getmodpath here
+ assert items[0].getmodpath() == "testone"
+ assert items[1].getmodpath() == "TestX.testmethod_one"
+ assert items[2].getmodpath() == "TestY.testmethod_one"
+
+ s = items[0].getmodpath(stopatmodule=False)
+ assert s.endswith("test_example_items1.testone")
+ print(s)
+
+ def test_class_and_functions_discovery_using_glob(self, testdir):
+ """
+ tests that python_classes and python_functions config options work
+ as prefixes and glob-like patterns (issue #600).
+ """
+ testdir.makeini("""
+ [pytest]
+ python_classes = *Suite Test
+ python_functions = *_test test
+ """)
+ p = testdir.makepyfile('''
+ class MyTestSuite:
+ def x_test(self):
+ pass
+
+ class TestCase:
+ def test_y(self):
+ pass
+ ''')
+ items, reprec = testdir.inline_genitems(p)
+ ids = [x.getmodpath() for x in items]
+ assert ids == ['MyTestSuite.x_test', 'TestCase.test_y']
+
+
+def test_matchnodes_two_collections_same_file(testdir):
+ testdir.makeconftest("""
+ import pytest
+ def pytest_configure(config):
+ config.pluginmanager.register(Plugin2())
+
+ class Plugin2:
+ def pytest_collect_file(self, path, parent):
+ if path.ext == ".abc":
+ return MyFile2(path, parent)
+
+ def pytest_collect_file(path, parent):
+ if path.ext == ".abc":
+ return MyFile1(path, parent)
+
+ class MyFile1(pytest.Item, pytest.File):
+ def runtest(self):
+ pass
+ class MyFile2(pytest.File):
+ def collect(self):
+ return [Item2("hello", parent=self)]
+
+ class Item2(pytest.Item):
+ def runtest(self):
+ pass
+ """)
+ p = testdir.makefile(".abc", "")
+ result = testdir.runpytest()
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*2 passed*",
+ ])
+ res = testdir.runpytest("%s::hello" % p.basename)
+ res.stdout.fnmatch_lines([
+ "*1 passed*",
+ ])
+
+
+class TestNodekeywords:
+ def test_no_under(self, testdir):
+ modcol = testdir.getmodulecol("""
+ def test_pass(): pass
+ def test_fail(): assert 0
+ """)
+ l = list(modcol.keywords)
+ assert modcol.name in l
+ for x in l:
+ assert not x.startswith("_")
+ assert modcol.name in repr(modcol.keywords)
+
+ def test_issue345(self, testdir):
+ testdir.makepyfile("""
+ def test_should_not_be_selected():
+ assert False, 'I should not have been selected to run'
+
+ def test___repr__():
+ pass
+ """)
+ reprec = testdir.inline_run("-k repr")
+ reprec.assertoutcome(passed=1, failed=0)
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_config.py b/testing/web-platform/tests/tools/pytest/testing/test_config.py
new file mode 100644
index 000000000..92c9bdb8b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_config.py
@@ -0,0 +1,570 @@
+import py, pytest
+
+import _pytest._code
+from _pytest.config import getcfg, get_common_ancestor, determine_setup
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+
+class TestParseIni:
+ def test_getcfg_and_config(self, testdir, tmpdir):
+ sub = tmpdir.mkdir("sub")
+ sub.chdir()
+ tmpdir.join("setup.cfg").write(_pytest._code.Source("""
+ [pytest]
+ name = value
+ """))
+ rootdir, inifile, cfg = getcfg([sub], ["setup.cfg"])
+ assert cfg['name'] == "value"
+ config = testdir.parseconfigure(sub)
+ assert config.inicfg['name'] == 'value'
+
+ def test_getcfg_empty_path(self, tmpdir):
+ getcfg([''], ['setup.cfg']) #happens on py.test ""
+
+ def test_append_parse_args(self, testdir, tmpdir, monkeypatch):
+ monkeypatch.setenv('PYTEST_ADDOPTS', '--color no -rs --tb="short"')
+ tmpdir.join("setup.cfg").write(_pytest._code.Source("""
+ [pytest]
+ addopts = --verbose
+ """))
+ config = testdir.parseconfig(tmpdir)
+ assert config.option.color == 'no'
+ assert config.option.reportchars == 's'
+ assert config.option.tbstyle == 'short'
+ assert config.option.verbose
+ #config = testdir.Config()
+ #args = [tmpdir,]
+ #config._preparse(args, addopts=False)
+ #assert len(args) == 1
+
+ def test_tox_ini_wrong_version(self, testdir):
+ testdir.makefile('.ini', tox="""
+ [pytest]
+ minversion=9.0
+ """)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ result.stderr.fnmatch_lines([
+ "*tox.ini:2*requires*9.0*actual*"
+ ])
+
+ @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
+ def test_ini_names(self, testdir, name):
+ testdir.tmpdir.join(name).write(py.std.textwrap.dedent("""
+ [pytest]
+ minversion = 1.0
+ """))
+ config = testdir.parseconfig()
+ assert config.getini("minversion") == "1.0"
+
+ def test_toxini_before_lower_pytestini(self, testdir):
+ sub = testdir.tmpdir.mkdir("sub")
+ sub.join("tox.ini").write(py.std.textwrap.dedent("""
+ [pytest]
+ minversion = 2.0
+ """))
+ testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
+ [pytest]
+ minversion = 1.5
+ """))
+ config = testdir.parseconfigure(sub)
+ assert config.getini("minversion") == "2.0"
+
+ @pytest.mark.xfail(reason="probably not needed")
+ def test_confcutdir(self, testdir):
+ sub = testdir.mkdir("sub")
+ sub.chdir()
+ testdir.makeini("""
+ [pytest]
+ addopts = --qwe
+ """)
+ result = testdir.inline_run("--confcutdir=.")
+ assert result.ret == 0
+
+class TestConfigCmdlineParsing:
+ def test_parsing_again_fails(self, testdir):
+ config = testdir.parseconfig()
+ pytest.raises(AssertionError, lambda: config.parse([]))
+
+ def test_explicitly_specified_config_file_is_loaded(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addini("custom", "")
+ """)
+ testdir.makeini("""
+ [pytest]
+ custom = 0
+ """)
+ testdir.makefile(".cfg", custom = """
+ [pytest]
+ custom = 1
+ """)
+ config = testdir.parseconfig("-c", "custom.cfg")
+ assert config.getini("custom") == "1"
+
+class TestConfigAPI:
+ def test_config_trace(self, testdir):
+ config = testdir.parseconfig()
+ l = []
+ config.trace.root.setwriter(l.append)
+ config.trace("hello")
+ assert len(l) == 1
+ assert l[0] == "hello [config]\n"
+
+ def test_config_getoption(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addoption("--hello", "-X", dest="hello")
+ """)
+ config = testdir.parseconfig("--hello=this")
+ for x in ("hello", "--hello", "-X"):
+ assert config.getoption(x) == "this"
+ pytest.raises(ValueError, "config.getoption('qweqwe')")
+
+ @pytest.mark.skipif('sys.version_info[:2] not in [(2, 6), (2, 7)]')
+ def test_config_getoption_unicode(self, testdir):
+ testdir.makeconftest("""
+ from __future__ import unicode_literals
+
+ def pytest_addoption(parser):
+ parser.addoption('--hello', type='string')
+ """)
+ config = testdir.parseconfig('--hello=this')
+ assert config.getoption('hello') == 'this'
+
+ def test_config_getvalueorskip(self, testdir):
+ config = testdir.parseconfig()
+ pytest.raises(pytest.skip.Exception,
+ "config.getvalueorskip('hello')")
+ verbose = config.getvalueorskip("verbose")
+ assert verbose == config.option.verbose
+
+ def test_config_getvalueorskip_None(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addoption("--hello")
+ """)
+ config = testdir.parseconfig()
+ with pytest.raises(pytest.skip.Exception):
+ config.getvalueorskip('hello')
+
+ def test_getoption(self, testdir):
+ config = testdir.parseconfig()
+ with pytest.raises(ValueError):
+ config.getvalue('x')
+ assert config.getoption("x", 1) == 1
+
+ def test_getconftest_pathlist(self, testdir, tmpdir):
+ somepath = tmpdir.join("x", "y", "z")
+ p = tmpdir.join("conftest.py")
+ p.write("pathlist = ['.', %r]" % str(somepath))
+ config = testdir.parseconfigure(p)
+ assert config._getconftest_pathlist('notexist', path=tmpdir) is None
+ pl = config._getconftest_pathlist('pathlist', path=tmpdir)
+ print(pl)
+ assert len(pl) == 2
+ assert pl[0] == tmpdir
+ assert pl[1] == somepath
+
+ def test_addini(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addini("myname", "my new ini value")
+ """)
+ testdir.makeini("""
+ [pytest]
+ myname=hello
+ """)
+ config = testdir.parseconfig()
+ val = config.getini("myname")
+ assert val == "hello"
+ pytest.raises(ValueError, config.getini, 'other')
+
+ def test_addini_pathlist(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addini("paths", "my new ini value", type="pathlist")
+ parser.addini("abc", "abc value")
+ """)
+ p = testdir.makeini("""
+ [pytest]
+ paths=hello world/sub.py
+ """)
+ config = testdir.parseconfig()
+ l = config.getini("paths")
+ assert len(l) == 2
+ assert l[0] == p.dirpath('hello')
+ assert l[1] == p.dirpath('world/sub.py')
+ pytest.raises(ValueError, config.getini, 'other')
+
+ def test_addini_args(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addini("args", "new args", type="args")
+ parser.addini("a2", "", "args", default="1 2 3".split())
+ """)
+ testdir.makeini("""
+ [pytest]
+ args=123 "123 hello" "this"
+ """)
+ config = testdir.parseconfig()
+ l = config.getini("args")
+ assert len(l) == 3
+ assert l == ["123", "123 hello", "this"]
+ l = config.getini("a2")
+ assert l == list("123")
+
+ def test_addini_linelist(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addini("xy", "", type="linelist")
+ parser.addini("a2", "", "linelist")
+ """)
+ testdir.makeini("""
+ [pytest]
+ xy= 123 345
+ second line
+ """)
+ config = testdir.parseconfig()
+ l = config.getini("xy")
+ assert len(l) == 2
+ assert l == ["123 345", "second line"]
+ l = config.getini("a2")
+ assert l == []
+
+ @pytest.mark.parametrize('str_val, bool_val',
+ [('True', True), ('no', False), ('no-ini', True)])
+ def test_addini_bool(self, testdir, str_val, bool_val):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addini("strip", "", type="bool", default=True)
+ """)
+ if str_val != 'no-ini':
+ testdir.makeini("""
+ [pytest]
+ strip=%s
+ """ % str_val)
+ config = testdir.parseconfig()
+ assert config.getini("strip") is bool_val
+
+ def test_addinivalue_line_existing(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addini("xy", "", type="linelist")
+ """)
+ testdir.makeini("""
+ [pytest]
+ xy= 123
+ """)
+ config = testdir.parseconfig()
+ l = config.getini("xy")
+ assert len(l) == 1
+ assert l == ["123"]
+ config.addinivalue_line("xy", "456")
+ l = config.getini("xy")
+ assert len(l) == 2
+ assert l == ["123", "456"]
+
+ def test_addinivalue_line_new(self, testdir):
+ testdir.makeconftest("""
+ def pytest_addoption(parser):
+ parser.addini("xy", "", type="linelist")
+ """)
+ config = testdir.parseconfig()
+ assert not config.getini("xy")
+ config.addinivalue_line("xy", "456")
+ l = config.getini("xy")
+ assert len(l) == 1
+ assert l == ["456"]
+ config.addinivalue_line("xy", "123")
+ l = config.getini("xy")
+ assert len(l) == 2
+ assert l == ["456", "123"]
+
+
+class TestConfigFromdictargs:
+ def test_basic_behavior(self):
+ from _pytest.config import Config
+ option_dict = {
+ 'verbose': 444,
+ 'foo': 'bar',
+ 'capture': 'no',
+ }
+ args = ['a', 'b']
+
+ config = Config.fromdictargs(option_dict, args)
+ with pytest.raises(AssertionError):
+ config.parse(['should refuse to parse again'])
+ assert config.option.verbose == 444
+ assert config.option.foo == 'bar'
+ assert config.option.capture == 'no'
+ assert config.args == args
+
+ def test_origargs(self):
+ """Show that fromdictargs can handle args in their "orig" format"""
+ from _pytest.config import Config
+ option_dict = {}
+ args = ['-vvvv', '-s', 'a', 'b']
+
+ config = Config.fromdictargs(option_dict, args)
+ assert config.args == ['a', 'b']
+ assert config._origargs == args
+ assert config.option.verbose == 4
+ assert config.option.capture == 'no'
+
+ def test_inifilename(self, tmpdir):
+ tmpdir.join("foo/bar.ini").ensure().write(_pytest._code.Source("""
+ [pytest]
+ name = value
+ """))
+
+ from _pytest.config import Config
+ inifile = '../../foo/bar.ini'
+ option_dict = {
+ 'inifilename': inifile,
+ 'capture': 'no',
+ }
+
+ cwd = tmpdir.join('a/b')
+ cwd.join('pytest.ini').ensure().write(_pytest._code.Source("""
+ [pytest]
+ name = wrong-value
+ should_not_be_set = true
+ """))
+ with cwd.ensure(dir=True).as_cwd():
+ config = Config.fromdictargs(option_dict, ())
+
+ assert config.args == [str(cwd)]
+ assert config.option.inifilename == inifile
+ assert config.option.capture == 'no'
+
+ # this indicates this is the file used for getting configuration values
+ assert config.inifile == inifile
+ assert config.inicfg.get('name') == 'value'
+ assert config.inicfg.get('should_not_be_set') is None
+
+
+def test_options_on_small_file_do_not_blow_up(testdir):
+ def runfiletest(opts):
+ reprec = testdir.inline_run(*opts)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 2
+ assert skipped == passed == 0
+ path = testdir.makepyfile("""
+ def test_f1(): assert 0
+ def test_f2(): assert 0
+ """)
+
+ for opts in ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'],
+ ['--tb=long'], ['--fulltrace'], ['--nomagic'],
+ ['--traceconfig'], ['-v'], ['-v', '-v']):
+ runfiletest(opts + [path])
+
+def test_preparse_ordering_with_setuptools(testdir, monkeypatch):
+ pkg_resources = pytest.importorskip("pkg_resources")
+ def my_iter(name):
+ assert name == "pytest11"
+ class EntryPoint:
+ name = "mytestplugin"
+ class dist:
+ pass
+ def load(self):
+ class PseudoPlugin:
+ x = 42
+ return PseudoPlugin()
+ return iter([EntryPoint()])
+ monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
+ testdir.makeconftest("""
+ pytest_plugins = "mytestplugin",
+ """)
+ monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
+ config = testdir.parseconfig()
+ plugin = config.pluginmanager.getplugin("mytestplugin")
+ assert plugin.x == 42
+
+def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch):
+ pkg_resources = pytest.importorskip("pkg_resources")
+ def my_iter(name):
+ assert name == "pytest11"
+ class EntryPoint:
+ name = "mytestplugin"
+ def load(self):
+ assert 0, "should not arrive here"
+ return iter([EntryPoint()])
+ monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
+ config = testdir.parseconfig("-p", "no:mytestplugin")
+ plugin = config.pluginmanager.getplugin("mytestplugin")
+ assert plugin is None
+
+def test_cmdline_processargs_simple(testdir):
+ testdir.makeconftest("""
+ def pytest_cmdline_preparse(args):
+ args.append("-h")
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*pytest*",
+ "*-h*",
+ ])
+
+def test_invalid_options_show_extra_information(testdir):
+ """display extra information when pytest exits due to unrecognized
+ options in the command-line"""
+ testdir.makeini("""
+ [pytest]
+ addopts = --invalid-option
+ """)
+ result = testdir.runpytest()
+ result.stderr.fnmatch_lines([
+ "*error: unrecognized arguments: --invalid-option*",
+ "* inifile: %s*" % testdir.tmpdir.join('tox.ini'),
+ "* rootdir: %s*" % testdir.tmpdir,
+ ])
+
+
+@pytest.mark.parametrize('args', [
+ ['dir1', 'dir2', '-v'],
+ ['dir1', '-v', 'dir2'],
+ ['dir2', '-v', 'dir1'],
+ ['-v', 'dir2', 'dir1'],
+])
+def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args):
+ """
+ Consider all arguments in the command-line for rootdir and inifile
+ discovery, even if they happen to occur after an option. #949
+ """
+ # replace "dir1" and "dir2" from "args" into their real directory
+ root = testdir.tmpdir.mkdir('myroot')
+ d1 = root.mkdir('dir1')
+ d2 = root.mkdir('dir2')
+ for i, arg in enumerate(args):
+ if arg == 'dir1':
+ args[i] = d1
+ elif arg == 'dir2':
+ args[i] = d2
+ result = testdir.runpytest(*args)
+ result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile: '])
+
+
+@pytest.mark.skipif("sys.platform == 'win32'")
+def test_toolongargs_issue224(testdir):
+ result = testdir.runpytest("-m", "hello" * 500)
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+
+def test_notify_exception(testdir, capfd):
+ config = testdir.parseconfig()
+ excinfo = pytest.raises(ValueError, "raise ValueError(1)")
+ config.notify_exception(excinfo)
+ out, err = capfd.readouterr()
+ assert "ValueError" in err
+ class A:
+ def pytest_internalerror(self, excrepr):
+ return True
+ config.pluginmanager.register(A())
+ config.notify_exception(excinfo)
+ out, err = capfd.readouterr()
+ assert not err
+
+
+def test_load_initial_conftest_last_ordering(testdir):
+ from _pytest.config import get_config
+ pm = get_config().pluginmanager
+ class My:
+ def pytest_load_initial_conftests(self):
+ pass
+ m = My()
+ pm.register(m)
+ hc = pm.hook.pytest_load_initial_conftests
+ l = hc._nonwrappers + hc._wrappers
+ assert l[-1].function.__module__ == "_pytest.capture"
+ assert l[-2].function == m.pytest_load_initial_conftests
+ assert l[-3].function.__module__ == "_pytest.config"
+
+class TestWarning:
+ def test_warn_config(self, testdir):
+ testdir.makeconftest("""
+ l = []
+ def pytest_configure(config):
+ config.warn("C1", "hello")
+ def pytest_logwarning(code, message):
+ if message == "hello" and code == "C1":
+ l.append(1)
+ """)
+ testdir.makepyfile("""
+ def test_proper(pytestconfig):
+ import conftest
+ assert conftest.l == [1]
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_warn_on_test_item_from_request(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture
+ def fix(request):
+ request.node.warn("T1", "hello")
+ def test_hello(fix):
+ pass
+ """)
+ result = testdir.runpytest()
+ assert result.parseoutcomes()["pytest-warnings"] > 0
+ assert "hello" not in result.stdout.str()
+
+ result = testdir.runpytest("-rw")
+ result.stdout.fnmatch_lines("""
+ ===*pytest-warning summary*===
+ *WT1*test_warn_on_test_item*:5*hello*
+ """)
+
+class TestRootdir:
+ def test_simple_noini(self, tmpdir):
+ assert get_common_ancestor([tmpdir]) == tmpdir
+ assert get_common_ancestor([tmpdir.mkdir("a"), tmpdir]) == tmpdir
+ assert get_common_ancestor([tmpdir, tmpdir.join("a")]) == tmpdir
+ with tmpdir.as_cwd():
+ assert get_common_ancestor([]) == tmpdir
+
+ @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
+ def test_with_ini(self, tmpdir, name):
+ inifile = tmpdir.join(name)
+ inifile.write("[pytest]\n")
+
+ a = tmpdir.mkdir("a")
+ b = a.mkdir("b")
+ for args in ([tmpdir], [a], [b]):
+            rootdir, parsed_inifile, inicfg = determine_setup(None, args)
+            assert rootdir == tmpdir
+            assert parsed_inifile == inifile
+        rootdir, parsed_inifile, inicfg = determine_setup(None, [b, a])
+        assert rootdir == tmpdir
+        assert parsed_inifile == inifile
+
+ @pytest.mark.parametrize("name", "setup.cfg tox.ini".split())
+ def test_pytestini_overides_empty_other(self, tmpdir, name):
+ inifile = tmpdir.ensure("pytest.ini")
+ a = tmpdir.mkdir("a")
+ a.ensure(name)
+        rootdir, parsed_inifile, inicfg = determine_setup(None, [a])
+        assert rootdir == tmpdir
+        assert parsed_inifile == inifile
+
+ def test_setuppy_fallback(self, tmpdir):
+ a = tmpdir.mkdir("a")
+ a.ensure("setup.cfg")
+ tmpdir.ensure("setup.py")
+ rootdir, inifile, inicfg = determine_setup(None, [a])
+ assert rootdir == tmpdir
+ assert inifile is None
+ assert inicfg == {}
+
+ def test_nothing(self, tmpdir):
+ rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
+ assert rootdir == tmpdir
+ assert inifile is None
+ assert inicfg == {}
+
+ def test_with_specific_inifile(self, tmpdir):
+ inifile = tmpdir.ensure("pytest.ini")
+ rootdir, inifile, inicfg = determine_setup(inifile, [tmpdir])
+ assert rootdir == tmpdir
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_conftest.py b/testing/web-platform/tests/tools/pytest/testing/test_conftest.py
new file mode 100644
index 000000000..6f5e77f6d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_conftest.py
@@ -0,0 +1,409 @@
+from textwrap import dedent
+
+import _pytest._code
+import py
+import pytest
+from _pytest.config import PytestPluginManager
+from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
+
+
+@pytest.fixture(scope="module", params=["global", "inpackage"])
+def basedir(request, tmpdir_factory):
+ from _pytest.tmpdir import tmpdir
+ tmpdir = tmpdir(request, tmpdir_factory)
+ tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
+ tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
+ if request.param == "inpackage":
+ tmpdir.ensure("adir/__init__.py")
+ tmpdir.ensure("adir/b/__init__.py")
+ return tmpdir
+
+def ConftestWithSetinitial(path):
+ conftest = PytestPluginManager()
+ conftest_setinitial(conftest, [path])
+ return conftest
+
+def conftest_setinitial(conftest, args, confcutdir=None):
+ class Namespace:
+ def __init__(self):
+ self.file_or_dir = args
+ self.confcutdir = str(confcutdir)
+ self.noconftest = False
+ conftest._set_initial_conftests(Namespace())
+
+class TestConftestValueAccessGlobal:
+ def test_basic_init(self, basedir):
+ conftest = PytestPluginManager()
+ p = basedir.join("adir")
+ assert conftest._rget_with_confmod("a", p)[1] == 1
+
+ def test_immediate_initialiation_and_incremental_are_the_same(self, basedir):
+ conftest = PytestPluginManager()
+ len(conftest._path2confmods)
+ conftest._getconftestmodules(basedir)
+ snap1 = len(conftest._path2confmods)
+ #assert len(conftest._path2confmods) == snap1 + 1
+ conftest._getconftestmodules(basedir.join('adir'))
+ assert len(conftest._path2confmods) == snap1 + 1
+ conftest._getconftestmodules(basedir.join('b'))
+ assert len(conftest._path2confmods) == snap1 + 2
+
+ def test_value_access_not_existing(self, basedir):
+ conftest = ConftestWithSetinitial(basedir)
+ with pytest.raises(KeyError):
+ conftest._rget_with_confmod('a', basedir)
+
+ def test_value_access_by_path(self, basedir):
+ conftest = ConftestWithSetinitial(basedir)
+ adir = basedir.join("adir")
+ assert conftest._rget_with_confmod("a", adir)[1] == 1
+ assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5
+
+ def test_value_access_with_confmod(self, basedir):
+ startdir = basedir.join("adir", "b")
+ startdir.ensure("xx", dir=True)
+ conftest = ConftestWithSetinitial(startdir)
+ mod, value = conftest._rget_with_confmod("a", startdir)
+ assert value == 1.5
+ path = py.path.local(mod.__file__)
+ assert path.dirpath() == basedir.join("adir", "b")
+ assert path.purebasename.startswith("conftest")
+
+def test_conftest_in_nonpkg_with_init(tmpdir):
+ tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
+ tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
+ tmpdir.ensure("adir-1.0/b/__init__.py")
+ tmpdir.ensure("adir-1.0/__init__.py")
+ ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))
+
+def test_doubledash_considered(testdir):
+ conf = testdir.mkdir("--option")
+ conf.join("conftest.py").ensure()
+ conftest = PytestPluginManager()
+ conftest_setinitial(conftest, [conf.basename, conf.basename])
+ l = conftest._getconftestmodules(conf)
+ assert len(l) == 1
+
+def test_issue151_load_all_conftests(testdir):
+ names = "code proj src".split()
+ for name in names:
+ p = testdir.mkdir(name)
+ p.ensure("conftest.py")
+
+ conftest = PytestPluginManager()
+ conftest_setinitial(conftest, names)
+ d = list(conftest._conftestpath2mod.values())
+ assert len(d) == len(names)
+
+def test_conftest_global_import(testdir):
+ testdir.makeconftest("x=3")
+ p = testdir.makepyfile("""
+ import py, pytest
+ from _pytest.config import PytestPluginManager
+ conf = PytestPluginManager()
+ mod = conf._importconftest(py.path.local("conftest.py"))
+ assert mod.x == 3
+ import conftest
+ assert conftest is mod, (conftest, mod)
+ subconf = py.path.local().ensure("sub", "conftest.py")
+ subconf.write("y=4")
+ mod2 = conf._importconftest(subconf)
+ assert mod != mod2
+ assert mod2.y == 4
+ import conftest
+ assert conftest is mod2, (conftest, mod)
+ """)
+ res = testdir.runpython(p)
+ assert res.ret == 0
+
+def test_conftestcutdir(testdir):
+ conf = testdir.makeconftest("")
+ p = testdir.mkdir("x")
+ conftest = PytestPluginManager()
+ conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)
+ l = conftest._getconftestmodules(p)
+ assert len(l) == 0
+ l = conftest._getconftestmodules(conf.dirpath())
+ assert len(l) == 0
+ assert conf not in conftest._conftestpath2mod
+ # but we can still import a conftest directly
+ conftest._importconftest(conf)
+ l = conftest._getconftestmodules(conf.dirpath())
+ assert l[0].__file__.startswith(str(conf))
+ # and all sub paths get updated properly
+ l = conftest._getconftestmodules(p)
+ assert len(l) == 1
+ assert l[0].__file__.startswith(str(conf))
+
+def test_conftestcutdir_inplace_considered(testdir):
+ conf = testdir.makeconftest("")
+ conftest = PytestPluginManager()
+ conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
+ l = conftest._getconftestmodules(conf.dirpath())
+ assert len(l) == 1
+ assert l[0].__file__.startswith(str(conf))
+
+@pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split())
+def test_setinitial_conftest_subdirs(testdir, name):
+ sub = testdir.mkdir(name)
+ subconftest = sub.ensure("conftest.py")
+ conftest = PytestPluginManager()
+ conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
+ if name not in ('whatever', '.dotdir'):
+ assert subconftest in conftest._conftestpath2mod
+ assert len(conftest._conftestpath2mod) == 1
+ else:
+ assert subconftest not in conftest._conftestpath2mod
+ assert len(conftest._conftestpath2mod) == 0
+
+def test_conftest_confcutdir(testdir):
+ testdir.makeconftest("assert 0")
+ x = testdir.mkdir("x")
+ x.join("conftest.py").write(_pytest._code.Source("""
+ def pytest_addoption(parser):
+ parser.addoption("--xyz", action="store_true")
+ """))
+ result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
+ result.stdout.fnmatch_lines(["*--xyz*"])
+ assert 'warning: could not load initial' not in result.stdout.str()
+
+def test_no_conftest(testdir):
+ testdir.makeconftest("assert 0")
+ result = testdir.runpytest("--noconftest")
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+
+ result = testdir.runpytest()
+ assert result.ret == EXIT_USAGEERROR
+
+def test_conftest_existing_resultlog(testdir):
+ x = testdir.mkdir("tests")
+ x.join("conftest.py").write(_pytest._code.Source("""
+ def pytest_addoption(parser):
+ parser.addoption("--xyz", action="store_true")
+ """))
+ testdir.makefile(ext=".log", result="") # Writes result.log
+ result = testdir.runpytest("-h", "--resultlog", "result.log")
+ result.stdout.fnmatch_lines(["*--xyz*"])
+
+def test_conftest_existing_junitxml(testdir):
+ x = testdir.mkdir("tests")
+ x.join("conftest.py").write(_pytest._code.Source("""
+ def pytest_addoption(parser):
+ parser.addoption("--xyz", action="store_true")
+ """))
+ testdir.makefile(ext=".xml", junit="") # Writes junit.xml
+ result = testdir.runpytest("-h", "--junitxml", "junit.xml")
+ result.stdout.fnmatch_lines(["*--xyz*"])
+
+def test_conftest_import_order(testdir, monkeypatch):
+ ct1 = testdir.makeconftest("")
+ sub = testdir.mkdir("sub")
+ ct2 = sub.join("conftest.py")
+ ct2.write("")
+ def impct(p):
+ return p
+ conftest = PytestPluginManager()
+ monkeypatch.setattr(conftest, '_importconftest', impct)
+ assert conftest._getconftestmodules(sub) == [ct1, ct2]
+
+
+def test_fixture_dependency(testdir, monkeypatch):
+ ct1 = testdir.makeconftest("")
+ ct1 = testdir.makepyfile("__init__.py")
+ ct1.write("")
+ sub = testdir.mkdir("sub")
+ sub.join("__init__.py").write("")
+ sub.join("conftest.py").write(py.std.textwrap.dedent("""
+ import pytest
+
+ @pytest.fixture
+ def not_needed():
+ assert False, "Should not be called!"
+
+ @pytest.fixture
+ def foo():
+ assert False, "Should not be called!"
+
+ @pytest.fixture
+ def bar(foo):
+ return 'bar'
+ """))
+ subsub = sub.mkdir("subsub")
+ subsub.join("__init__.py").write("")
+ subsub.join("test_bar.py").write(py.std.textwrap.dedent("""
+ import pytest
+
+ @pytest.fixture
+ def bar():
+ return 'sub bar'
+
+ def test_event_fixture(bar):
+ assert bar == 'sub bar'
+ """))
+ result = testdir.runpytest("sub")
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+
+def test_conftest_found_with_double_dash(testdir):
+ sub = testdir.mkdir("sub")
+ sub.join("conftest.py").write(py.std.textwrap.dedent("""
+ def pytest_addoption(parser):
+ parser.addoption("--hello-world", action="store_true")
+ """))
+ p = sub.join("test_hello.py")
+ p.write(py.std.textwrap.dedent("""
+ import pytest
+ def test_hello(found):
+ assert found == 1
+ """))
+ result = testdir.runpytest(str(p) + "::test_hello", "-h")
+ result.stdout.fnmatch_lines("""
+ *--hello-world*
+ """)
+
+
+class TestConftestVisibility:
+ def _setup_tree(self, testdir): # for issue616
+ # example mostly taken from:
+ # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
+ runner = testdir.mkdir("empty")
+ package = testdir.mkdir("package")
+
+ package.join("conftest.py").write(dedent("""\
+ import pytest
+ @pytest.fixture
+ def fxtr():
+ return "from-package"
+ """))
+ package.join("test_pkgroot.py").write(dedent("""\
+ def test_pkgroot(fxtr):
+ assert fxtr == "from-package"
+ """))
+
+ swc = package.mkdir("swc")
+ swc.join("__init__.py").ensure()
+ swc.join("conftest.py").write(dedent("""\
+ import pytest
+ @pytest.fixture
+ def fxtr():
+ return "from-swc"
+ """))
+ swc.join("test_with_conftest.py").write(dedent("""\
+ def test_with_conftest(fxtr):
+ assert fxtr == "from-swc"
+
+ """))
+
+ snc = package.mkdir("snc")
+ snc.join("__init__.py").ensure()
+ snc.join("test_no_conftest.py").write(dedent("""\
+ def test_no_conftest(fxtr):
+ assert fxtr == "from-package" # No local conftest.py, so should
+ # use value from parent dir's
+
+ """))
+ print ("created directory structure:")
+ for x in testdir.tmpdir.visit():
+ print (" " + x.relto(testdir.tmpdir))
+
+ return {
+ "runner": runner,
+ "package": package,
+ "swc": swc,
+ "snc": snc}
+
+ # N.B.: "swc" stands for "subdir with conftest.py"
+ # "snc" stands for "subdir no [i.e. without] conftest.py"
+ @pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [
+ # Effective target: package/..
+ ("runner", "..", 3),
+ ("package", "..", 3),
+ ("swc", "../..", 3),
+ ("snc", "../..", 3),
+
+ # Effective target: package
+ ("runner", "../package", 3),
+ ("package", ".", 3),
+ ("swc", "..", 3),
+ ("snc", "..", 3),
+
+ # Effective target: package/swc
+ ("runner", "../package/swc", 1),
+ ("package", "./swc", 1),
+ ("swc", ".", 1),
+ ("snc", "../swc", 1),
+
+ # Effective target: package/snc
+ ("runner", "../package/snc", 1),
+ ("package", "./snc", 1),
+ ("swc", "../snc", 1),
+ ("snc", ".", 1),
+ ])
+ @pytest.mark.issue616
+ def test_parsefactories_relative_node_ids(
+            self, testdir, chdir, testarg, expect_ntests_passed):
+ dirs = self._setup_tree(testdir)
+ print("pytest run in cwd: %s" %(
+ dirs[chdir].relto(testdir.tmpdir)))
+ print("pytestarg : %s" %(testarg))
+ print("expected pass : %s" %(expect_ntests_passed))
+ with dirs[chdir].as_cwd():
+ reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
+ reprec.assertoutcome(passed=expect_ntests_passed)
+
+
+@pytest.mark.parametrize('confcutdir,passed,error', [
+ ('.', 2, 0),
+ ('src', 1, 1),
+ (None, 1, 1),
+])
+def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):
+ """Test that conftest files are detected only up to a ini file, unless
+ an explicit --confcutdir option is given.
+ """
+ root = testdir.tmpdir
+ src = root.join('src').ensure(dir=1)
+ src.join('pytest.ini').write('[pytest]')
+ src.join('conftest.py').write(_pytest._code.Source("""
+ import pytest
+ @pytest.fixture
+ def fix1(): pass
+ """))
+ src.join('test_foo.py').write(_pytest._code.Source("""
+ def test_1(fix1):
+ pass
+ def test_2(out_of_reach):
+ pass
+ """))
+ root.join('conftest.py').write(_pytest._code.Source("""
+ import pytest
+ @pytest.fixture
+ def out_of_reach(): pass
+ """))
+
+ args = [str(src)]
+ if confcutdir:
+ args = ['--confcutdir=%s' % root.join(confcutdir)]
+ result = testdir.runpytest(*args)
+ match = ''
+ if passed:
+ match += '*%d passed*' % passed
+ if error:
+ match += '*%d error*' % error
+ result.stdout.fnmatch_lines(match)
+
+
+def test_issue1073_conftest_special_objects(testdir):
+ testdir.makeconftest("""
+ class DontTouchMe:
+ def __getattr__(self, x):
+ raise Exception('cant touch me')
+
+ x = DontTouchMe()
+ """)
+ testdir.makepyfile("""
+ def test_some():
+ pass
+ """)
+ res = testdir.runpytest()
+ assert res.ret == 0
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_doctest.py b/testing/web-platform/tests/tools/pytest/testing/test_doctest.py
new file mode 100644
index 000000000..a4821ee4c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_doctest.py
@@ -0,0 +1,715 @@
+# encoding: utf-8
+import sys
+import _pytest._code
+from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
+import pytest
+
+class TestDoctests:
+
+ def test_collect_testtextfile(self, testdir):
+ w = testdir.maketxtfile(whatever="")
+ checkfile = testdir.maketxtfile(test_something="""
+ alskdjalsdk
+ >>> i = 5
+ >>> i-1
+ 4
+ """)
+ for x in (testdir.tmpdir, checkfile):
+ #print "checking that %s returns custom items" % (x,)
+ items, reprec = testdir.inline_genitems(x)
+ assert len(items) == 1
+ assert isinstance(items[0], DoctestTextfile)
+ items, reprec = testdir.inline_genitems(w)
+ assert len(items) == 1
+
+ def test_collect_module_empty(self, testdir):
+ path = testdir.makepyfile(whatever="#")
+ for p in (path, testdir.tmpdir):
+ items, reprec = testdir.inline_genitems(p,
+ '--doctest-modules')
+ assert len(items) == 0
+
+ def test_collect_module_single_modulelevel_doctest(self, testdir):
+ path = testdir.makepyfile(whatever='""">>> pass"""')
+ for p in (path, testdir.tmpdir):
+ items, reprec = testdir.inline_genitems(p,
+ '--doctest-modules')
+ assert len(items) == 1
+ assert isinstance(items[0], DoctestItem)
+ assert isinstance(items[0].parent, DoctestModule)
+
+ def test_collect_module_two_doctest_one_modulelevel(self, testdir):
+ path = testdir.makepyfile(whatever="""
+ '>>> x = None'
+ def my_func():
+ ">>> magic = 42 "
+ """)
+ for p in (path, testdir.tmpdir):
+ items, reprec = testdir.inline_genitems(p,
+ '--doctest-modules')
+ assert len(items) == 2
+ assert isinstance(items[0], DoctestItem)
+ assert isinstance(items[1], DoctestItem)
+ assert isinstance(items[0].parent, DoctestModule)
+ assert items[0].parent is items[1].parent
+
+ def test_collect_module_two_doctest_no_modulelevel(self, testdir):
+ path = testdir.makepyfile(whatever="""
+ '# Empty'
+ def my_func():
+ ">>> magic = 42 "
+ def unuseful():
+ '''
+ # This is a function
+ # >>> # it doesn't have any doctest
+ '''
+ def another():
+ '''
+ # This is another function
+ >>> import os # this one does have a doctest
+ '''
+ """)
+ for p in (path, testdir.tmpdir):
+ items, reprec = testdir.inline_genitems(p,
+ '--doctest-modules')
+ assert len(items) == 2
+ assert isinstance(items[0], DoctestItem)
+ assert isinstance(items[1], DoctestItem)
+ assert isinstance(items[0].parent, DoctestModule)
+ assert items[0].parent is items[1].parent
+
+ def test_simple_doctestfile(self, testdir):
+ p = testdir.maketxtfile(test_doc="""
+ >>> x = 1
+ >>> x == 1
+ False
+ """)
+ reprec = testdir.inline_run(p, )
+ reprec.assertoutcome(failed=1)
+
+ def test_new_pattern(self, testdir):
+ p = testdir.maketxtfile(xdoc="""
+ >>> x = 1
+ >>> x == 1
+ False
+ """)
+ reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
+ reprec.assertoutcome(failed=1)
+
+ def test_multiple_patterns(self, testdir):
+ """Test support for multiple --doctest-glob arguments (#1255).
+ """
+ testdir.maketxtfile(xdoc="""
+ >>> 1
+ 1
+ """)
+ testdir.makefile('.foo', test="""
+ >>> 1
+ 1
+ """)
+ testdir.maketxtfile(test_normal="""
+ >>> 1
+ 1
+ """)
+ expected = set(['xdoc.txt', 'test.foo', 'test_normal.txt'])
+ assert set(x.basename for x in testdir.tmpdir.listdir()) == expected
+ args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
+ result = testdir.runpytest(*args)
+ result.stdout.fnmatch_lines([
+ '*test.foo *',
+ '*xdoc.txt *',
+ '*2 passed*',
+ ])
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ '*test_normal.txt *',
+ '*1 passed*',
+ ])
+
+ def test_doctest_unexpected_exception(self, testdir):
+ testdir.maketxtfile("""
+ >>> i = 0
+ >>> 0 / i
+ 2
+ """)
+ result = testdir.runpytest("--doctest-modules")
+ result.stdout.fnmatch_lines([
+ "*unexpected_exception*",
+ "*>>> i = 0*",
+ "*>>> 0 / i*",
+ "*UNEXPECTED*ZeroDivision*",
+ ])
+
+ def test_docstring_context_around_error(self, testdir):
+ """Test that we show some context before the actual line of a failing
+ doctest.
+ """
+ testdir.makepyfile('''
+ def foo():
+ """
+ text-line-1
+ text-line-2
+ text-line-3
+ text-line-4
+ text-line-5
+ text-line-6
+ text-line-7
+ text-line-8
+ text-line-9
+ text-line-10
+ text-line-11
+ >>> 1 + 1
+ 3
+
+ text-line-after
+ """
+ ''')
+ result = testdir.runpytest('--doctest-modules')
+ result.stdout.fnmatch_lines([
+ '*docstring_context_around_error*',
+ '005*text-line-3',
+ '006*text-line-4',
+ '013*text-line-11',
+ '014*>>> 1 + 1',
+ 'Expected:',
+ ' 3',
+ 'Got:',
+ ' 2',
+ ])
+ # lines below should be trimmed out
+ assert 'text-line-2' not in result.stdout.str()
+ assert 'text-line-after' not in result.stdout.str()
+
+ def test_doctest_linedata_missing(self, testdir):
+ testdir.tmpdir.join('hello.py').write(_pytest._code.Source("""
+ class Fun(object):
+ @property
+ def test(self):
+ '''
+ >>> a = 1
+ >>> 1/0
+ '''
+ """))
+ result = testdir.runpytest("--doctest-modules")
+ result.stdout.fnmatch_lines([
+ "*hello*",
+ "*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*",
+ "*1/0*",
+ "*UNEXPECTED*ZeroDivision*",
+ "*1 failed*",
+ ])
+
+
+ def test_doctest_unex_importerror(self, testdir):
+ testdir.tmpdir.join("hello.py").write(_pytest._code.Source("""
+ import asdalsdkjaslkdjasd
+ """))
+ testdir.maketxtfile("""
+ >>> import hello
+ >>>
+ """)
+ result = testdir.runpytest("--doctest-modules")
+ result.stdout.fnmatch_lines([
+ "*>>> import hello",
+ "*UNEXPECTED*ImportError*",
+ "*import asdals*",
+ ])
+
+ def test_doctestmodule(self, testdir):
+ p = testdir.makepyfile("""
+ '''
+ >>> x = 1
+ >>> x == 1
+ False
+
+ '''
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec.assertoutcome(failed=1)
+
+ def test_doctestmodule_external_and_issue116(self, testdir):
+ p = testdir.mkpydir("hello")
+ p.join("__init__.py").write(_pytest._code.Source("""
+ def somefunc():
+ '''
+ >>> i = 0
+ >>> i + 1
+ 2
+ '''
+ """))
+ result = testdir.runpytest(p, "--doctest-modules")
+ result.stdout.fnmatch_lines([
+ '004 *>>> i = 0',
+ '005 *>>> i + 1',
+ '*Expected:',
+ "* 2",
+ "*Got:",
+ "* 1",
+ "*:5: DocTestFailure"
+ ])
+
+
+ def test_txtfile_failing(self, testdir):
+ p = testdir.maketxtfile("""
+ >>> i = 0
+ >>> i + 1
+ 2
+ """)
+ result = testdir.runpytest(p, "-s")
+ result.stdout.fnmatch_lines([
+ '001 >>> i = 0',
+ '002 >>> i + 1',
+ 'Expected:',
+ " 2",
+ "Got:",
+ " 1",
+ "*test_txtfile_failing.txt:2: DocTestFailure"
+ ])
+
+ def test_txtfile_with_fixtures(self, testdir):
+ p = testdir.maketxtfile("""
+ >>> dir = getfixture('tmpdir')
+ >>> type(dir).__name__
+ 'LocalPath'
+ """)
+ reprec = testdir.inline_run(p, )
+ reprec.assertoutcome(passed=1)
+
+ def test_txtfile_with_usefixtures_in_ini(self, testdir):
+ testdir.makeini("""
+ [pytest]
+ usefixtures = myfixture
+ """)
+ testdir.makeconftest("""
+ import pytest
+ @pytest.fixture
+ def myfixture(monkeypatch):
+ monkeypatch.setenv("HELLO", "WORLD")
+ """)
+
+ p = testdir.maketxtfile("""
+ >>> import os
+ >>> os.environ["HELLO"]
+ 'WORLD'
+ """)
+ reprec = testdir.inline_run(p, )
+ reprec.assertoutcome(passed=1)
+
+ def test_doctestmodule_with_fixtures(self, testdir):
+ p = testdir.makepyfile("""
+ '''
+ >>> dir = getfixture('tmpdir')
+ >>> type(dir).__name__
+ 'LocalPath'
+ '''
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec.assertoutcome(passed=1)
+
+ def test_doctestmodule_three_tests(self, testdir):
+ p = testdir.makepyfile("""
+ '''
+ >>> dir = getfixture('tmpdir')
+ >>> type(dir).__name__
+ 'LocalPath'
+ '''
+ def my_func():
+ '''
+ >>> magic = 42
+ >>> magic - 42
+ 0
+ '''
+ def unuseful():
+ pass
+ def another():
+ '''
+ >>> import os
+ >>> os is os
+ True
+ '''
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec.assertoutcome(passed=3)
+
+ def test_doctestmodule_two_tests_one_fail(self, testdir):
+ p = testdir.makepyfile("""
+ class MyClass:
+ def bad_meth(self):
+ '''
+ >>> magic = 42
+ >>> magic
+ 0
+ '''
+ def nice_meth(self):
+ '''
+ >>> magic = 42
+ >>> magic - 42
+ 0
+ '''
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec.assertoutcome(failed=1, passed=1)
+
+ def test_ignored_whitespace(self, testdir):
+ testdir.makeini("""
+ [pytest]
+ doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
+ """)
+ p = testdir.makepyfile("""
+ class MyClass:
+ '''
+ >>> a = "foo "
+ >>> print(a)
+ foo
+ '''
+ pass
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec.assertoutcome(passed=1)
+
+ def test_non_ignored_whitespace(self, testdir):
+ testdir.makeini("""
+ [pytest]
+ doctest_optionflags = ELLIPSIS
+ """)
+ p = testdir.makepyfile("""
+ class MyClass:
+ '''
+ >>> a = "foo "
+ >>> print(a)
+ foo
+ '''
+ pass
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules")
+ reprec.assertoutcome(failed=1, passed=0)
+
+ def test_ignored_whitespace_glob(self, testdir):
+ testdir.makeini("""
+ [pytest]
+ doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
+ """)
+ p = testdir.maketxtfile(xdoc="""
+ >>> a = "foo "
+ >>> print(a)
+ foo
+ """)
+ reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
+ reprec.assertoutcome(passed=1)
+
+ def test_non_ignored_whitespace_glob(self, testdir):
+ testdir.makeini("""
+ [pytest]
+ doctest_optionflags = ELLIPSIS
+ """)
+ p = testdir.maketxtfile(xdoc="""
+ >>> a = "foo "
+ >>> print(a)
+ foo
+ """)
+ reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
+ reprec.assertoutcome(failed=1, passed=0)
+
+ def test_contains_unicode(self, testdir):
+ """Fix internal error with docstrings containing non-ascii characters.
+ """
+ testdir.makepyfile(u'''
+ # encoding: utf-8
+ def foo():
+ """
+                >>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
+ 'anything'
+ """
+ ''')
+ result = testdir.runpytest('--doctest-modules')
+ result.stdout.fnmatch_lines([
+ 'Got nothing',
+ '* 1 failed in*',
+ ])
+
+ def test_ignore_import_errors_on_doctest(self, testdir):
+ p = testdir.makepyfile("""
+ import asdf
+
+ def add_one(x):
+ '''
+ >>> add_one(1)
+ 2
+ '''
+ return x + 1
+ """)
+
+ reprec = testdir.inline_run(p, "--doctest-modules",
+ "--doctest-ignore-import-errors")
+ reprec.assertoutcome(skipped=1, failed=1, passed=0)
+
+ def test_junit_report_for_doctest(self, testdir):
+ """
+ #713: Fix --junit-xml option when used with --doctest-modules.
+ """
+ p = testdir.makepyfile("""
+ def foo():
+ '''
+ >>> 1 + 1
+ 3
+ '''
+ pass
+ """)
+ reprec = testdir.inline_run(p, "--doctest-modules",
+ "--junit-xml=junit.xml")
+ reprec.assertoutcome(failed=1)
+
+
+class TestLiterals:
+
+ @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
+ def test_allow_unicode(self, testdir, config_mode):
+ """Test that doctests which output unicode work in all python versions
+ tested by pytest when the ALLOW_UNICODE option is used (either in
+ the ini file or by an inline comment).
+ """
+ if config_mode == 'ini':
+ testdir.makeini('''
+ [pytest]
+ doctest_optionflags = ALLOW_UNICODE
+ ''')
+ comment = ''
+ else:
+ comment = '#doctest: +ALLOW_UNICODE'
+
+ testdir.maketxtfile(test_doc="""
+ >>> b'12'.decode('ascii') {comment}
+ '12'
+ """.format(comment=comment))
+ testdir.makepyfile(foo="""
+ def foo():
+ '''
+ >>> b'12'.decode('ascii') {comment}
+ '12'
+ '''
+ """.format(comment=comment))
+ reprec = testdir.inline_run("--doctest-modules")
+ reprec.assertoutcome(passed=2)
+
+ @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
+ def test_allow_bytes(self, testdir, config_mode):
+ """Test that doctests which output bytes work in all python versions
+ tested by pytest when the ALLOW_BYTES option is used (either in
+ the ini file or by an inline comment)(#1287).
+ """
+ if config_mode == 'ini':
+ testdir.makeini('''
+ [pytest]
+ doctest_optionflags = ALLOW_BYTES
+ ''')
+ comment = ''
+ else:
+ comment = '#doctest: +ALLOW_BYTES'
+
+ testdir.maketxtfile(test_doc="""
+ >>> b'foo' {comment}
+ 'foo'
+ """.format(comment=comment))
+ testdir.makepyfile(foo="""
+ def foo():
+ '''
+ >>> b'foo' {comment}
+ 'foo'
+ '''
+ """.format(comment=comment))
+ reprec = testdir.inline_run("--doctest-modules")
+ reprec.assertoutcome(passed=2)
+
+ def test_unicode_string(self, testdir):
+ """Test that doctests which output unicode fail in Python 2 when
+ the ALLOW_UNICODE option is not used. The same test should pass
+ in Python 3.
+ """
+ testdir.maketxtfile(test_doc="""
+ >>> b'12'.decode('ascii')
+ '12'
+ """)
+ reprec = testdir.inline_run()
+ passed = int(sys.version_info[0] >= 3)
+ reprec.assertoutcome(passed=passed, failed=int(not passed))
+
+ def test_bytes_literal(self, testdir):
+ """Test that doctests which output bytes fail in Python 3 when
+ the ALLOW_BYTES option is not used. The same test should pass
+ in Python 2 (#1287).
+ """
+ testdir.maketxtfile(test_doc="""
+ >>> b'foo'
+ 'foo'
+ """)
+ reprec = testdir.inline_run()
+ passed = int(sys.version_info[0] == 2)
+ reprec.assertoutcome(passed=passed, failed=int(not passed))
+
+
+class TestDoctestSkips:
+ """
+ If all examples in a doctest are skipped due to the SKIP option, then
+ the tests should be SKIPPED rather than PASSED. (#957)
+ """
+
+ @pytest.fixture(params=['text', 'module'])
+ def makedoctest(self, testdir, request):
+ def makeit(doctest):
+ mode = request.param
+ if mode == 'text':
+ testdir.maketxtfile(doctest)
+ else:
+ assert mode == 'module'
+ testdir.makepyfile('"""\n%s"""' % doctest)
+
+ return makeit
+
+ def test_one_skipped(self, testdir, makedoctest):
+ makedoctest("""
+ >>> 1 + 1 # doctest: +SKIP
+ 2
+ >>> 2 + 2
+ 4
+ """)
+ reprec = testdir.inline_run("--doctest-modules")
+ reprec.assertoutcome(passed=1)
+
+ def test_one_skipped_failed(self, testdir, makedoctest):
+ makedoctest("""
+ >>> 1 + 1 # doctest: +SKIP
+ 2
+ >>> 2 + 2
+ 200
+ """)
+ reprec = testdir.inline_run("--doctest-modules")
+ reprec.assertoutcome(failed=1)
+
+ def test_all_skipped(self, testdir, makedoctest):
+ makedoctest("""
+ >>> 1 + 1 # doctest: +SKIP
+ 2
+ >>> 2 + 2 # doctest: +SKIP
+ 200
+ """)
+ reprec = testdir.inline_run("--doctest-modules")
+ reprec.assertoutcome(skipped=1)
+
+
+class TestDoctestAutoUseFixtures:
+
+ SCOPES = ['module', 'session', 'class', 'function']
+
+ def test_doctest_module_session_fixture(self, testdir):
+ """Test that session fixtures are initialized for doctest modules (#768)
+ """
+ # session fixture which changes some global data, which will
+ # be accessed by doctests in a module
+ testdir.makeconftest("""
+ import pytest
+ import sys
+
+ @pytest.yield_fixture(autouse=True, scope='session')
+ def myfixture():
+ assert not hasattr(sys, 'pytest_session_data')
+ sys.pytest_session_data = 1
+ yield
+ del sys.pytest_session_data
+ """)
+ testdir.makepyfile(foo="""
+ import sys
+
+ def foo():
+ '''
+ >>> assert sys.pytest_session_data == 1
+ '''
+
+ def bar():
+ '''
+ >>> assert sys.pytest_session_data == 1
+ '''
+ """)
+ result = testdir.runpytest("--doctest-modules")
+ result.stdout.fnmatch_lines('*2 passed*')
+
+ @pytest.mark.parametrize('scope', SCOPES)
+ @pytest.mark.parametrize('enable_doctest', [True, False])
+ def test_fixture_scopes(self, testdir, scope, enable_doctest):
+ """Test that auto-use fixtures work properly with doctest modules.
+ See #1057 and #1100.
+ """
+ testdir.makeconftest('''
+ import pytest
+
+ @pytest.fixture(autouse=True, scope="{scope}")
+ def auto(request):
+ return 99
+ '''.format(scope=scope))
+ testdir.makepyfile(test_1='''
+ def test_foo():
+ """
+ >>> getfixture('auto') + 1
+ 100
+ """
+ def test_bar():
+ assert 1
+ ''')
+ params = ('--doctest-modules',) if enable_doctest else ()
+ passes = 3 if enable_doctest else 2
+ result = testdir.runpytest(*params)
+ result.stdout.fnmatch_lines(['*=== %d passed in *' % passes])
+
+ @pytest.mark.parametrize('scope', SCOPES)
+ @pytest.mark.parametrize('autouse', [True, False])
+ @pytest.mark.parametrize('use_fixture_in_doctest', [True, False])
+ def test_fixture_module_doctest_scopes(self, testdir, scope, autouse,
+ use_fixture_in_doctest):
+ """Test that auto-use fixtures work properly with doctest files.
+ See #1057 and #1100.
+ """
+ testdir.makeconftest('''
+ import pytest
+
+ @pytest.fixture(autouse={autouse}, scope="{scope}")
+ def auto(request):
+ return 99
+ '''.format(scope=scope, autouse=autouse))
+ if use_fixture_in_doctest:
+ testdir.maketxtfile(test_doc="""
+ >>> getfixture('auto')
+ 99
+ """)
+ else:
+ testdir.maketxtfile(test_doc="""
+ >>> 1 + 1
+ 2
+ """)
+ result = testdir.runpytest('--doctest-modules')
+ assert 'FAILURES' not in str(result.stdout.str())
+ result.stdout.fnmatch_lines(['*=== 1 passed in *'])
+
+ @pytest.mark.parametrize('scope', SCOPES)
+ def test_auto_use_request_attributes(self, testdir, scope):
+ """Check that all attributes of a request in an autouse fixture
+ behave as expected when requested for a doctest item.
+ """
+ testdir.makeconftest('''
+ import pytest
+
+ @pytest.fixture(autouse=True, scope="{scope}")
+ def auto(request):
+ if "{scope}" == 'module':
+ assert request.module is None
+ if "{scope}" == 'class':
+ assert request.cls is None
+ if "{scope}" == 'function':
+ assert request.function is None
+ return 99
+ '''.format(scope=scope))
+ testdir.maketxtfile(test_doc="""
+ >>> 1 + 1
+ 2
+ """)
+ result = testdir.runpytest('--doctest-modules')
+ assert 'FAILURES' not in str(result.stdout.str())
+ result.stdout.fnmatch_lines(['*=== 1 passed in *'])
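
The ALLOW_UNICODE and ALLOW_BYTES cases above enable the flags through an ini
file or a formatted comment string. As a rough user-level sketch (the module
and function names here are illustrative, not part of this suite), the same
flag can also be enabled with an ordinary doctest directive so that one
example passes under both Python 2 and Python 3:

    def parse(value):
        """
        >>> parse(b'12')  # doctest: +ALLOW_UNICODE
        '12'
        """
        return value.decode('ascii')

On Python 2 the call returns u'12'; ALLOW_UNICODE strips the u prefix before
comparison, so the plain '12' in the expected output still matches. On
Python 3 the output already matches as-is.
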
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_genscript.py b/testing/web-platform/tests/tools/pytest/testing/test_genscript.py
new file mode 100644
index 000000000..1260a5a6b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_genscript.py
@@ -0,0 +1,51 @@
+import pytest
+import sys
+
+
+@pytest.fixture(scope="module")
+def standalone(request):
+ return Standalone(request)
+
+class Standalone:
+ def __init__(self, request):
+ self.testdir = request.getfuncargvalue("testdir")
+ script = "mypytest"
+ result = self.testdir.runpytest("--genscript=%s" % script)
+ assert result.ret == 0
+ self.script = self.testdir.tmpdir.join(script)
+ assert self.script.check()
+
+ def run(self, anypython, testdir, *args):
+ return testdir._run(anypython, self.script, *args)
+
+def test_gen(testdir, anypython, standalone):
+ if sys.version_info >= (2,7):
+ result = testdir._run(anypython, "-c",
+ "import sys;print (sys.version_info >=(2,7))")
+ assert result.ret == 0
+ if result.stdout.str() == "False":
+ pytest.skip("genscript called from python2.7 cannot work "
+                        "with earlier python versions")
+ result = standalone.run(anypython, testdir, '--version')
+ if result.ret == 2:
+ result.stderr.fnmatch_lines(["*ERROR: setuptools not installed*"])
+ elif result.ret == 0:
+ result.stderr.fnmatch_lines([
+ "*imported from*mypytest*"
+ ])
+ p = testdir.makepyfile("def test_func(): assert 0")
+ result = standalone.run(anypython, testdir, p)
+ assert result.ret != 0
+ else:
+ pytest.fail("Unexpected return code")
+
+
+def test_freeze_includes():
+ """
+ Smoke test for freeze_includes(), to ensure that it works across all
+ supported python versions.
+ """
+ includes = pytest.freeze_includes()
+ assert len(includes) > 1
+ assert '_pytest.genscript' in includes
+
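
test_freeze_includes above only checks that pytest.freeze_includes() returns a
non-trivial list of module names; in practice that list is meant to be handed
to a freezing tool so that pytest can still run inside the frozen binary. A
minimal sketch of that hand-off, assuming cx_Freeze is available and that a
runtests.py entry point exists (both are assumptions, not part of this
repository):

    # setup_freeze.py -- illustrative sketch only
    from cx_Freeze import setup, Executable

    import pytest

    setup(
        name="frozen-tests",
        version="0.1",
        options={"build_exe": {"includes": pytest.freeze_includes()}},
        executables=[Executable("runtests.py")],
    )

Any other freezer would work the same way; the point is simply that pytest's
internal modules end up bundled alongside the application code.
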
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_helpconfig.py b/testing/web-platform/tests/tools/pytest/testing/test_helpconfig.py
new file mode 100644
index 000000000..9f8d87b7c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_helpconfig.py
@@ -0,0 +1,69 @@
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+import pytest
+
+def test_version(testdir, pytestconfig):
+ result = testdir.runpytest("--version")
+ assert result.ret == 0
+ #p = py.path.local(py.__file__).dirpath()
+ result.stderr.fnmatch_lines([
+ '*pytest*%s*imported from*' % (pytest.__version__, )
+ ])
+ if pytestconfig.pluginmanager.list_plugin_distinfo():
+ result.stderr.fnmatch_lines([
+ "*setuptools registered plugins:",
+ "*at*",
+ ])
+
+def test_help(testdir):
+ result = testdir.runpytest("--help")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines("""
+ *-v*verbose*
+ *setup.cfg*
+ *minversion*
+ *to see*markers*py.test --markers*
+ *to see*fixtures*py.test --fixtures*
+ """)
+
+def test_hookvalidation_unknown(testdir):
+ testdir.makeconftest("""
+ def pytest_hello(xyz):
+ pass
+ """)
+ result = testdir.runpytest()
+ assert result.ret != 0
+ result.stderr.fnmatch_lines([
+ '*unknown hook*pytest_hello*'
+ ])
+
+def test_hookvalidation_optional(testdir):
+ testdir.makeconftest("""
+ import pytest
+ @pytest.hookimpl(optionalhook=True)
+ def pytest_hello(xyz):
+ pass
+ """)
+ result = testdir.runpytest()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+
+def test_traceconfig(testdir):
+ result = testdir.runpytest("--traceconfig")
+ result.stdout.fnmatch_lines([
+ "*using*pytest*py*",
+ "*active plugins*",
+ ])
+
+def test_debug(testdir, monkeypatch):
+ result = testdir.runpytest_subprocess("--debug")
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ p = testdir.tmpdir.join("pytestdebug.log")
+ assert "pytest_sessionstart" in p.read()
+
+def test_PYTEST_DEBUG(testdir, monkeypatch):
+ monkeypatch.setenv("PYTEST_DEBUG", "1")
+ result = testdir.runpytest_subprocess()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ result.stderr.fnmatch_lines([
+ "*pytest_plugin_registered*",
+ "*manager*PluginManager*"
+ ])
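
test_debug and test_PYTEST_DEBUG above cover two related diagnostic switches:
--debug writes a pytestdebug.log trace file into the invocation directory, and
the PYTEST_DEBUG environment variable makes the plugin manager trace hook
calls (pytest_plugin_registered and friends) to stderr. A rough in-process
sketch of both switches, assuming a local file named test_sample.py exists
(the file name is an assumption):

    import os

    import pytest

    # Trace plugin-manager activity to stderr for this process.
    os.environ["PYTEST_DEBUG"] = "1"

    # --debug additionally dumps a detailed trace to ./pytestdebug.log.
    pytest.main(["--debug", "test_sample.py"])

Both switches only add diagnostics; they do not change which tests are
collected or how they are run.
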
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_junitxml.py b/testing/web-platform/tests/tools/pytest/testing/test_junitxml.py
new file mode 100644
index 000000000..5960f8825
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_junitxml.py
@@ -0,0 +1,816 @@
+# -*- coding: utf-8 -*-
+
+from xml.dom import minidom
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+import py
+import sys
+import os
+from _pytest.junitxml import LogXML
+import pytest
+
+
+def runandparse(testdir, *args):
+ resultpath = testdir.tmpdir.join("junit.xml")
+ result = testdir.runpytest("--junitxml=%s" % resultpath, *args)
+ xmldoc = minidom.parse(str(resultpath))
+ return result, DomNode(xmldoc)
+
+
+def assert_attr(node, **kwargs):
+ __tracebackhide__ = True
+
+ def nodeval(node, name):
+ anode = node.getAttributeNode(name)
+ if anode is not None:
+ return anode.value
+
+ expected = dict((name, str(value)) for name, value in kwargs.items())
+ on_node = dict((name, nodeval(node, name)) for name in expected)
+ assert on_node == expected
+
+
+class DomNode(object):
+ def __init__(self, dom):
+ self.__node = dom
+
+ def __repr__(self):
+ return self.__node.toxml()
+
+ def find_first_by_tag(self, tag):
+ return self.find_nth_by_tag(tag, 0)
+
+ def _by_tag(self, tag):
+ return self.__node.getElementsByTagName(tag)
+
+ def find_nth_by_tag(self, tag, n):
+ items = self._by_tag(tag)
+ try:
+ nth = items[n]
+ except IndexError:
+ pass
+ else:
+ return type(self)(nth)
+
+ def find_by_tag(self, tag):
+ t = type(self)
+ return [t(x) for x in self.__node.getElementsByTagName(tag)]
+
+ def __getitem__(self, key):
+ node = self.__node.getAttributeNode(key)
+ if node is not None:
+ return node.value
+
+ def assert_attr(self, **kwargs):
+ __tracebackhide__ = True
+ return assert_attr(self.__node, **kwargs)
+
+ def toxml(self):
+ return self.__node.toxml()
+
+ @property
+ def text(self):
+ return self.__node.childNodes[0].wholeText
+
+ @property
+ def tag(self):
+ return self.__node.tagName
+
+ @property
+ def next_siebling(self):
+ return type(self)(self.__node.nextSibling)
+
+
+class TestPython:
+ def test_summing_simple(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_pass():
+ pass
+ def test_fail():
+ assert 0
+ def test_skip():
+ pytest.skip("")
+ @pytest.mark.xfail
+ def test_xfail():
+ assert 0
+ @pytest.mark.xfail
+ def test_xpass():
+ assert 1
+ """)
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=2)
+
+ def test_timing_function(self, testdir):
+ testdir.makepyfile("""
+ import time, pytest
+ def setup_module():
+ time.sleep(0.01)
+ def teardown_module():
+ time.sleep(0.01)
+ def test_sleep():
+ time.sleep(0.01)
+ """)
+ result, dom = runandparse(testdir)
+ node = dom.find_first_by_tag("testsuite")
+ tnode = node.find_first_by_tag("testcase")
+ val = tnode["time"]
+ assert round(float(val), 2) >= 0.03
+
+ def test_setup_error(self, testdir):
+ testdir.makepyfile("""
+ def pytest_funcarg__arg(request):
+ raise ValueError()
+ def test_function(arg):
+ pass
+ """)
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(errors=1, tests=0)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_setup_error.py",
+ line="2",
+ classname="test_setup_error",
+ name="test_function")
+ fnode = tnode.find_first_by_tag("error")
+ fnode.assert_attr(message="test setup failure")
+ assert "ValueError" in fnode.toxml()
+
+ def test_skip_contains_name_reason(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_skip():
+ pytest.skip("hello23")
+ """)
+ result, dom = runandparse(testdir)
+ assert result.ret == 0
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(skips=1)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_skip_contains_name_reason.py",
+ line="1",
+ classname="test_skip_contains_name_reason",
+ name="test_skip")
+ snode = tnode.find_first_by_tag("skipped")
+ snode.assert_attr(type="pytest.skip", message="hello23", )
+
+ def test_classname_instance(self, testdir):
+ testdir.makepyfile("""
+ class TestClass:
+ def test_method(self):
+ assert 0
+ """)
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(failures=1)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_classname_instance.py",
+ line="1",
+ classname="test_classname_instance.TestClass",
+ name="test_method")
+
+ def test_classname_nested_dir(self, testdir):
+ p = testdir.tmpdir.ensure("sub", "test_hello.py")
+ p.write("def test_func(): 0/0")
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(failures=1)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file=os.path.join("sub", "test_hello.py"),
+ line="0",
+ classname="sub.test_hello",
+ name="test_func")
+
+ def test_internal_error(self, testdir):
+ testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0")
+ testdir.makepyfile("def test_function(): pass")
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(errors=1, tests=0)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(classname="pytest", name="internal")
+ fnode = tnode.find_first_by_tag("error")
+ fnode.assert_attr(message="internal error")
+ assert "Division" in fnode.toxml()
+
+ def test_failure_function(self, testdir):
+ testdir.makepyfile("""
+ import sys
+ def test_fail():
+ print ("hello-stdout")
+ sys.stderr.write("hello-stderr\\n")
+ raise ValueError(42)
+ """)
+
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(failures=1, tests=1)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_failure_function.py",
+ line="1",
+ classname="test_failure_function",
+ name="test_fail")
+ fnode = tnode.find_first_by_tag("failure")
+ fnode.assert_attr(message="ValueError: 42")
+ assert "ValueError" in fnode.toxml()
+ systemout = fnode.next_siebling
+ assert systemout.tag == "system-out"
+ assert "hello-stdout" in systemout.toxml()
+ systemerr = systemout.next_siebling
+ assert systemerr.tag == "system-err"
+ assert "hello-stderr" in systemerr.toxml()
+
+ def test_failure_verbose_message(self, testdir):
+ testdir.makepyfile("""
+ import sys
+ def test_fail():
+ assert 0, "An error"
+ """)
+
+ result, dom = runandparse(testdir)
+ node = dom.find_first_by_tag("testsuite")
+ tnode = node.find_first_by_tag("testcase")
+ fnode = tnode.find_first_by_tag("failure")
+ fnode.assert_attr(message="AssertionError: An error assert 0")
+
+ def test_failure_escape(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize('arg1', "<&'", ids="<&'")
+ def test_func(arg1):
+ print(arg1)
+ assert 0
+ """)
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(failures=3, tests=3)
+
+ for index, char in enumerate("<&'"):
+
+ tnode = node.find_nth_by_tag("testcase", index)
+ tnode.assert_attr(
+ file="test_failure_escape.py",
+ line="1",
+ classname="test_failure_escape",
+ name="test_func[%s]" % char)
+ sysout = tnode.find_first_by_tag('system-out')
+ text = sysout.text
+ assert text == '%s\n' % char
+
+ def test_junit_prefixing(self, testdir):
+ testdir.makepyfile("""
+ def test_func():
+ assert 0
+ class TestHello:
+ def test_hello(self):
+ pass
+ """)
+ result, dom = runandparse(testdir, "--junitprefix=xyz")
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(failures=1, tests=2)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_junit_prefixing.py",
+ line="0",
+ classname="xyz.test_junit_prefixing",
+ name="test_func")
+ tnode = node.find_nth_by_tag("testcase", 1)
+ tnode.assert_attr(
+ file="test_junit_prefixing.py",
+ line="3",
+ classname="xyz.test_junit_prefixing."
+ "TestHello",
+ name="test_hello")
+
+ def test_xfailure_function(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_xfail():
+ pytest.xfail("42")
+ """)
+ result, dom = runandparse(testdir)
+ assert not result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(skips=1, tests=0)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_xfailure_function.py",
+ line="1",
+ classname="test_xfailure_function",
+ name="test_xfail")
+ fnode = tnode.find_first_by_tag("skipped")
+ fnode.assert_attr(message="expected test failure")
+ # assert "ValueError" in fnode.toxml()
+
+ def test_xfailure_xpass(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xfail
+ def test_xpass():
+ pass
+ """)
+ result, dom = runandparse(testdir)
+ # assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(skips=1, tests=0)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_xfailure_xpass.py",
+ line="1",
+ classname="test_xfailure_xpass",
+ name="test_xpass")
+ fnode = tnode.find_first_by_tag("skipped")
+ fnode.assert_attr(message="xfail-marked test passes unexpectedly")
+ # assert "ValueError" in fnode.toxml()
+
+ def test_collect_error(self, testdir):
+ testdir.makepyfile("syntax error")
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(errors=1, tests=0)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_collect_error.py",
+ name="test_collect_error")
+ assert tnode["line"] is None
+ fnode = tnode.find_first_by_tag("error")
+ fnode.assert_attr(message="collection failure")
+ assert "SyntaxError" in fnode.toxml()
+
+ def test_collect_skipped(self, testdir):
+ testdir.makepyfile("import pytest; pytest.skip('xyz')")
+ result, dom = runandparse(testdir)
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(skips=1, tests=0)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(
+ file="test_collect_skipped.py",
+ name="test_collect_skipped")
+
+ # py.test doesn't give us a line here.
+ assert tnode["line"] is None
+
+ fnode = tnode.find_first_by_tag("skipped")
+ fnode.assert_attr(message="collection skipped")
+
+ def test_unicode(self, testdir):
+ value = 'hx\xc4\x85\xc4\x87\n'
+ testdir.makepyfile("""
+ # coding: latin1
+ def test_hello():
+ print (%r)
+ assert 0
+ """ % value)
+ result, dom = runandparse(testdir)
+ assert result.ret == 1
+ tnode = dom.find_first_by_tag("testcase")
+ fnode = tnode.find_first_by_tag("failure")
+ if not sys.platform.startswith("java"):
+ assert "hx" in fnode.toxml()
+
+ def test_assertion_binchars(self, testdir):
+ """this test did fail when the escaping wasnt strict"""
+ testdir.makepyfile("""
+
+ M1 = '\x01\x02\x03\x04'
+ M2 = '\x01\x02\x03\x05'
+
+ def test_str_compare():
+ assert M1 == M2
+ """)
+ result, dom = runandparse(testdir)
+ print(dom.toxml())
+
+ def test_pass_captures_stdout(self, testdir):
+ testdir.makepyfile("""
+ def test_pass():
+ print('hello-stdout')
+ """)
+ result, dom = runandparse(testdir)
+ node = dom.find_first_by_tag("testsuite")
+ pnode = node.find_first_by_tag("testcase")
+ systemout = pnode.find_first_by_tag("system-out")
+ assert "hello-stdout" in systemout.toxml()
+
+ def test_pass_captures_stderr(self, testdir):
+ testdir.makepyfile("""
+ import sys
+ def test_pass():
+ sys.stderr.write('hello-stderr')
+ """)
+ result, dom = runandparse(testdir)
+ node = dom.find_first_by_tag("testsuite")
+ pnode = node.find_first_by_tag("testcase")
+ systemout = pnode.find_first_by_tag("system-err")
+ assert "hello-stderr" in systemout.toxml()
+
+ def test_setup_error_captures_stdout(self, testdir):
+ testdir.makepyfile("""
+ def pytest_funcarg__arg(request):
+ print('hello-stdout')
+ raise ValueError()
+ def test_function(arg):
+ pass
+ """)
+ result, dom = runandparse(testdir)
+ node = dom.find_first_by_tag("testsuite")
+ pnode = node.find_first_by_tag("testcase")
+ systemout = pnode.find_first_by_tag("system-out")
+ assert "hello-stdout" in systemout.toxml()
+
+ def test_setup_error_captures_stderr(self, testdir):
+ testdir.makepyfile("""
+ import sys
+ def pytest_funcarg__arg(request):
+ sys.stderr.write('hello-stderr')
+ raise ValueError()
+ def test_function(arg):
+ pass
+ """)
+ result, dom = runandparse(testdir)
+ node = dom.find_first_by_tag("testsuite")
+ pnode = node.find_first_by_tag("testcase")
+ systemout = pnode.find_first_by_tag("system-err")
+ assert "hello-stderr" in systemout.toxml()
+
+
+def test_mangle_test_address():
+ from _pytest.junitxml import mangle_test_address
+ address = '::'.join(
+ ["a/my.py.thing.py", "Class", "()", "method", "[a-1-::]"])
+ newnames = mangle_test_address(address)
+ assert newnames == ["a.my.py.thing", "Class", "method", "[a-1-::]"]
+
+
+def test_dont_configure_on_slaves(tmpdir):
+ gotten = []
+
+ class FakeConfig:
+ def __init__(self):
+ self.pluginmanager = self
+ self.option = self
+
+ junitprefix = None
+        # XXX: shouldn't need tmpdir?
+ xmlpath = str(tmpdir.join('junix.xml'))
+ register = gotten.append
+
+ fake_config = FakeConfig()
+ from _pytest import junitxml
+ junitxml.pytest_configure(fake_config)
+ assert len(gotten) == 1
+ FakeConfig.slaveinput = None
+ junitxml.pytest_configure(fake_config)
+ assert len(gotten) == 1
+
+
+class TestNonPython:
+ def test_summing_simple(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ def pytest_collect_file(path, parent):
+ if path.ext == ".xyz":
+ return MyItem(path, parent)
+ class MyItem(pytest.Item):
+ def __init__(self, path, parent):
+ super(MyItem, self).__init__(path.basename, parent)
+ self.fspath = path
+ def runtest(self):
+ raise ValueError(42)
+ def repr_failure(self, excinfo):
+ return "custom item runtest failed"
+ """)
+ testdir.tmpdir.join("myfile.xyz").write("hello")
+ result, dom = runandparse(testdir)
+ assert result.ret
+ node = dom.find_first_by_tag("testsuite")
+ node.assert_attr(errors=0, failures=1, skips=0, tests=1)
+ tnode = node.find_first_by_tag("testcase")
+ tnode.assert_attr(name="myfile.xyz")
+ fnode = tnode.find_first_by_tag("failure")
+ fnode.assert_attr(message="custom item runtest failed")
+ assert "custom item runtest failed" in fnode.toxml()
+
+
+def test_nullbyte(testdir):
+ # A null byte can not occur in XML (see section 2.2 of the spec)
+ testdir.makepyfile("""
+ import sys
+ def test_print_nullbyte():
+ sys.stdout.write('Here the null -->' + chr(0) + '<--')
+ sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
+ assert False
+ """)
+ xmlf = testdir.tmpdir.join('junit.xml')
+ testdir.runpytest('--junitxml=%s' % xmlf)
+ text = xmlf.read()
+ assert '\x00' not in text
+ assert '#x00' in text
+
+
+def test_nullbyte_replace(testdir):
+ # Check if the null byte gets replaced
+ testdir.makepyfile("""
+ import sys
+ def test_print_nullbyte():
+ sys.stdout.write('Here the null -->' + chr(0) + '<--')
+ sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
+ assert False
+ """)
+ xmlf = testdir.tmpdir.join('junit.xml')
+ testdir.runpytest('--junitxml=%s' % xmlf)
+ text = xmlf.read()
+ assert '#x0' in text
+
+
+def test_invalid_xml_escape():
+    # Test some more invalid xml chars; the full range should really be
+    # tested, but let's just test the edges of the ranges instead.
+ # XXX This only tests low unicode character points for now as
+ # there are some issues with the testing infrastructure for
+ # the higher ones.
+ # XXX Testing 0xD (\r) is tricky as it overwrites the just written
+ # line in the output, so we skip it too.
+ global unichr
+ try:
+ unichr(65)
+ except NameError:
+ unichr = chr
+ invalid = (0x00, 0x1, 0xB, 0xC, 0xE, 0x19, 27, # issue #126
+ 0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) # , 0x110000)
+ valid = (0x9, 0xA, 0x20, )
+ # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)
+
+ from _pytest.junitxml import bin_xml_escape
+
+ for i in invalid:
+ got = bin_xml_escape(unichr(i)).uniobj
+ if i <= 0xFF:
+ expected = '#x%02X' % i
+ else:
+ expected = '#x%04X' % i
+ assert got == expected
+ for i in valid:
+ assert chr(i) == bin_xml_escape(unichr(i)).uniobj
+
+
+def test_logxml_path_expansion(tmpdir, monkeypatch):
+ home_tilde = py.path.local(os.path.expanduser('~')).join('test.xml')
+
+ xml_tilde = LogXML('~%stest.xml' % tmpdir.sep, None)
+ assert xml_tilde.logfile == home_tilde
+
+    # this is here for when $HOME is not set correctly
+ monkeypatch.setenv("HOME", tmpdir)
+ home_var = os.path.normpath(os.path.expandvars('$HOME/test.xml'))
+
+ xml_var = LogXML('$HOME%stest.xml' % tmpdir.sep, None)
+ assert xml_var.logfile == home_var
+
+
+def test_logxml_changingdir(testdir):
+ testdir.makepyfile("""
+ def test_func():
+ import os
+ os.chdir("a")
+ """)
+ testdir.tmpdir.mkdir("a")
+ result = testdir.runpytest("--junitxml=a/x.xml")
+ assert result.ret == 0
+ assert testdir.tmpdir.join("a/x.xml").check()
+
+
+def test_logxml_makedir(testdir):
+ """--junitxml should automatically create directories for the xml file"""
+ testdir.makepyfile("""
+ def test_pass():
+ pass
+ """)
+ result = testdir.runpytest("--junitxml=path/to/results.xml")
+ assert result.ret == 0
+ assert testdir.tmpdir.join("path/to/results.xml").check()
+
+
+def test_escaped_parametrized_names_xml(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize('char', ["\\x00"])
+ def test_func(char):
+ assert char
+ """)
+ result, dom = runandparse(testdir)
+ assert result.ret == 0
+ node = dom.find_first_by_tag("testcase")
+ node.assert_attr(name="test_func[#x00]")
+
+
+def test_double_colon_split_function_issue469(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize('param', ["double::colon"])
+ def test_func(param):
+ pass
+ """)
+ result, dom = runandparse(testdir)
+ assert result.ret == 0
+ node = dom.find_first_by_tag("testcase")
+ node.assert_attr(classname="test_double_colon_split_function_issue469")
+ node.assert_attr(name='test_func[double::colon]')
+
+
+def test_double_colon_split_method_issue469(testdir):
+ testdir.makepyfile("""
+ import pytest
+ class TestClass:
+ @pytest.mark.parametrize('param', ["double::colon"])
+ def test_func(self, param):
+ pass
+ """)
+ result, dom = runandparse(testdir)
+ assert result.ret == 0
+ node = dom.find_first_by_tag("testcase")
+ node.assert_attr(
+ classname="test_double_colon_split_method_issue469.TestClass")
+ node.assert_attr(name='test_func[double::colon]')
+
+
+def test_unicode_issue368(testdir):
+ path = testdir.tmpdir.join("test.xml")
+ log = LogXML(str(path), None)
+    ustr = py.builtin._totext("ВНИ!", "utf-8")
+ from _pytest.runner import BaseReport
+
+ class Report(BaseReport):
+ longrepr = ustr
+ sections = []
+ nodeid = "something"
+ location = 'tests/filename.py', 42, 'TestClass.method'
+
+ test_report = Report()
+
+ # hopefully this is not too brittle ...
+ log.pytest_sessionstart()
+ node_reporter = log._opentestcase(test_report)
+ node_reporter.append_failure(test_report)
+ node_reporter.append_collect_error(test_report)
+ node_reporter.append_collect_skipped(test_report)
+ node_reporter.append_error(test_report)
+ test_report.longrepr = "filename", 1, ustr
+ node_reporter.append_skipped(test_report)
+    test_report.longrepr = "filename", 1, "Skipped: 卡嘣嘣"
+ node_reporter.append_skipped(test_report)
+ test_report.wasxfail = ustr
+ node_reporter.append_skipped(test_report)
+ log.pytest_sessionfinish()
+
+
+def test_record_property(testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture
+ def other(record_xml_property):
+ record_xml_property("bar", 1)
+ def test_record(record_xml_property, other):
+ record_xml_property("foo", "<1");
+ """)
+ result, dom = runandparse(testdir, '-rw')
+ node = dom.find_first_by_tag("testsuite")
+ tnode = node.find_first_by_tag("testcase")
+ psnode = tnode.find_first_by_tag('properties')
+ pnodes = psnode.find_by_tag('property')
+ pnodes[0].assert_attr(name="bar", value="1")
+ pnodes[1].assert_attr(name="foo", value="<1")
+ result.stdout.fnmatch_lines('*C3*test_record_property.py*experimental*')
+
+
+def test_record_property_same_name(testdir):
+ testdir.makepyfile("""
+ def test_record_with_same_name(record_xml_property):
+ record_xml_property("foo", "bar")
+ record_xml_property("foo", "baz")
+ """)
+ result, dom = runandparse(testdir, '-rw')
+ node = dom.find_first_by_tag("testsuite")
+ tnode = node.find_first_by_tag("testcase")
+ psnode = tnode.find_first_by_tag('properties')
+ pnodes = psnode.find_by_tag('property')
+ pnodes[0].assert_attr(name="foo", value="bar")
+ pnodes[1].assert_attr(name="foo", value="baz")
+
+
+def test_random_report_log_xdist(testdir):
+ """xdist calls pytest_runtest_logreport as they are executed by the slaves,
+ with nodes from several nodes overlapping, so junitxml must cope with that
+ to produce correct reports. #1064
+ """
+ pytest.importorskip('xdist')
+ testdir.makepyfile("""
+ import pytest, time
+ @pytest.mark.parametrize('i', list(range(30)))
+ def test_x(i):
+ assert i != 22
+ """)
+ _, dom = runandparse(testdir, '-n2')
+ suite_node = dom.find_first_by_tag("testsuite")
+ failed = []
+ for case_node in suite_node.find_by_tag("testcase"):
+ if case_node.find_first_by_tag('failure'):
+ failed.append(case_node['name'])
+
+ assert failed == ['test_x[22]']
+
+
+def test_runs_twice(testdir):
+ f = testdir.makepyfile('''
+ def test_pass():
+ pass
+ ''')
+
+ result, dom = runandparse(testdir, f, f)
+ assert 'INTERNALERROR' not in result.stdout.str()
+ first, second = [x['classname'] for x in dom.find_by_tag("testcase")]
+ assert first == second
+
+
+@pytest.mark.xfail(reason='hangs', run=False)
+def test_runs_twice_xdist(testdir):
+ pytest.importorskip('xdist')
+ f = testdir.makepyfile('''
+ def test_pass():
+ pass
+ ''')
+
+ result, dom = runandparse(
+ testdir, f,
+ '--dist', 'each', '--tx', '2*popen',)
+ assert 'INTERNALERROR' not in result.stdout.str()
+ first, second = [x['classname'] for x in dom.find_by_tag("testcase")]
+ assert first == second
+
+
+def test_fancy_items_regression(testdir):
+ # issue 1259
+ testdir.makeconftest("""
+ import pytest
+ class FunItem(pytest.Item):
+ def runtest(self):
+ pass
+ class NoFunItem(pytest.Item):
+ def runtest(self):
+ pass
+
+ class FunCollector(pytest.File):
+ def collect(self):
+ return [
+ FunItem('a', self),
+ NoFunItem('a', self),
+ NoFunItem('b', self),
+ ]
+
+ def pytest_collect_file(path, parent):
+ if path.check(ext='.py'):
+ return FunCollector(path, parent)
+ """)
+
+ testdir.makepyfile('''
+ def test_pass():
+ pass
+ ''')
+
+ result, dom = runandparse(testdir)
+
+ assert 'INTERNALERROR' not in result.stdout.str()
+
+ items = sorted(
+ '%(classname)s %(name)s %(file)s' % x
+
+ for x in dom.find_by_tag("testcase"))
+ import pprint
+ pprint.pprint(items)
+ assert items == [
+ u'conftest a conftest.py',
+ u'conftest a conftest.py',
+ u'conftest b conftest.py',
+ u'test_fancy_items_regression a test_fancy_items_regression.py',
+ u'test_fancy_items_regression a test_fancy_items_regression.py',
+ u'test_fancy_items_regression b test_fancy_items_regression.py',
+ u'test_fancy_items_regression test_pass'
+ u' test_fancy_items_regression.py',
+ ]
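
The record_xml_property tests above exercise the fixture from the plugin side;
from a test author's point of view it simply appends <property> entries to the
generated <testcase> element (and, as the '-rw' match above shows, this pytest
version still flags the feature as experimental). A minimal user-level sketch,
with an illustrative property name and value:

    def test_build_metadata(record_xml_property):
        # Rendered as <property name="build" value="1.2.3"/> inside this
        # testcase when the run is invoked with --junitxml=path/to/report.xml.
        record_xml_property("build", "1.2.3")
        assert True
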
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_mark.py b/testing/web-platform/tests/tools/pytest/testing/test_mark.py
new file mode 100644
index 000000000..aa1be6f7c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_mark.py
@@ -0,0 +1,672 @@
+import os
+
+import py, pytest
+from _pytest.mark import MarkGenerator as Mark
+
+class TestMark:
+ def test_markinfo_repr(self):
+ from _pytest.mark import MarkInfo
+ m = MarkInfo("hello", (1,2), {})
+ repr(m)
+
+ def test_pytest_exists_in_namespace_all(self):
+ assert 'mark' in py.test.__all__
+ assert 'mark' in pytest.__all__
+
+ def test_pytest_mark_notcallable(self):
+ mark = Mark()
+ pytest.raises((AttributeError, TypeError), mark)
+
+ def test_pytest_mark_name_starts_with_underscore(self):
+ mark = Mark()
+ pytest.raises(AttributeError, getattr, mark, '_some_name')
+
+ def test_pytest_mark_bare(self):
+ mark = Mark()
+ def f():
+ pass
+ mark.hello(f)
+ assert f.hello
+
+ def test_pytest_mark_keywords(self):
+ mark = Mark()
+ def f():
+ pass
+ mark.world(x=3, y=4)(f)
+ assert f.world
+ assert f.world.kwargs['x'] == 3
+ assert f.world.kwargs['y'] == 4
+
+ def test_apply_multiple_and_merge(self):
+ mark = Mark()
+ def f():
+ pass
+ mark.world
+ mark.world(x=3)(f)
+ assert f.world.kwargs['x'] == 3
+ mark.world(y=4)(f)
+ assert f.world.kwargs['x'] == 3
+ assert f.world.kwargs['y'] == 4
+ mark.world(y=1)(f)
+ assert f.world.kwargs['y'] == 1
+ assert len(f.world.args) == 0
+
+ def test_pytest_mark_positional(self):
+ mark = Mark()
+ def f():
+ pass
+ mark.world("hello")(f)
+ assert f.world.args[0] == "hello"
+ mark.world("world")(f)
+
+ def test_pytest_mark_positional_func_and_keyword(self):
+ mark = Mark()
+ def f():
+ raise Exception
+ m = mark.world(f, omega="hello")
+ def g():
+ pass
+ assert m(g) == g
+ assert g.world.args[0] is f
+ assert g.world.kwargs["omega"] == "hello"
+
+ def test_pytest_mark_reuse(self):
+ mark = Mark()
+ def f():
+ pass
+ w = mark.some
+ w("hello", reason="123")(f)
+ assert f.some.args[0] == "hello"
+ assert f.some.kwargs['reason'] == "123"
+ def g():
+ pass
+ w("world", reason2="456")(g)
+ assert g.some.args[0] == "world"
+ assert 'reason' not in g.some.kwargs
+ assert g.some.kwargs['reason2'] == "456"
+
+
+def test_marked_class_run_twice(testdir, request):
+ """Test fails file is run twice that contains marked class.
+ See issue#683.
+ """
+ py_file = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize('abc', [1, 2, 3])
+ class Test1(object):
+ def test_1(self, abc):
+ assert abc in [1, 2, 3]
+ """)
+ file_name = os.path.basename(py_file.strpath)
+ rec = testdir.inline_run(file_name, file_name)
+ rec.assertoutcome(passed=6)
+
+
+def test_ini_markers(testdir):
+ testdir.makeini("""
+ [pytest]
+ markers =
+ a1: this is a webtest marker
+ a2: this is a smoke marker
+ """)
+ testdir.makepyfile("""
+ def test_markers(pytestconfig):
+ markers = pytestconfig.getini("markers")
+ print (markers)
+ assert len(markers) >= 2
+ assert markers[0].startswith("a1:")
+ assert markers[1].startswith("a2:")
+ """)
+ rec = testdir.inline_run()
+ rec.assertoutcome(passed=1)
+
+def test_markers_option(testdir):
+ testdir.makeini("""
+ [pytest]
+ markers =
+ a1: this is a webtest marker
+ a1some: another marker
+ """)
+ result = testdir.runpytest("--markers", )
+ result.stdout.fnmatch_lines([
+ "*a1*this is a webtest*",
+ "*a1some*another marker",
+ ])
+
+def test_markers_option_with_plugin_in_current_dir(testdir):
+ testdir.makeconftest('pytest_plugins = "flip_flop"')
+ testdir.makepyfile(flip_flop="""\
+ def pytest_configure(config):
+ config.addinivalue_line("markers", "flip:flop")
+
+ def pytest_generate_tests(metafunc):
+ try:
+ mark = metafunc.function.flipper
+ except AttributeError:
+ return
+ metafunc.parametrize("x", (10, 20))""")
+ testdir.makepyfile("""\
+ import pytest
+ @pytest.mark.flipper
+ def test_example(x):
+ assert x""")
+
+ result = testdir.runpytest("--markers")
+ result.stdout.fnmatch_lines(["*flip*flop*"])
+
+
+def test_mark_on_pseudo_function(testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.mark.r(lambda x: 0/0)
+ def test_hello():
+ pass
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+def test_strict_prohibits_unregistered_markers(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.unregisteredmark
+ def test_hello():
+ pass
+ """)
+ result = testdir.runpytest("--strict")
+ assert result.ret != 0
+ result.stdout.fnmatch_lines([
+ "*unregisteredmark*not*registered*",
+ ])
+
+@pytest.mark.parametrize("spec", [
+ ("xyz", ("test_one",)),
+ ("xyz and xyz2", ()),
+ ("xyz2", ("test_two",)),
+ ("xyz or xyz2", ("test_one", "test_two"),)
+])
+def test_mark_option(spec, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xyz
+ def test_one():
+ pass
+ @pytest.mark.xyz2
+ def test_two():
+ pass
+ """)
+ opt, passed_result = spec
+ rec = testdir.inline_run("-m", opt)
+ passed, skipped, fail = rec.listoutcomes()
+ passed = [x.nodeid.split("::")[-1] for x in passed]
+ assert len(passed) == len(passed_result)
+ assert list(passed) == list(passed_result)
+
+@pytest.mark.parametrize("spec", [
+ ("interface", ("test_interface",)),
+ ("not interface", ("test_nointer",)),
+])
+def test_mark_option_custom(spec, testdir):
+ testdir.makeconftest("""
+ import pytest
+ def pytest_collection_modifyitems(items):
+ for item in items:
+ if "interface" in item.nodeid:
+ item.keywords["interface"] = pytest.mark.interface
+ """)
+ testdir.makepyfile("""
+ def test_interface():
+ pass
+ def test_nointer():
+ pass
+ """)
+ opt, passed_result = spec
+ rec = testdir.inline_run("-m", opt)
+ passed, skipped, fail = rec.listoutcomes()
+ passed = [x.nodeid.split("::")[-1] for x in passed]
+ assert len(passed) == len(passed_result)
+ assert list(passed) == list(passed_result)
+
+@pytest.mark.parametrize("spec", [
+ ("interface", ("test_interface",)),
+ ("not interface", ("test_nointer", "test_pass")),
+ ("pass", ("test_pass",)),
+ ("not pass", ("test_interface", "test_nointer")),
+])
+def test_keyword_option_custom(spec, testdir):
+ testdir.makepyfile("""
+ def test_interface():
+ pass
+ def test_nointer():
+ pass
+ def test_pass():
+ pass
+ """)
+ opt, passed_result = spec
+ rec = testdir.inline_run("-k", opt)
+ passed, skipped, fail = rec.listoutcomes()
+ passed = [x.nodeid.split("::")[-1] for x in passed]
+ assert len(passed) == len(passed_result)
+ assert list(passed) == list(passed_result)
+
+
+@pytest.mark.parametrize("spec", [
+ ("None", ("test_func[None]",)),
+ ("1.3", ("test_func[1.3]",)),
+ ("2-3", ("test_func[2-3]",))
+])
+def test_keyword_option_parametrize(spec, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
+ def test_func(arg):
+ pass
+ """)
+ opt, passed_result = spec
+ rec = testdir.inline_run("-k", opt)
+ passed, skipped, fail = rec.listoutcomes()
+ passed = [x.nodeid.split("::")[-1] for x in passed]
+ assert len(passed) == len(passed_result)
+ assert list(passed) == list(passed_result)
+
+
+def test_parametrized_collected_from_command_line(testdir):
+ """Parametrized test not collected if test named specified
+ in command line issue#649.
+ """
+ py_file = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
+ def test_func(arg):
+ pass
+ """)
+ file_name = os.path.basename(py_file.strpath)
+ rec = testdir.inline_run(file_name + "::" + "test_func")
+ rec.assertoutcome(passed=3)
+
+
+class TestFunctional:
+
+ def test_mark_per_function(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.hello
+ def test_hello():
+ assert hasattr(test_hello, 'hello')
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_mark_per_module(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ pytestmark = pytest.mark.hello
+ def test_func():
+ pass
+ """)
+ keywords = item.keywords
+ assert 'hello' in keywords
+
+ def test_marklist_per_class(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ class TestClass:
+ pytestmark = [pytest.mark.hello, pytest.mark.world]
+ def test_func(self):
+ assert TestClass.test_func.hello
+ assert TestClass.test_func.world
+ """)
+ keywords = item.keywords
+ assert 'hello' in keywords
+
+ def test_marklist_per_module(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ pytestmark = [pytest.mark.hello, pytest.mark.world]
+ class TestClass:
+ def test_func(self):
+ assert TestClass.test_func.hello
+ assert TestClass.test_func.world
+ """)
+ keywords = item.keywords
+ assert 'hello' in keywords
+ assert 'world' in keywords
+
+ def test_mark_per_class_decorator(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.hello
+ class TestClass:
+ def test_func(self):
+ assert TestClass.test_func.hello
+ """)
+ keywords = item.keywords
+ assert 'hello' in keywords
+
+ def test_mark_per_class_decorator_plus_existing_dec(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.hello
+ class TestClass:
+ pytestmark = pytest.mark.world
+ def test_func(self):
+ assert TestClass.test_func.hello
+ assert TestClass.test_func.world
+ """)
+ keywords = item.keywords
+ assert 'hello' in keywords
+ assert 'world' in keywords
+
+ def test_merging_markers(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ pytestmark = pytest.mark.hello("pos1", x=1, y=2)
+ class TestClass:
+ # classlevel overrides module level
+ pytestmark = pytest.mark.hello(x=3)
+ @pytest.mark.hello("pos0", z=4)
+ def test_func(self):
+ pass
+ """)
+ items, rec = testdir.inline_genitems(p)
+ item, = items
+ keywords = item.keywords
+ marker = keywords['hello']
+ assert marker.args == ("pos0", "pos1")
+ assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4}
+
+ # test the new __iter__ interface
+ l = list(marker)
+ assert len(l) == 3
+ assert l[0].args == ("pos0",)
+ assert l[1].args == ()
+ assert l[2].args == ("pos1", )
+
+ @pytest.mark.xfail(reason='unfixed')
+ def test_merging_markers_deep(self, testdir):
+ # issue 199 - propagate markers into nested classes
+ p = testdir.makepyfile("""
+ import pytest
+ class TestA:
+ pytestmark = pytest.mark.a
+ def test_b(self):
+ assert True
+ class TestC:
+                    # this one didn't get marked
+ def test_d(self):
+ assert True
+ """)
+ items, rec = testdir.inline_genitems(p)
+ for item in items:
+ print (item, item.keywords)
+ assert 'a' in item.keywords
+
+ def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+
+ @pytest.mark.a
+ class Base: pass
+
+ @pytest.mark.b
+ class Test1(Base):
+ def test_foo(self): pass
+
+ class Test2(Base):
+ def test_bar(self): pass
+ """)
+ items, rec = testdir.inline_genitems(p)
+ self.assert_markers(items, test_foo=('a', 'b'), test_bar=('a',))
+
+ def test_mark_decorator_baseclasses_merged(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+
+ @pytest.mark.a
+ class Base: pass
+
+ @pytest.mark.b
+ class Base2(Base): pass
+
+ @pytest.mark.c
+ class Test1(Base2):
+ def test_foo(self): pass
+
+ class Test2(Base2):
+ @pytest.mark.d
+ def test_bar(self): pass
+ """)
+ items, rec = testdir.inline_genitems(p)
+ self.assert_markers(items, test_foo=('a', 'b', 'c'),
+ test_bar=('a', 'b', 'd'))
+
+ def test_mark_with_wrong_marker(self, testdir):
+ reprec = testdir.inline_runsource("""
+ import pytest
+ class pytestmark:
+ pass
+ def test_func():
+ pass
+ """)
+ l = reprec.getfailedcollections()
+ assert len(l) == 1
+ assert "TypeError" in str(l[0].longrepr)
+
+ def test_mark_dynamically_in_funcarg(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ def pytest_funcarg__arg(request):
+ request.applymarker(pytest.mark.hello)
+ def pytest_terminal_summary(terminalreporter):
+ l = terminalreporter.stats['passed']
+ terminalreporter.writer.line("keyword: %s" % l[0].keywords)
+ """)
+ testdir.makepyfile("""
+ def test_func(arg):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "keyword: *hello*"
+ ])
+
+ def test_merging_markers_two_functions(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.hello("pos1", z=4)
+ @pytest.mark.hello("pos0", z=3)
+ def test_func():
+ pass
+ """)
+ items, rec = testdir.inline_genitems(p)
+ item, = items
+ keywords = item.keywords
+ marker = keywords['hello']
+ l = list(marker)
+ assert len(l) == 2
+ assert l[0].args == ("pos0",)
+ assert l[1].args == ("pos1",)
+
+ def test_no_marker_match_on_unmarked_names(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.shouldmatch
+ def test_marked():
+ assert 1
+
+ def test_unmarked():
+ assert 1
+ """)
+ reprec = testdir.inline_run("-m", "test_unmarked", p)
+ passed, skipped, failed = reprec.listoutcomes()
+ assert len(passed) + len(skipped) + len(failed) == 0
+ dlist = reprec.getcalls("pytest_deselected")
+ deselected_tests = dlist[0].items
+ assert len(deselected_tests) == 2
+
+ def test_keywords_at_node_level(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope="session", autouse=True)
+ def some(request):
+ request.keywords["hello"] = 42
+ assert "world" not in request.keywords
+
+ @pytest.fixture(scope="function", autouse=True)
+ def funcsetup(request):
+ assert "world" in request.keywords
+ assert "hello" in request.keywords
+
+ @pytest.mark.world
+ def test_function():
+ pass
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_keyword_added_for_session(self, testdir):
+ testdir.makeconftest("""
+ import pytest
+ def pytest_collection_modifyitems(session):
+ session.add_marker("mark1")
+ session.add_marker(pytest.mark.mark2)
+ session.add_marker(pytest.mark.mark3)
+ pytest.raises(ValueError, lambda:
+ session.add_marker(10))
+ """)
+ testdir.makepyfile("""
+ def test_some(request):
+ assert "mark1" in request.keywords
+ assert "mark2" in request.keywords
+ assert "mark3" in request.keywords
+ assert 10 not in request.keywords
+ marker = request.node.get_marker("mark1")
+ assert marker.name == "mark1"
+ assert marker.args == ()
+ assert marker.kwargs == {}
+ """)
+ reprec = testdir.inline_run("-m", "mark1")
+ reprec.assertoutcome(passed=1)
+
+ def assert_markers(self, items, **expected):
+ """assert that given items have expected marker names applied to them.
+ expected should be a dict of (item name -> seq of expected marker names)
+
+ .. note:: this could be moved to ``testdir`` if proven to be useful
+ to other modules.
+ """
+ from _pytest.mark import MarkInfo
+ items = dict((x.name, x) for x in items)
+ for name, expected_markers in expected.items():
+ markers = items[name].keywords._markers
+ marker_names = set([name for (name, v) in markers.items()
+ if isinstance(v, MarkInfo)])
+ assert marker_names == set(expected_markers)
+
+
+class TestKeywordSelection:
+ def test_select_simple(self, testdir):
+ file_test = testdir.makepyfile("""
+ def test_one():
+ assert 0
+ class TestClass(object):
+ def test_method_one(self):
+ assert 42 == 43
+ """)
+ def check(keyword, name):
+ reprec = testdir.inline_run("-s", "-k", keyword, file_test)
+ passed, skipped, failed = reprec.listoutcomes()
+ assert len(failed) == 1
+ assert failed[0].nodeid.split("::")[-1] == name
+ assert len(reprec.getcalls('pytest_deselected')) == 1
+
+ for keyword in ['test_one', 'est_on']:
+ check(keyword, 'test_one')
+ check('TestClass and test', 'test_method_one')
+
+ @pytest.mark.parametrize("keyword", [
+ 'xxx', 'xxx and test_2', 'TestClass', 'xxx and not test_1',
+ 'TestClass and test_2', 'xxx and TestClass and test_2'])
+ def test_select_extra_keywords(self, testdir, keyword):
+ p = testdir.makepyfile(test_select="""
+ def test_1():
+ pass
+ class TestClass:
+ def test_2(self):
+ pass
+ """)
+ testdir.makepyfile(conftest="""
+ import pytest
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_pycollect_makeitem(name):
+ outcome = yield
+ if name == "TestClass":
+ item = outcome.get_result()
+ item.extra_keyword_matches.add("xxx")
+ """)
+ reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword)
+ py.builtin.print_("keyword", repr(keyword))
+ passed, skipped, failed = reprec.listoutcomes()
+ assert len(passed) == 1
+ assert passed[0].nodeid.endswith("test_2")
+ dlist = reprec.getcalls("pytest_deselected")
+ assert len(dlist) == 1
+ assert dlist[0].items[0].name == 'test_1'
+
+ def test_select_starton(self, testdir):
+ threepass = testdir.makepyfile(test_threepass="""
+ def test_one(): assert 1
+ def test_two(): assert 1
+ def test_three(): assert 1
+ """)
+ reprec = testdir.inline_run("-k", "test_two:", threepass)
+ passed, skipped, failed = reprec.listoutcomes()
+ assert len(passed) == 2
+ assert not failed
+ dlist = reprec.getcalls("pytest_deselected")
+ assert len(dlist) == 1
+ item = dlist[0].items[0]
+ assert item.name == "test_one"
+
+ def test_keyword_extra(self, testdir):
+ p = testdir.makepyfile("""
+ def test_one():
+ assert 0
+ test_one.mykeyword = True
+ """)
+ reprec = testdir.inline_run("-k", "mykeyword", p)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 1
+
+ @pytest.mark.xfail
+ def test_keyword_extra_dash(self, testdir):
+ p = testdir.makepyfile("""
+ def test_one():
+ assert 0
+ test_one.mykeyword = True
+ """)
+ # with argparse the argument to an option cannot
+ # start with '-'
+ reprec = testdir.inline_run("-k", "-mykeyword", p)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert passed + skipped + failed == 0
+
+ def test_no_magic_values(self, testdir):
+ """Make sure the tests do not match on magic values,
+ no double underscored values, like '__dict__',
+ and no instance values, like '()'.
+ """
+ p = testdir.makepyfile("""
+ def test_one(): assert 1
+ """)
+ def assert_test_is_not_selected(keyword):
+ reprec = testdir.inline_run("-k", keyword, p)
+ passed, skipped, failed = reprec.countoutcomes()
+ dlist = reprec.getcalls("pytest_deselected")
+ assert passed + skipped + failed == 0
+ deselected_tests = dlist[0].items
+ assert len(deselected_tests) == 1
+
+ assert_test_is_not_selected("__")
+ assert_test_is_not_selected("()")
+
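
TestMark above drives MarkGenerator directly; the user-facing equivalent is
the pytest.mark namespace, which records each applied mark as an attribute on
the test function, and those attributes are exactly what the keyword selection
and merging tests in this file inspect. A small sketch (the mark name
'webtest' and its argument are illustrative):

    import pytest

    @pytest.mark.webtest(browser="firefox")
    def test_example():
        pass

    # The decorator stores its arguments on the function object as a MarkInfo.
    assert test_example.webtest.name == "webtest"
    assert test_example.webtest.kwargs["browser"] == "firefox"
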
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_monkeypatch.py b/testing/web-platform/tests/tools/pytest/testing/test_monkeypatch.py
new file mode 100644
index 000000000..048c942c8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_monkeypatch.py
@@ -0,0 +1,330 @@
+import os
+import sys
+import textwrap
+
+import pytest
+from _pytest.monkeypatch import monkeypatch as MonkeyPatch
+
+
+def pytest_funcarg__mp(request):
+ cwd = os.getcwd()
+ sys_path = list(sys.path)
+
+ def cleanup():
+ sys.path[:] = sys_path
+ os.chdir(cwd)
+
+ request.addfinalizer(cleanup)
+ return MonkeyPatch()
+
+
+def test_setattr():
+ class A:
+ x = 1
+
+ monkeypatch = MonkeyPatch()
+ pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
+ monkeypatch.setattr(A, 'y', 2, raising=False)
+ assert A.y == 2
+ monkeypatch.undo()
+ assert not hasattr(A, 'y')
+
+ monkeypatch = MonkeyPatch()
+ monkeypatch.setattr(A, 'x', 2)
+ assert A.x == 2
+ monkeypatch.setattr(A, 'x', 3)
+ assert A.x == 3
+ monkeypatch.undo()
+ assert A.x == 1
+
+ A.x = 5
+ monkeypatch.undo() # double-undo makes no modification
+ assert A.x == 5
+
+
+class TestSetattrWithImportPath:
+ def test_string_expression(self, monkeypatch):
+ monkeypatch.setattr("os.path.abspath", lambda x: "hello2")
+ assert os.path.abspath("123") == "hello2"
+
+ def test_string_expression_class(self, monkeypatch):
+ monkeypatch.setattr("_pytest.config.Config", 42)
+ import _pytest
+ assert _pytest.config.Config == 42
+
+ def test_unicode_string(self, monkeypatch):
+ monkeypatch.setattr("_pytest.config.Config", 42)
+ import _pytest
+ assert _pytest.config.Config == 42
+ monkeypatch.delattr("_pytest.config.Config")
+
+ def test_wrong_target(self, monkeypatch):
+ pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None))
+
+ def test_unknown_import(self, monkeypatch):
+ pytest.raises(ImportError,
+ lambda: monkeypatch.setattr("unkn123.classx", None))
+
+ def test_unknown_attr(self, monkeypatch):
+ pytest.raises(AttributeError,
+ lambda: monkeypatch.setattr("os.path.qweqwe", None))
+
+ def test_unknown_attr_non_raising(self, monkeypatch):
+ # https://github.com/pytest-dev/pytest/issues/746
+ monkeypatch.setattr('os.path.qweqwe', 42, raising=False)
+ assert os.path.qweqwe == 42
+
+ def test_delattr(self, monkeypatch):
+ monkeypatch.delattr("os.path.abspath")
+ assert not hasattr(os.path, "abspath")
+ monkeypatch.undo()
+ assert os.path.abspath
+
+
+def test_delattr():
+ class A:
+ x = 1
+
+ monkeypatch = MonkeyPatch()
+ monkeypatch.delattr(A, 'x')
+ assert not hasattr(A, 'x')
+ monkeypatch.undo()
+ assert A.x == 1
+
+ monkeypatch = MonkeyPatch()
+ monkeypatch.delattr(A, 'x')
+ pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
+ monkeypatch.delattr(A, 'y', raising=False)
+ monkeypatch.setattr(A, 'x', 5, raising=False)
+ assert A.x == 5
+ monkeypatch.undo()
+ assert A.x == 1
+
+
+def test_setitem():
+ d = {'x': 1}
+ monkeypatch = MonkeyPatch()
+ monkeypatch.setitem(d, 'x', 2)
+ monkeypatch.setitem(d, 'y', 1700)
+ monkeypatch.setitem(d, 'y', 1700)
+ assert d['x'] == 2
+ assert d['y'] == 1700
+ monkeypatch.setitem(d, 'x', 3)
+ assert d['x'] == 3
+ monkeypatch.undo()
+ assert d['x'] == 1
+ assert 'y' not in d
+ d['x'] = 5
+ monkeypatch.undo()
+ assert d['x'] == 5
+
+
+def test_setitem_deleted_meanwhile():
+ d = {}
+ monkeypatch = MonkeyPatch()
+ monkeypatch.setitem(d, 'x', 2)
+ del d['x']
+ monkeypatch.undo()
+ assert not d
+
+
+@pytest.mark.parametrize("before", [True, False])
+def test_setenv_deleted_meanwhile(before):
+ key = "qwpeoip123"
+ if before:
+ os.environ[key] = "world"
+ monkeypatch = MonkeyPatch()
+ monkeypatch.setenv(key, 'hello')
+ del os.environ[key]
+ monkeypatch.undo()
+ if before:
+ assert os.environ[key] == "world"
+ del os.environ[key]
+ else:
+ assert key not in os.environ
+
+
+def test_delitem():
+ d = {'x': 1}
+ monkeypatch = MonkeyPatch()
+ monkeypatch.delitem(d, 'x')
+ assert 'x' not in d
+ monkeypatch.delitem(d, 'y', raising=False)
+ pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')")
+ assert not d
+ monkeypatch.setitem(d, 'y', 1700)
+ assert d['y'] == 1700
+ d['hello'] = 'world'
+ monkeypatch.setitem(d, 'x', 1500)
+ assert d['x'] == 1500
+ monkeypatch.undo()
+ assert d == {'hello': 'world', 'x': 1}
+
+
+def test_setenv():
+ monkeypatch = MonkeyPatch()
+ monkeypatch.setenv('XYZ123', 2)
+ import os
+ assert os.environ['XYZ123'] == "2"
+ monkeypatch.undo()
+ assert 'XYZ123' not in os.environ
+
+
+def test_delenv():
+ name = 'xyz1234'
+ assert name not in os.environ
+ monkeypatch = MonkeyPatch()
+ pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
+ monkeypatch.delenv(name, raising=False)
+ monkeypatch.undo()
+ os.environ[name] = "1"
+ try:
+ monkeypatch = MonkeyPatch()
+ monkeypatch.delenv(name)
+ assert name not in os.environ
+ monkeypatch.setenv(name, "3")
+ assert os.environ[name] == "3"
+ monkeypatch.undo()
+ assert os.environ[name] == "1"
+ finally:
+ if name in os.environ:
+ del os.environ[name]
+
+
+def test_setenv_prepend():
+ import os
+ monkeypatch = MonkeyPatch()
+ monkeypatch.setenv('XYZ123', 2, prepend="-")
+ assert os.environ['XYZ123'] == "2"
+ monkeypatch.setenv('XYZ123', 3, prepend="-")
+ assert os.environ['XYZ123'] == "3-2"
+ monkeypatch.undo()
+ assert 'XYZ123' not in os.environ
+
+
+def test_monkeypatch_plugin(testdir):
+ reprec = testdir.inline_runsource("""
+ def test_method(monkeypatch):
+ assert monkeypatch.__class__.__name__ == "monkeypatch"
+ """)
+ res = reprec.countoutcomes()
+ assert tuple(res) == (1, 0, 0), res
+
+
+def test_syspath_prepend(mp):
+ old = list(sys.path)
+ mp.syspath_prepend('world')
+ mp.syspath_prepend('hello')
+ assert sys.path[0] == "hello"
+ assert sys.path[1] == "world"
+ mp.undo()
+ assert sys.path == old
+ mp.undo()
+ assert sys.path == old
+
+
+def test_syspath_prepend_double_undo(mp):
+ mp.syspath_prepend('hello world')
+ mp.undo()
+ sys.path.append('more hello world')
+ mp.undo()
+ assert sys.path[-1] == 'more hello world'
+
+
+def test_chdir_with_path_local(mp, tmpdir):
+ mp.chdir(tmpdir)
+ assert os.getcwd() == tmpdir.strpath
+
+
+def test_chdir_with_str(mp, tmpdir):
+ mp.chdir(tmpdir.strpath)
+ assert os.getcwd() == tmpdir.strpath
+
+
+def test_chdir_undo(mp, tmpdir):
+ cwd = os.getcwd()
+ mp.chdir(tmpdir)
+ mp.undo()
+ assert os.getcwd() == cwd
+
+
+def test_chdir_double_undo(mp, tmpdir):
+ mp.chdir(tmpdir.strpath)
+ mp.undo()
+ tmpdir.chdir()
+ mp.undo()
+ assert os.getcwd() == tmpdir.strpath
+
+
+def test_issue185_time_breaks(testdir):
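+ # patching time.time inside a test must not break pytest's own reporting (issue 185)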
+ testdir.makepyfile("""
+ import time
+ def test_m(monkeypatch):
+ def f():
+ raise Exception
+ monkeypatch.setattr(time, "time", f)
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("""
+ *1 passed*
+ """)
+
+
+def test_importerror(testdir):
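+ # setattr() with a dotted-string target should surface the underlying ImportError message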
+ p = testdir.mkpydir("package")
+ p.join("a.py").write(textwrap.dedent("""\
+ import doesnotexist
+
+ x = 1
+ """))
+ testdir.tmpdir.join("test_importerror.py").write(textwrap.dedent("""\
+ def test_importerror(monkeypatch):
+ monkeypatch.setattr('package.a.x', 2)
+ """))
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("""
+ *import error in package.a: No module named {0}doesnotexist{0}*
+ """.format("'" if sys.version_info > (3, 0) else ""))
+
+
+class SampleNew(object):
+ @staticmethod
+ def hello():
+ return True
+
+
+class SampleNewInherit(SampleNew):
+ pass
+
+
+class SampleOld:
+ # oldstyle on python2
+ @staticmethod
+ def hello():
+ return True
+
+
+class SampleOldInherit(SampleOld):
+ pass
+
+
+@pytest.mark.parametrize('Sample', [
+ SampleNew, SampleNewInherit,
+ SampleOld, SampleOldInherit,
+], ids=['new', 'new-inherit', 'old', 'old-inherit'])
+def test_issue156_undo_staticmethod(Sample):
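+ # undo() must restore the original staticmethod so Sample.hello() is callable again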
+ monkeypatch = MonkeyPatch()
+
+ monkeypatch.setattr(Sample, 'hello', None)
+ assert Sample.hello is None
+
+ monkeypatch.undo()
+ assert Sample.hello()
+
+def test_issue1338_name_resolving():
+ pytest.importorskip('requests')
+ monkeypatch = MonkeyPatch()
+ try:
+ monkeypatch.delattr('requests.sessions.Session.request')
+ finally:
+ monkeypatch.undo()
\ No newline at end of file
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_nose.py b/testing/web-platform/tests/tools/pytest/testing/test_nose.py
new file mode 100644
index 000000000..a5162381e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_nose.py
@@ -0,0 +1,394 @@
+import pytest
+
+def setup_module(mod):
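+ # the whole module is skipped when nose is not importable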
+ mod.nose = pytest.importorskip("nose")
+
+def test_nose_setup(testdir):
+ p = testdir.makepyfile("""
+ l = []
+ from nose.tools import with_setup
+
+ @with_setup(lambda: l.append(1), lambda: l.append(2))
+ def test_hello():
+ assert l == [1]
+
+ def test_world():
+ assert l == [1,2]
+
+ test_hello.setup = lambda: l.append(1)
+ test_hello.teardown = lambda: l.append(2)
+ """)
+ result = testdir.runpytest(p, '-p', 'nose')
+ result.assert_outcomes(passed=2)
+
+
+def test_setup_func_with_setup_decorator():
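+ # a pytest fixture must not be treated as a nose setup function by call_optional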
+ from _pytest.nose import call_optional
+ l = []
+ class A:
+ @pytest.fixture(autouse=True)
+ def f(self):
+ l.append(1)
+ call_optional(A(), "f")
+ assert not l
+
+
+def test_setup_func_not_callable():
+ from _pytest.nose import call_optional
+ class A:
+ f = 1
+ call_optional(A(), "f")
+
+def test_nose_setup_func(testdir):
+ p = testdir.makepyfile("""
+ from nose.tools import with_setup
+
+ l = []
+
+ def my_setup():
+ a = 1
+ l.append(a)
+
+ def my_teardown():
+ b = 2
+ l.append(b)
+
+ @with_setup(my_setup, my_teardown)
+ def test_hello():
+ print (l)
+ assert l == [1]
+
+ def test_world():
+ print (l)
+ assert l == [1,2]
+
+ """)
+ result = testdir.runpytest(p, '-p', 'nose')
+ result.assert_outcomes(passed=2)
+
+
+def test_nose_setup_func_failure(testdir):
+ p = testdir.makepyfile("""
+ from nose.tools import with_setup
+
+ l = []
+ my_setup = lambda x: 1
+ my_teardown = lambda x: 2
+
+ @with_setup(my_setup, my_teardown)
+ def test_hello():
+ print (l)
+ assert l == [1]
+
+ def test_world():
+ print (l)
+ assert l == [1,2]
+
+ """)
+ result = testdir.runpytest(p, '-p', 'nose')
+ result.stdout.fnmatch_lines([
+ "*TypeError: <lambda>()*"
+ ])
+
+
+def test_nose_setup_func_failure_2(testdir):
+ testdir.makepyfile("""
+ l = []
+
+ my_setup = 1
+ my_teardown = 2
+
+ def test_hello():
+ assert l == []
+
+ test_hello.setup = my_setup
+ test_hello.teardown = my_teardown
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+def test_nose_setup_partial(testdir):
+ pytest.importorskip("functools")
+ p = testdir.makepyfile("""
+ from functools import partial
+
+ l = []
+
+ def my_setup(x):
+ a = x
+ l.append(a)
+
+ def my_teardown(x):
+ b = x
+ l.append(b)
+
+ my_setup_partial = partial(my_setup, 1)
+ my_teardown_partial = partial(my_teardown, 2)
+
+ def test_hello():
+ print (l)
+ assert l == [1]
+
+ def test_world():
+ print (l)
+ assert l == [1,2]
+
+ test_hello.setup = my_setup_partial
+ test_hello.teardown = my_teardown_partial
+ """)
+ result = testdir.runpytest(p, '-p', 'nose')
+ result.stdout.fnmatch_lines([
+ "*2 passed*"
+ ])
+
+
+def test_nose_test_generator_fixtures(testdir):
+ p = testdir.makepyfile("""
+ # taken from nose-0.11.1 unit_tests/test_generator_fixtures.py
+ from nose.tools import eq_
+ called = []
+
+ def outer_setup():
+ called.append('outer_setup')
+
+ def outer_teardown():
+ called.append('outer_teardown')
+
+ def inner_setup():
+ called.append('inner_setup')
+
+ def inner_teardown():
+ called.append('inner_teardown')
+
+ def test_gen():
+ called[:] = []
+ for i in range(0, 5):
+ yield check, i
+
+ def check(i):
+ expect = ['outer_setup']
+ for x in range(0, i):
+ expect.append('inner_setup')
+ expect.append('inner_teardown')
+ expect.append('inner_setup')
+ eq_(called, expect)
+
+
+ test_gen.setup = outer_setup
+ test_gen.teardown = outer_teardown
+ check.setup = inner_setup
+ check.teardown = inner_teardown
+
+ class TestClass(object):
+ def setup(self):
+ print ("setup called in %s" % self)
+ self.called = ['setup']
+
+ def teardown(self):
+ print ("teardown called in %s" % self)
+ eq_(self.called, ['setup'])
+ self.called.append('teardown')
+
+ def test(self):
+ print ("test called in %s" % self)
+ for i in range(0, 5):
+ yield self.check, i
+
+ def check(self, i):
+ print ("check called in %s" % self)
+ expect = ['setup']
+ #for x in range(0, i):
+ # expect.append('setup')
+ # expect.append('teardown')
+ #expect.append('setup')
+ eq_(self.called, expect)
+ """)
+ result = testdir.runpytest(p, '-p', 'nose')
+ result.stdout.fnmatch_lines([
+ "*10 passed*"
+ ])
+
+
+def test_module_level_setup(testdir):
+ testdir.makepyfile("""
+ from nose.tools import with_setup
+ items = {}
+
+ def setup():
+ items[1]=1
+
+ def teardown():
+ del items[1]
+
+ def setup2():
+ items[2] = 2
+
+ def teardown2():
+ del items[2]
+
+ def test_setup_module_setup():
+ assert items[1] == 1
+
+ @with_setup(setup2, teardown2)
+ def test_local_setup():
+ assert items[2] == 2
+ assert 1 not in items
+ """)
+ result = testdir.runpytest('-p', 'nose')
+ result.stdout.fnmatch_lines([
+ "*2 passed*",
+ ])
+
+
+def test_nose_style_setup_teardown(testdir):
+ testdir.makepyfile("""
+ l = []
+
+ def setup_module():
+ l.append(1)
+
+ def teardown_module():
+ del l[0]
+
+ def test_hello():
+ assert l == [1]
+
+ def test_world():
+ assert l == [1]
+ """)
+ result = testdir.runpytest('-p', 'nose')
+ result.stdout.fnmatch_lines([
+ "*2 passed*",
+ ])
+
+def test_nose_setup_ordering(testdir):
+ testdir.makepyfile("""
+ def setup_module(mod):
+ mod.visited = True
+
+ class TestClass:
+ def setup(self):
+ assert visited
+ def test_first(self):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*1 passed*",
+ ])
+
+
+def test_apiwrapper_problem_issue260(testdir):
+ # this would end up trying to call an optional teardown on the class;
+ # for plain unittests we don't want nose behaviour
+ testdir.makepyfile("""
+ import unittest
+ class TestCase(unittest.TestCase):
+ def setup(self):
+ #should not be called in unittest testcases
+ assert 0, 'setup'
+ def teardown(self):
+ #should not be called in unittest testcases
+ assert 0, 'teardown'
+ def setUp(self):
+ print('setup')
+ def tearDown(self):
+ print('teardown')
+ def test_fun(self):
+ pass
+ """)
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
+
+def test_setup_teardown_linking_issue265(testdir):
+ # we accidentally didn't integrate nose setupstate with normal setupstate;
+ # this test ensures that won't happen again
+ testdir.makepyfile('''
+ import pytest
+
+ class TestGeneric(object):
+ def test_nothing(self):
+ """Tests the API of the implementation (for generic and specialized)."""
+
+ @pytest.mark.skipif("True", reason=
+ "Skip tests to check if teardown is skipped as well.")
+ class TestSkipTeardown(TestGeneric):
+
+ def setup(self):
+ """Sets up my specialized implementation for $COOL_PLATFORM."""
+ raise Exception("should not call setup for skipped tests")
+
+ def teardown(self):
+ """Undoes the setup."""
+ raise Exception("should not call teardown for skipped tests")
+ ''')
+ reprec = testdir.runpytest()
+ reprec.assert_outcomes(passed=1, skipped=1)
+
+
+def test_SkipTest_during_collection(testdir):
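+ # a module-level nose.SkipTest should skip the file instead of failing collection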
+ p = testdir.makepyfile("""
+ import nose
+ raise nose.SkipTest("during collection")
+ def test_failing():
+ assert False
+ """)
+ result = testdir.runpytest(p)
+ result.assert_outcomes(skipped=1)
+
+
+def test_SkipTest_in_test(testdir):
+ testdir.makepyfile("""
+ import nose
+
+ def test_skipping():
+ raise nose.SkipTest("in test")
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(skipped=1)
+
+def test_istest_function_decorator(testdir):
+ p = testdir.makepyfile("""
+ import nose.tools
+ @nose.tools.istest
+ def not_test_prefix():
+ pass
+ """)
+ result = testdir.runpytest(p)
+ result.assert_outcomes(passed=1)
+
+def test_nottest_function_decorator(testdir):
+ testdir.makepyfile("""
+ import nose.tools
+ @nose.tools.nottest
+ def test_prefix():
+ pass
+ """)
+ reprec = testdir.inline_run()
+ assert not reprec.getfailedcollections()
+ calls = reprec.getreports("pytest_runtest_logreport")
+ assert not calls
+
+def test_istest_class_decorator(testdir):
+ p = testdir.makepyfile("""
+ import nose.tools
+ @nose.tools.istest
+ class NotTestPrefix:
+ def test_method(self):
+ pass
+ """)
+ result = testdir.runpytest(p)
+ result.assert_outcomes(passed=1)
+
+def test_nottest_class_decorator(testdir):
+ testdir.makepyfile("""
+ import nose.tools
+ @nose.tools.nottest
+ class TestPrefix:
+ def test_method(self):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ assert not reprec.getfailedcollections()
+ calls = reprec.getreports("pytest_runtest_logreport")
+ assert not calls
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_parseopt.py b/testing/web-platform/tests/tools/pytest/testing/test_parseopt.py
new file mode 100644
index 000000000..e45ee2854
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_parseopt.py
@@ -0,0 +1,287 @@
+from __future__ import with_statement
+import sys
+import os
+import py, pytest
+from _pytest import config as parseopt
+
+@pytest.fixture
+def parser():
+ return parseopt.Parser()
+
+class TestParser:
+ def test_no_help_by_default(self, capsys):
+ parser = parseopt.Parser(usage="xyz")
+ pytest.raises(SystemExit, lambda: parser.parse(["-h"]))
+ out, err = capsys.readouterr()
+ assert err.find("error: unrecognized arguments") != -1
+
+ def test_argument(self):
+ with pytest.raises(parseopt.ArgumentError):
+ # need a short or long option
+ argument = parseopt.Argument()
+ argument = parseopt.Argument('-t')
+ assert argument._short_opts == ['-t']
+ assert argument._long_opts == []
+ assert argument.dest == 't'
+ argument = parseopt.Argument('-t', '--test')
+ assert argument._short_opts == ['-t']
+ assert argument._long_opts == ['--test']
+ assert argument.dest == 'test'
+ argument = parseopt.Argument('-t', '--test', dest='abc')
+ assert argument.dest == 'abc'
+
+ def test_argument_type(self):
+ argument = parseopt.Argument('-t', dest='abc', type='int')
+ assert argument.type is int
+ argument = parseopt.Argument('-t', dest='abc', type='string')
+ assert argument.type is str
+ argument = parseopt.Argument('-t', dest='abc', type=float)
+ assert argument.type is float
+ with pytest.raises(KeyError):
+ argument = parseopt.Argument('-t', dest='abc', type='choice')
+ argument = parseopt.Argument('-t', dest='abc', type='choice',
+ choices=['red', 'blue'])
+ assert argument.type is str
+
+ def test_argument_processopt(self):
+ argument = parseopt.Argument('-t', type=int)
+ argument.default = 42
+ argument.dest = 'abc'
+ res = argument.attrs()
+ assert res['default'] == 42
+ assert res['dest'] == 'abc'
+
+ def test_group_add_and_get(self, parser):
+ group = parser.getgroup("hello", description="desc")
+ assert group.name == "hello"
+ assert group.description == "desc"
+
+ def test_getgroup_simple(self, parser):
+ group = parser.getgroup("hello", description="desc")
+ assert group.name == "hello"
+ assert group.description == "desc"
+ group2 = parser.getgroup("hello")
+ assert group2 is group
+
+ def test_group_ordering(self, parser):
+ parser.getgroup("1")
+ parser.getgroup("2")
+ parser.getgroup("3", after="1")
+ groups = parser._groups
+ groups_names = [x.name for x in groups]
+ assert groups_names == list("132")
+
+ def test_group_addoption(self):
+ group = parseopt.OptionGroup("hello")
+ group.addoption("--option1", action="store_true")
+ assert len(group.options) == 1
+ assert isinstance(group.options[0], parseopt.Argument)
+
+ def test_group_shortopt_lowercase(self, parser):
+ group = parser.getgroup("hello")
+ pytest.raises(ValueError, """
+ group.addoption("-x", action="store_true")
+ """)
+ assert len(group.options) == 0
+ group._addoption("-x", action="store_true")
+ assert len(group.options) == 1
+
+ def test_parser_addoption(self, parser):
+ group = parser.getgroup("custom options")
+ assert len(group.options) == 0
+ group.addoption("--option1", action="store_true")
+ assert len(group.options) == 1
+
+ def test_parse(self, parser):
+ parser.addoption("--hello", dest="hello", action="store")
+ args = parser.parse(['--hello', 'world'])
+ assert args.hello == "world"
+ assert not getattr(args, parseopt.FILE_OR_DIR)
+
+ def test_parse2(self, parser):
+ args = parser.parse([py.path.local()])
+ assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()
+
+ def test_parse_known_args(self, parser):
+ parser.parse_known_args([py.path.local()])
+ parser.addoption("--hello", action="store_true")
+ ns = parser.parse_known_args(["x", "--y", "--hello", "this"])
+ assert ns.hello
+ assert ns.file_or_dir == ['x']
+
+ def test_parse_known_and_unknown_args(self, parser):
+ parser.addoption("--hello", action="store_true")
+ ns, unknown = parser.parse_known_and_unknown_args(["x", "--y",
+ "--hello", "this"])
+ assert ns.hello
+ assert ns.file_or_dir == ['x']
+ assert unknown == ['--y', 'this']
+
+ def test_parse_will_set_default(self, parser):
+ parser.addoption("--hello", dest="hello", default="x", action="store")
+ option = parser.parse([])
+ assert option.hello == "x"
+ del option.hello
+ parser.parse_setoption([], option)
+ assert option.hello == "x"
+
+ def test_parse_setoption(self, parser):
+ parser.addoption("--hello", dest="hello", action="store")
+ parser.addoption("--world", dest="world", default=42)
+ class A: pass
+ option = A()
+ args = parser.parse_setoption(['--hello', 'world'], option)
+ assert option.hello == "world"
+ assert option.world == 42
+ assert not args
+
+ def test_parse_special_destination(self, parser):
+ parser.addoption("--ultimate-answer", type=int)
+ args = parser.parse(['--ultimate-answer', '42'])
+ assert args.ultimate_answer == 42
+
+ def test_parse_split_positional_arguments(self, parser):
+ parser.addoption("-R", action='store_true')
+ parser.addoption("-S", action='store_false')
+ args = parser.parse(['-R', '4', '2', '-S'])
+ assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
+ args = parser.parse(['-R', '-S', '4', '2', '-R'])
+ assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
+ assert args.R == True
+ assert args.S == False
+ args = parser.parse(['-R', '4', '-S', '2'])
+ assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2']
+ assert args.R == True
+ assert args.S == False
+
+ def test_parse_defaultgetter(self):
+ def defaultget(option):
+ if not hasattr(option, 'type'):
+ return
+ if option.type is int:
+ option.default = 42
+ elif option.type is str:
+ option.default = "world"
+ parser = parseopt.Parser(processopt=defaultget)
+ parser.addoption("--this", dest="this", type="int", action="store")
+ parser.addoption("--hello", dest="hello", type="string", action="store")
+ parser.addoption("--no", dest="no", action="store_true")
+ option = parser.parse([])
+ assert option.hello == "world"
+ assert option.this == 42
+ assert option.no is False
+
+ def test_drop_short_helper(self):
+ parser = py.std.argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter)
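+ # map_long_option pairs an alias with its canonical long spelling for the DropShorterLongHelpFormatter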
+ parser.add_argument('-t', '--twoword', '--duo', '--two-word', '--two',
+ help='foo').map_long_option = {'two': 'two-word'}
+ # throws error on --deux only!
+ parser.add_argument('-d', '--deuxmots', '--deux-mots',
+ action='store_true', help='foo').map_long_option = {'deux': 'deux-mots'}
+ parser.add_argument('-s', action='store_true', help='single short')
+ parser.add_argument('--abc', '-a',
+ action='store_true', help='bar')
+ parser.add_argument('--klm', '-k', '--kl-m',
+ action='store_true', help='bar')
+ parser.add_argument('-P', '--pq-r', '-p', '--pqr',
+ action='store_true', help='bar')
+ parser.add_argument('--zwei-wort', '--zweiwort', '--zweiwort',
+ action='store_true', help='bar')
+ parser.add_argument('-x', '--exit-on-first', '--exitfirst',
+ action='store_true', help='spam').map_long_option = {'exitfirst': 'exit-on-first'}
+ parser.add_argument('files_and_dirs', nargs='*')
+ args = parser.parse_args(['-k', '--duo', 'hallo', '--exitfirst'])
+ assert args.twoword == 'hallo'
+ assert args.klm is True
+ assert args.zwei_wort is False
+ assert args.exit_on_first is True
+ assert args.s is False
+ args = parser.parse_args(['--deux-mots'])
+ with pytest.raises(AttributeError):
+ assert args.deux_mots is True
+ assert args.deuxmots is True
+ args = parser.parse_args(['file', 'dir'])
+ assert '|'.join(args.files_and_dirs) == 'file|dir'
+
+ def test_drop_short_0(self, parser):
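+ # argparse prefix matching: '--k' below still resolves to --klm-hij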
+ parser.addoption('--funcarg', '--func-arg', action='store_true')
+ parser.addoption('--abc-def', '--abc-def', action='store_true')
+ parser.addoption('--klm-hij', action='store_true')
+ args = parser.parse(['--funcarg', '--k'])
+ assert args.funcarg is True
+ assert args.abc_def is False
+ assert args.klm_hij is True
+
+ def test_drop_short_2(self, parser):
+ parser.addoption('--func-arg', '--doit', action='store_true')
+ args = parser.parse(['--doit'])
+ assert args.func_arg is True
+
+ def test_drop_short_3(self, parser):
+ parser.addoption('--func-arg', '--funcarg', '--doit', action='store_true')
+ args = parser.parse(['abcd'])
+ assert args.func_arg is False
+ assert args.file_or_dir == ['abcd']
+
+ def test_drop_short_help0(self, parser, capsys):
+ parser.addoption('--func-args', '--doit', help = 'foo',
+ action='store_true')
+ parser.parse([])
+ help = parser.optparser.format_help()
+ assert '--func-args, --doit foo' in help
+
+ # testing would be more helpful with all help generated
+ def test_drop_short_help1(self, parser, capsys):
+ group = parser.getgroup("general")
+ group.addoption('--doit', '--func-args', action='store_true', help='foo')
+ group._addoption("-h", "--help", action="store_true", dest="help",
+ help="show help message and configuration info")
+ parser.parse(['-h'])
+ help = parser.optparser.format_help()
+ assert '-doit, --func-args foo' in help
+
+
+def test_argcomplete(testdir, monkeypatch):
+ if not py.path.local.sysfind('bash'):
+ pytest.skip("bash not available")
+ script = str(testdir.tmpdir.join("test_argcomplete"))
+ pytest_bin = sys.argv[0]
+ if "py.test" not in os.path.basename(pytest_bin):
+ pytest.skip("need to be run with py.test executable, not %s" %(pytest_bin,))
+
+ with open(str(script), 'w') as fp:
+ # redirecting output from argcomplete to stdout and stderr is not trivial
+ # http://stackoverflow.com/q/12589419/1307905
+ # so we use bash
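+ # 8>&1 9>&2 maps argcomplete's completion fds 8 and 9 back to stdout/stderr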
+ fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin)
+ # an alternative would be to extend Testdir.{run(),_run(),popen()} to accept
+ # a keyword argument 'env' that replaces os.environ in popen or extends a
+ # copy of it; advantage: one could not forget to restore it
+ monkeypatch.setenv('_ARGCOMPLETE', "1")
+ monkeypatch.setenv('_ARGCOMPLETE_IFS',"\x0b")
+ monkeypatch.setenv('COMP_WORDBREAKS', ' \\t\\n"\\\'><=;|&(:')
+
+ arg = '--fu'
+ monkeypatch.setenv('COMP_LINE', "py.test " + arg)
+ monkeypatch.setenv('COMP_POINT', str(len("py.test " + arg)))
+ result = testdir.run('bash', str(script), arg)
+ if result.ret == 255:
+ # argcomplete not found
+ pytest.skip("argcomplete not available")
+ elif not result.stdout.str():
+ pytest.skip("bash provided no output, argcomplete not available?")
+ else:
+ if py.std.sys.version_info < (2,7):
+ result.stdout.lines = result.stdout.lines[0].split('\x0b')
+ result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
+ else:
+ result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"])
+ if py.std.sys.version_info < (2,7):
+ return
+ os.mkdir('test_argcomplete.d')
+ arg = 'test_argc'
+ monkeypatch.setenv('COMP_LINE', "py.test " + arg)
+ monkeypatch.setenv('COMP_POINT', str(len('py.test ' + arg)))
+ result = testdir.run('bash', str(script), arg)
+ result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
+
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_pastebin.py b/testing/web-platform/tests/tools/pytest/testing/test_pastebin.py
new file mode 100644
index 000000000..03570a5c7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_pastebin.py
@@ -0,0 +1,115 @@
+# encoding: utf-8
+import sys
+import pytest
+
+class TestPasteCapture:
+
+ @pytest.fixture
+ def pastebinlist(self, monkeypatch, request):
+ pastebinlist = []
+ plugin = request.config.pluginmanager.getplugin('pastebin')
+ monkeypatch.setattr(plugin, 'create_new_paste', pastebinlist.append)
+ return pastebinlist
+
+ def test_failed(self, testdir, pastebinlist):
+ testpath = testdir.makepyfile("""
+ import pytest
+ def test_pass():
+ pass
+ def test_fail():
+ assert 0
+ def test_skip():
+ pytest.skip("")
+ """)
+ reprec = testdir.inline_run(testpath, "--paste=failed")
+ assert len(pastebinlist) == 1
+ s = pastebinlist[0]
+ assert s.find("def test_fail") != -1
+ assert reprec.countoutcomes() == [1,1,1]
+
+ def test_all(self, testdir, pastebinlist):
+ from _pytest.pytester import LineMatcher
+ testpath = testdir.makepyfile("""
+ import pytest
+ def test_pass():
+ pass
+ def test_fail():
+ assert 0
+ def test_skip():
+ pytest.skip("")
+ """)
+ reprec = testdir.inline_run(testpath, "--pastebin=all", '-v')
+ assert reprec.countoutcomes() == [1,1,1]
+ assert len(pastebinlist) == 1
+ contents = pastebinlist[0].decode('utf-8')
+ matcher = LineMatcher(contents.splitlines())
+ matcher.fnmatch_lines([
+ '*test_pass PASSED*',
+ '*test_fail FAILED*',
+ '*test_skip SKIPPED*',
+ '*== 1 failed, 1 passed, 1 skipped in *'
+ ])
+
+ def test_non_ascii_paste_text(self, testdir):
+ """Make sure that text which contains non-ascii characters is pasted
+ correctly. See #1219.
+ """
+ testdir.makepyfile(test_unicode="""
+ # encoding: utf-8
+ def test():
+ assert '☺' == 1
+ """)
+ result = testdir.runpytest('--pastebin=all')
+ if sys.version_info[0] == 3:
+ expected_msg = "*assert '☺' == 1*"
+ else:
+ expected_msg = "*assert '\\xe2\\x98\\xba' == 1*"
+ result.stdout.fnmatch_lines([
+ expected_msg,
+ "*== 1 failed in *",
+ '*Sending information to Paste Service*',
+ ])
+
+
+class TestPaste:
+
+ @pytest.fixture
+ def pastebin(self, request):
+ return request.config.pluginmanager.getplugin('pastebin')
+
+ @pytest.fixture
+ def mocked_urlopen(self, monkeypatch):
+ """
+ monkeypatch the actual urlopen calls done by the internal plugin
+ function that connects to bpaste service.
+ """
+ calls = []
+ def mocked(url, data):
+ calls.append((url, data))
+ class DummyFile:
+ def read(self):
+ # part of html of a normal response
+ return b'View <a href="/raw/3c0c6750bd">raw</a>.'
+ return DummyFile()
+
+ if sys.version_info < (3, 0):
+ import urllib
+ monkeypatch.setattr(urllib, 'urlopen', mocked)
+ else:
+ import urllib.request
+ monkeypatch.setattr(urllib.request, 'urlopen', mocked)
+ return calls
+
+ def test_create_new_paste(self, pastebin, mocked_urlopen):
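+ # the paste id is parsed from the '/raw/<id>' link in the mocked response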
+ result = pastebin.create_new_paste(b'full-paste-contents')
+ assert result == 'https://bpaste.net/show/3c0c6750bd'
+ assert len(mocked_urlopen) == 1
+ url, data = mocked_urlopen[0]
+ assert type(data) is bytes
+ lexer = 'python3' if sys.version_info[0] == 3 else 'python'
+ assert url == 'https://bpaste.net'
+ assert 'lexer=%s' % lexer in data.decode()
+ assert 'code=full-paste-contents' in data.decode()
+ assert 'expiry=1week' in data.decode()
+
+
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_pdb.py b/testing/web-platform/tests/tools/pytest/testing/test_pdb.py
new file mode 100644
index 000000000..eeddcf0ae
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_pdb.py
@@ -0,0 +1,313 @@
+import sys
+
+import _pytest._code
+
+
+def runpdb_and_get_report(testdir, source):
+ p = testdir.makepyfile(source)
+ result = testdir.runpytest_inprocess("--pdb", p)
+ reports = result.reprec.getreports("pytest_runtest_logreport")
+ assert len(reports) == 3, reports # setup/call/teardown
+ return reports[1]
+
+
+class TestPDB:
+ def pytest_funcarg__pdblist(self, request):
+ monkeypatch = request.getfuncargvalue("monkeypatch")
+ pdblist = []
+ def mypdb(*args):
+ pdblist.append(args)
+ plugin = request.config.pluginmanager.getplugin('pdb')
+ monkeypatch.setattr(plugin, 'post_mortem', mypdb)
+ return pdblist
+
+ def test_pdb_on_fail(self, testdir, pdblist):
+ rep = runpdb_and_get_report(testdir, """
+ def test_func():
+ assert 0
+ """)
+ assert rep.failed
+ assert len(pdblist) == 1
+ tb = _pytest._code.Traceback(pdblist[0][0])
+ assert tb[-1].name == "test_func"
+
+ def test_pdb_on_xfail(self, testdir, pdblist):
+ rep = runpdb_and_get_report(testdir, """
+ import pytest
+ @pytest.mark.xfail
+ def test_func():
+ assert 0
+ """)
+ assert "xfail" in rep.keywords
+ assert not pdblist
+
+ def test_pdb_on_skip(self, testdir, pdblist):
+ rep = runpdb_and_get_report(testdir, """
+ import pytest
+ def test_func():
+ pytest.skip("hello")
+ """)
+ assert rep.skipped
+ assert len(pdblist) == 0
+
+ def test_pdb_on_BdbQuit(self, testdir, pdblist):
+ rep = runpdb_and_get_report(testdir, """
+ import bdb
+ def test_func():
+ raise bdb.BdbQuit
+ """)
+ assert rep.failed
+ assert len(pdblist) == 0
+
+ def test_pdb_interaction(self, testdir):
+ p1 = testdir.makepyfile("""
+ def test_1():
+ i = 0
+ assert i == 1
+ """)
+ child = testdir.spawn_pytest("--pdb %s" % p1)
+ child.expect(".*def test_1")
+ child.expect(".*i = 0")
+ child.expect("(Pdb)")
+ child.sendeof()
+ rest = child.read().decode("utf8")
+ assert "1 failed" in rest
+ assert "def test_1" not in rest
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_interaction_capture(self, testdir):
+ p1 = testdir.makepyfile("""
+ def test_1():
+ print("getrekt")
+ assert False
+ """)
+ child = testdir.spawn_pytest("--pdb %s" % p1)
+ child.expect("getrekt")
+ child.expect("(Pdb)")
+ child.sendeof()
+ rest = child.read().decode("utf8")
+ assert "1 failed" in rest
+ assert "getrekt" not in rest
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_interaction_exception(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pytest
+ def globalfunc():
+ pass
+ def test_1():
+ pytest.raises(ValueError, globalfunc)
+ """)
+ child = testdir.spawn_pytest("--pdb %s" % p1)
+ child.expect(".*def test_1")
+ child.expect(".*pytest.raises.*globalfunc")
+ child.expect("(Pdb)")
+ child.sendline("globalfunc")
+ child.expect(".*function")
+ child.sendeof()
+ child.expect("1 failed")
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_interaction_on_collection_issue181(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pytest
+ xxx
+ """)
+ child = testdir.spawn_pytest("--pdb %s" % p1)
+ #child.expect(".*import pytest.*")
+ child.expect("(Pdb)")
+ child.sendeof()
+ child.expect("1 error")
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_interaction_on_internal_error(self, testdir):
+ testdir.makeconftest("""
+ def pytest_runtest_protocol():
+ 0/0
+ """)
+ p1 = testdir.makepyfile("def test_func(): pass")
+ child = testdir.spawn_pytest("--pdb %s" % p1)
+ #child.expect(".*import pytest.*")
+ child.expect("(Pdb)")
+ child.sendeof()
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_interaction_capturing_simple(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pytest
+ def test_1():
+ i = 0
+ print ("hello17")
+ pytest.set_trace()
+ x = 3
+ """)
+ child = testdir.spawn_pytest(str(p1))
+ child.expect("test_1")
+ child.expect("x = 3")
+ child.expect("(Pdb)")
+ child.sendeof()
+ rest = child.read().decode("utf-8")
+ assert "1 failed" in rest
+ assert "def test_1" in rest
+ assert "hello17" in rest # out is captured
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_set_trace_interception(self, testdir):
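+ # plain pdb.set_trace() should be intercepted without tripping the capture machinery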
+ p1 = testdir.makepyfile("""
+ import pdb
+ def test_1():
+ pdb.set_trace()
+ """)
+ child = testdir.spawn_pytest(str(p1))
+ child.expect("test_1")
+ child.expect("(Pdb)")
+ child.sendeof()
+ rest = child.read().decode("utf8")
+ assert "1 failed" in rest
+ assert "reading from stdin while output" not in rest
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_and_capsys(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pytest
+ def test_1(capsys):
+ print ("hello1")
+ pytest.set_trace()
+ """)
+ child = testdir.spawn_pytest(str(p1))
+ child.expect("test_1")
+ child.send("capsys.readouterr()\n")
+ child.expect("hello1")
+ child.sendeof()
+ child.read()
+ if child.isalive():
+ child.wait()
+
+ def test_set_trace_capturing_afterwards(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pdb
+ def test_1():
+ pdb.set_trace()
+ def test_2():
+ print ("hello")
+ assert 0
+ """)
+ child = testdir.spawn_pytest(str(p1))
+ child.expect("test_1")
+ child.send("c\n")
+ child.expect("test_2")
+ child.expect("Captured")
+ child.expect("hello")
+ child.sendeof()
+ child.read()
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_interaction_doctest(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pytest
+ def function_1():
+ '''
+ >>> i = 0
+ >>> assert i == 1
+ '''
+ """)
+ child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
+ child.expect("(Pdb)")
+ child.sendline('i')
+ child.expect("0")
+ child.expect("(Pdb)")
+ child.sendeof()
+ rest = child.read().decode("utf8")
+ assert "1 failed" in rest
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_interaction_capturing_twice(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pytest
+ def test_1():
+ i = 0
+ print ("hello17")
+ pytest.set_trace()
+ x = 3
+ print ("hello18")
+ pytest.set_trace()
+ x = 4
+ """)
+ child = testdir.spawn_pytest(str(p1))
+ child.expect("test_1")
+ child.expect("x = 3")
+ child.expect("(Pdb)")
+ child.sendline('c')
+ child.expect("x = 4")
+ child.sendeof()
+ rest = child.read().decode("utf8")
+ assert "1 failed" in rest
+ assert "def test_1" in rest
+ assert "hello17" in rest # out is captured
+ assert "hello18" in rest # out is captured
+ if child.isalive():
+ child.wait()
+
+ def test_pdb_used_outside_test(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pytest
+ pytest.set_trace()
+ x = 5
+ """)
+ child = testdir.spawn("%s %s" %(sys.executable, p1))
+ child.expect("x = 5")
+ child.sendeof()
+ child.wait()
+
+ def test_pdb_used_in_generate_tests(self, testdir):
+ p1 = testdir.makepyfile("""
+ import pytest
+ def pytest_generate_tests(metafunc):
+ pytest.set_trace()
+ x = 5
+ def test_foo(a):
+ pass
+ """)
+ child = testdir.spawn_pytest(str(p1))
+ child.expect("x = 5")
+ child.sendeof()
+ child.wait()
+
+ def test_pdb_collection_failure_is_shown(self, testdir):
+ p1 = testdir.makepyfile("""xxx """)
+ result = testdir.runpytest_subprocess("--pdb", p1)
+ result.stdout.fnmatch_lines([
+ "*NameError*xxx*",
+ "*1 error*",
+ ])
+
+ def test_enter_pdb_hook_is_called(self, testdir):
+ testdir.makeconftest("""
+ def pytest_enter_pdb(config):
+ assert config.testing_verification == 'configured'
+ print('enter_pdb_hook')
+
+ def pytest_configure(config):
+ config.testing_verification = 'configured'
+ """)
+ p1 = testdir.makepyfile("""
+ import pytest
+
+ def test_foo():
+ pytest.set_trace()
+ """)
+ child = testdir.spawn_pytest(str(p1))
+ child.expect("enter_pdb_hook")
+ child.send('c\n')
+ child.sendeof()
+ if child.isalive():
+ child.wait()
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_pluginmanager.py b/testing/web-platform/tests/tools/pytest/testing/test_pluginmanager.py
new file mode 100644
index 000000000..36847638d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_pluginmanager.py
@@ -0,0 +1,340 @@
+import pytest
+import py
+import os
+
+from _pytest.config import get_config, PytestPluginManager
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+
+@pytest.fixture
+def pytestpm():
+ return PytestPluginManager()
+
+class TestPytestPluginInteractions:
+ def test_addhooks_conftestplugin(self, testdir):
+ testdir.makepyfile(newhooks="""
+ def pytest_myhook(xyz):
+ "new hook"
+ """)
+ conf = testdir.makeconftest("""
+ import sys ; sys.path.insert(0, '.')
+ import newhooks
+ def pytest_addhooks(pluginmanager):
+ pluginmanager.addhooks(newhooks)
+ def pytest_myhook(xyz):
+ return xyz + 1
+ """)
+ config = get_config()
+ pm = config.pluginmanager
+ pm.hook.pytest_addhooks.call_historic(
+ kwargs=dict(pluginmanager=config.pluginmanager))
+ config.pluginmanager._importconftest(conf)
+ #print(config.pluginmanager.get_plugins())
+ res = config.hook.pytest_myhook(xyz=10)
+ assert res == [11]
+
+ def test_addhooks_nohooks(self, testdir):
+ testdir.makeconftest("""
+ import sys
+ def pytest_addhooks(pluginmanager):
+ pluginmanager.addhooks(sys)
+ """)
+ res = testdir.runpytest()
+ assert res.ret != 0
+ res.stderr.fnmatch_lines([
+ "*did not find*sys*"
+ ])
+
+ def test_namespace_early_from_import(self, testdir):
+ p = testdir.makepyfile("""
+ from pytest import Item
+ from pytest import Item as Item2
+ assert Item is Item2
+ """)
+ result = testdir.runpython(p)
+ assert result.ret == 0
+
+ def test_do_ext_namespace(self, testdir):
+ testdir.makeconftest("""
+ def pytest_namespace():
+ return {'hello': 'world'}
+ """)
+ p = testdir.makepyfile("""
+ from pytest import hello
+ import pytest
+ def test_hello():
+ assert hello == "world"
+ assert 'hello' in pytest.__all__
+ """)
+ reprec = testdir.inline_run(p)
+ reprec.assertoutcome(passed=1)
+
+ def test_do_option_postinitialize(self, testdir):
+ config = testdir.parseconfigure()
+ assert not hasattr(config.option, 'test123')
+ p = testdir.makepyfile("""
+ def pytest_addoption(parser):
+ parser.addoption('--test123', action="store_true",
+ default=True)
+ """)
+ config.pluginmanager._importconftest(p)
+ assert config.option.test123
+
+ def test_configure(self, testdir):
+ config = testdir.parseconfig()
+ l = []
+ class A:
+ def pytest_configure(self, config):
+ l.append(self)
+
+ config.pluginmanager.register(A())
+ assert len(l) == 0
+ config._do_configure()
+ assert len(l) == 1
+ config.pluginmanager.register(A()) # leads to a configured() plugin
+ assert len(l) == 2
+ assert l[0] != l[1]
+
+ config._ensure_unconfigure()
+ config.pluginmanager.register(A())
+ assert len(l) == 2
+
+ def test_hook_tracing(self):
+ pytestpm = get_config().pluginmanager # fully initialized with plugins
+ saveindent = []
+ class api1:
+ def pytest_plugin_registered(self):
+ saveindent.append(pytestpm.trace.root.indent)
+ class api2:
+ def pytest_plugin_registered(self):
+ saveindent.append(pytestpm.trace.root.indent)
+ raise ValueError()
+ l = []
+ pytestpm.trace.root.setwriter(l.append)
+ undo = pytestpm.enable_tracing()
+ try:
+ indent = pytestpm.trace.root.indent
+ p = api1()
+ pytestpm.register(p)
+ assert pytestpm.trace.root.indent == indent
+ assert len(l) >= 2
+ assert 'pytest_plugin_registered' in l[0]
+ assert 'finish' in l[1]
+
+ l[:] = []
+ with pytest.raises(ValueError):
+ pytestpm.register(api2())
+ assert pytestpm.trace.root.indent == indent
+ assert saveindent[0] > indent
+ finally:
+ undo()
+
+ def test_warn_on_deprecated_multicall(self, pytestpm):
+ warnings = []
+
+ class get_warnings:
+ def pytest_logwarning(self, message):
+ warnings.append(message)
+
+ class Plugin:
+ def pytest_configure(self, __multicall__):
+ pass
+
+ pytestpm.register(get_warnings())
+ before = list(warnings)
+ pytestpm.register(Plugin())
+ assert len(warnings) == len(before) + 1
+ assert "deprecated" in warnings[-1]
+
+ def test_warn_on_deprecated_addhooks(self, pytestpm):
+ warnings = []
+
+ class get_warnings:
+ def pytest_logwarning(self, code, fslocation, message, nodeid):
+ warnings.append(message)
+
+ class Plugin:
+ def pytest_testhook():
+ pass
+
+ pytestpm.register(get_warnings())
+ before = list(warnings)
+ pytestpm.addhooks(Plugin())
+ assert len(warnings) == len(before) + 1
+ assert "deprecated" in warnings[-1]
+
+
+def test_namespace_has_default_and_env_plugins(testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ pytest.mark
+ """)
+ result = testdir.runpython(p)
+ assert result.ret == 0
+
+def test_default_markers(testdir):
+ result = testdir.runpytest("--markers")
+ result.stdout.fnmatch_lines([
+ "*tryfirst*first*",
+ "*trylast*last*",
+ ])
+
+
+def test_importplugin_issue375(testdir, pytestpm):
+ """Don't hide import errors when importing plugins and provide
+ an easy to debug message.
+ """
+ testdir.syspathinsert(testdir.tmpdir)
+ testdir.makepyfile(qwe="import aaaa")
+ with pytest.raises(ImportError) as excinfo:
+ pytestpm.import_plugin("qwe")
+ expected = '.*Error importing plugin "qwe": No module named \'?aaaa\'?'
+ assert py.std.re.match(expected, str(excinfo.value))
+
+
+class TestPytestPluginManager:
+ def test_register_imported_modules(self):
+ pm = PytestPluginManager()
+ mod = py.std.types.ModuleType("x.y.pytest_hello")
+ pm.register(mod)
+ assert pm.is_registered(mod)
+ l = pm.get_plugins()
+ assert mod in l
+ pytest.raises(ValueError, "pm.register(mod)")
+ pytest.raises(ValueError, lambda: pm.register(mod))
+ #assert not pm.is_registered(mod2)
+ assert pm.get_plugins() == l
+
+ def test_canonical_import(self, monkeypatch):
+ mod = py.std.types.ModuleType("pytest_xyz")
+ monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod)
+ pm = PytestPluginManager()
+ pm.import_plugin('pytest_xyz')
+ assert pm.get_plugin('pytest_xyz') == mod
+ assert pm.is_registered(mod)
+
+ def test_consider_module(self, testdir, pytestpm):
+ testdir.syspathinsert()
+ testdir.makepyfile(pytest_p1="#")
+ testdir.makepyfile(pytest_p2="#")
+ mod = py.std.types.ModuleType("temp")
+ mod.pytest_plugins = ["pytest_p1", "pytest_p2"]
+ pytestpm.consider_module(mod)
+ assert pytestpm.get_plugin("pytest_p1").__name__ == "pytest_p1"
+ assert pytestpm.get_plugin("pytest_p2").__name__ == "pytest_p2"
+
+ def test_consider_module_import_module(self, testdir):
+ pytestpm = get_config().pluginmanager
+ mod = py.std.types.ModuleType("x")
+ mod.pytest_plugins = "pytest_a"
+ aplugin = testdir.makepyfile(pytest_a="#")
+ reprec = testdir.make_hook_recorder(pytestpm)
+ #syspath.prepend(aplugin.dirpath())
+ py.std.sys.path.insert(0, str(aplugin.dirpath()))
+ pytestpm.consider_module(mod)
+ call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name)
+ assert call.plugin.__name__ == "pytest_a"
+
+ # check that it is not registered twice
+ pytestpm.consider_module(mod)
+ l = reprec.getcalls("pytest_plugin_registered")
+ assert len(l) == 1
+
+ def test_consider_env_fails_to_import(self, monkeypatch, pytestpm):
+ monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",")
+ with pytest.raises(ImportError):
+ pytestpm.consider_env()
+
+ def test_plugin_skip(self, testdir, monkeypatch):
+ p = testdir.makepyfile(skipping1="""
+ import pytest
+ pytest.skip("hello")
+ """)
+ p.copy(p.dirpath("skipping2.py"))
+ monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
+ result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True)
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+ result.stdout.fnmatch_lines([
+ "WI1*skipped plugin*skipping1*hello*",
+ "WI1*skipped plugin*skipping2*hello*",
+ ])
+
+ def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm):
+ testdir.syspathinsert()
+ testdir.makepyfile(xy123="#")
+ monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123')
+ l1 = len(pytestpm.get_plugins())
+ pytestpm.consider_env()
+ l2 = len(pytestpm.get_plugins())
+ assert l2 == l1 + 1
+ assert pytestpm.get_plugin('xy123')
+ pytestpm.consider_env()
+ l3 = len(pytestpm.get_plugins())
+ assert l2 == l3
+
+ def test_pluginmanager_ENV_startup(self, testdir, monkeypatch):
+ testdir.makepyfile(pytest_x500="#")
+ p = testdir.makepyfile("""
+ import pytest
+ def test_hello(pytestconfig):
+ plugin = pytestconfig.pluginmanager.get_plugin('pytest_x500')
+ assert plugin is not None
+ """)
+ monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",")
+ result = testdir.runpytest(p, syspathinsert=True)
+ assert result.ret == 0
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def test_import_plugin_importname(self, testdir, pytestpm):
+ pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")')
+ pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwx.y")')
+
+ testdir.syspathinsert()
+ pluginname = "pytest_hello"
+ testdir.makepyfile(**{pluginname: ""})
+ pytestpm.import_plugin("pytest_hello")
+ len1 = len(pytestpm.get_plugins())
+ pytestpm.import_plugin("pytest_hello")
+ len2 = len(pytestpm.get_plugins())
+ assert len1 == len2
+ plugin1 = pytestpm.get_plugin("pytest_hello")
+ assert plugin1.__name__.endswith('pytest_hello')
+ plugin2 = pytestpm.get_plugin("pytest_hello")
+ assert plugin2 is plugin1
+
+ def test_import_plugin_dotted_name(self, testdir, pytestpm):
+ pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")')
+ pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwex.y")')
+
+ testdir.syspathinsert()
+ testdir.mkpydir("pkg").join("plug.py").write("x=3")
+ pluginname = "pkg.plug"
+ pytestpm.import_plugin(pluginname)
+ mod = pytestpm.get_plugin("pkg.plug")
+ assert mod.x == 3
+
+ def test_consider_conftest_deps(self, testdir, pytestpm):
+ mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport()
+ with pytest.raises(ImportError):
+ pytestpm.consider_conftest(mod)
+
+
+class TestPytestPluginManagerBootstrapping:
+ def test_preparse_args(self, pytestpm):
+ pytest.raises(ImportError, lambda:
+ pytestpm.consider_preparse(["xyz", "-p", "hello123"]))
+
+ def test_plugin_prevent_register(self, pytestpm):
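+ # '-p no:abc' should block any later registration under the name 'abc'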
+ pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
+ l1 = pytestpm.get_plugins()
+ pytestpm.register(42, name="abc")
+ l2 = pytestpm.get_plugins()
+ assert len(l2) == len(l1)
+ assert 42 not in l2
+
+ def test_plugin_prevent_register_unregistered_already_registered(self, pytestpm):
+ pytestpm.register(42, name="abc")
+ l1 = pytestpm.get_plugins()
+ assert 42 in l1
+ pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
+ l2 = pytestpm.get_plugins()
+ assert 42 not in l2
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_pytester.py b/testing/web-platform/tests/tools/pytest/testing/test_pytester.py
new file mode 100644
index 000000000..65660afdf
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_pytester.py
@@ -0,0 +1,122 @@
+import pytest
+import os
+from _pytest.pytester import HookRecorder
+from _pytest.config import PytestPluginManager
+from _pytest.main import EXIT_OK, EXIT_TESTSFAILED
+
+
+def test_make_hook_recorder(testdir):
+ item = testdir.getitem("def test_func(): pass")
+ recorder = testdir.make_hook_recorder(item.config.pluginmanager)
+ assert not recorder.getfailures()
+
+ pytest.xfail("internal reportrecorder tests need refactoring")
+ class rep:
+ excinfo = None
+ passed = False
+ failed = True
+ skipped = False
+ when = "call"
+
+ recorder.hook.pytest_runtest_logreport(report=rep)
+ failures = recorder.getfailures()
+ assert failures == [rep]
+ failures = recorder.getfailures()
+ assert failures == [rep]
+
+ class rep:
+ excinfo = None
+ passed = False
+ failed = False
+ skipped = True
+ when = "call"
+ rep.passed = False
+ rep.skipped = True
+ recorder.hook.pytest_runtest_logreport(report=rep)
+
+ modcol = testdir.getmodulecol("")
+ rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
+ rep.passed = False
+ rep.failed = True
+ rep.skipped = False
+ recorder.hook.pytest_collectreport(report=rep)
+
+ passed, skipped, failed = recorder.listoutcomes()
+ assert not passed and skipped and failed
+
+ numpassed, numskipped, numfailed = recorder.countoutcomes()
+ assert numpassed == 0
+ assert numskipped == 1
+ assert numfailed == 1
+ assert len(recorder.getfailedcollections()) == 1
+
+ recorder.unregister()
+ recorder.clear()
+ recorder.hook.pytest_runtest_logreport(report=rep)
+ pytest.raises(ValueError, "recorder.getfailures()")
+
+
+def test_parseconfig(testdir):
+ config1 = testdir.parseconfig()
+ config2 = testdir.parseconfig()
+ assert config2 != config1
+ assert config1 != pytest.config
+
+def test_testdir_runs_with_plugin(testdir):
+ testdir.makepyfile("""
+ pytest_plugins = "pytester"
+ def test_hello(testdir):
+ assert 1
+ """)
+ result = testdir.runpytest()
+ result.assert_outcomes(passed=1)
+
+
+def make_holder():
+ class apiclass:
+ def pytest_xyz(self, arg):
+ "x"
+ def pytest_xyz_noarg(self):
+ "x"
+
+ apimod = type(os)('api')
+ def pytest_xyz(arg):
+ "x"
+ def pytest_xyz_noarg():
+ "x"
+ apimod.pytest_xyz = pytest_xyz
+ apimod.pytest_xyz_noarg = pytest_xyz_noarg
+ return apiclass, apimod
+
+
+@pytest.mark.parametrize("holder", make_holder())
+def test_hookrecorder_basic(holder):
+ pm = PytestPluginManager()
+ pm.addhooks(holder)
+ rec = HookRecorder(pm)
+ pm.hook.pytest_xyz(arg=123)
+ call = rec.popcall("pytest_xyz")
+ assert call.arg == 123
+ assert call._name == "pytest_xyz"
+ pytest.raises(pytest.fail.Exception, "rec.popcall('abc')")
+ pm.hook.pytest_xyz_noarg()
+ call = rec.popcall("pytest_xyz_noarg")
+ assert call._name == "pytest_xyz_noarg"
+
+
+def test_makepyfile_unicode(testdir):
+ global unichr
+ try:
+ unichr(65)
+ except NameError:
+ unichr = chr
+ testdir.makepyfile(unichr(0xfffd))
+
+def test_inline_run_clean_modules(testdir):
+ test_mod = testdir.makepyfile("def test_foo(): assert True")
+ result = testdir.inline_run(str(test_mod))
+ assert result.ret == EXIT_OK
+ # rewrite module, now test should fail if module was re-imported
+ test_mod.write("def test_foo(): assert False")
+ result2 = testdir.inline_run(str(test_mod))
+ assert result2.ret == EXIT_TESTSFAILED
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_recwarn.py b/testing/web-platform/tests/tools/pytest/testing/test_recwarn.py
new file mode 100644
index 000000000..87e5846c2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_recwarn.py
@@ -0,0 +1,227 @@
+import warnings
+import py
+import pytest
+from _pytest.recwarn import WarningsRecorder
+
+
+def test_recwarn_functional(testdir):
+ reprec = testdir.inline_runsource("""
+ import warnings
+ oldwarn = warnings.showwarning
+ def test_method(recwarn):
+ assert warnings.showwarning != oldwarn
+ warnings.warn("hello")
+ warn = recwarn.pop()
+ assert isinstance(warn.message, UserWarning)
+ def test_finalized():
+ assert warnings.showwarning == oldwarn
+ """)
+ res = reprec.countoutcomes()
+ assert tuple(res) == (2, 0, 0), res
+
+
+class TestWarningsRecorderChecker(object):
+ def test_recording(self, recwarn):
+ showwarning = py.std.warnings.showwarning
+ rec = WarningsRecorder()
+ with rec:
+ assert py.std.warnings.showwarning != showwarning
+ assert not rec.list
+ py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
+ assert len(rec.list) == 1
+ py.std.warnings.warn(DeprecationWarning("hello"))
+ assert len(rec.list) == 2
+ warn = rec.pop()
+ assert str(warn.message) == "hello"
+ l = rec.list
+ rec.clear()
+ assert len(rec.list) == 0
+ assert l is rec.list
+ pytest.raises(AssertionError, "rec.pop()")
+
+ assert showwarning == py.std.warnings.showwarning
+
+ def test_typechecking(self):
+ from _pytest.recwarn import WarningsChecker
+ with pytest.raises(TypeError):
+ WarningsChecker(5)
+ with pytest.raises(TypeError):
+ WarningsChecker(('hi', RuntimeWarning))
+ with pytest.raises(TypeError):
+ WarningsChecker([DeprecationWarning, RuntimeWarning])
+
+ def test_invalid_enter_exit(self):
+ # wrap this test in WarningsRecorder to ensure warning state gets reset
+ with WarningsRecorder():
+ with pytest.raises(RuntimeError):
+ rec = WarningsRecorder()
+ rec.__exit__(None, None, None) # can't exit before entering
+
+ with pytest.raises(RuntimeError):
+ rec = WarningsRecorder()
+ with rec:
+ with rec:
+ pass # can't enter twice
+
+
+class TestDeprecatedCall(object):
+ """test pytest.deprecated_call()"""
+
+ def dep(self, i, j=None):
+ if i == 0:
+ py.std.warnings.warn("is deprecated", DeprecationWarning,
+ stacklevel=1)
+ return 42
+
+ def dep_explicit(self, i):
+ if i == 0:
+ py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
+ filename="hello", lineno=3)
+
+ def test_deprecated_call_raises(self):
+ with pytest.raises(AssertionError) as excinfo:
+ pytest.deprecated_call(self.dep, 3, 5)
+ assert str(excinfo).find("did not produce") != -1
+
+ def test_deprecated_call(self):
+ pytest.deprecated_call(self.dep, 0, 5)
+
+ def test_deprecated_call_ret(self):
+ ret = pytest.deprecated_call(self.dep, 0)
+ assert ret == 42
+
+ def test_deprecated_call_preserves(self):
+ onceregistry = py.std.warnings.onceregistry.copy()
+ filters = py.std.warnings.filters[:]
+ warn = py.std.warnings.warn
+ warn_explicit = py.std.warnings.warn_explicit
+ self.test_deprecated_call_raises()
+ self.test_deprecated_call()
+ assert onceregistry == py.std.warnings.onceregistry
+ assert filters == py.std.warnings.filters
+ assert warn is py.std.warnings.warn
+ assert warn_explicit is py.std.warnings.warn_explicit
+
+ def test_deprecated_explicit_call_raises(self):
+ with pytest.raises(AssertionError):
+ pytest.deprecated_call(self.dep_explicit, 3)
+
+ def test_deprecated_explicit_call(self):
+ pytest.deprecated_call(self.dep_explicit, 0)
+ pytest.deprecated_call(self.dep_explicit, 0)
+
+ def test_deprecated_call_as_context_manager_no_warning(self):
+ with pytest.raises(pytest.fail.Exception) as ex:
+ with pytest.deprecated_call():
+ self.dep(1)
+ assert str(ex.value) == "DID NOT WARN"
+
+ def test_deprecated_call_as_context_manager(self):
+ with pytest.deprecated_call():
+ self.dep(0)
+
+ def test_deprecated_call_pending(self):
+ def f():
+ py.std.warnings.warn(PendingDeprecationWarning("hi"))
+ pytest.deprecated_call(f)
+
+ def test_deprecated_call_specificity(self):
+ other_warnings = [Warning, UserWarning, SyntaxWarning, RuntimeWarning,
+ FutureWarning, ImportWarning, UnicodeWarning]
+ for warning in other_warnings:
+ def f():
+ py.std.warnings.warn(warning("hi"))
+ with pytest.raises(AssertionError):
+ pytest.deprecated_call(f)
+
+ def test_deprecated_function_already_called(self, testdir):
+ """deprecated_call should be able to catch a call to a deprecated
+ function even if that function has already been called in the same
+ module. See #1190.
+ """
+ testdir.makepyfile("""
+ import warnings
+ import pytest
+
+ def deprecated_function():
+ warnings.warn("deprecated", DeprecationWarning)
+
+ def test_one():
+ deprecated_function()
+
+ def test_two():
+ pytest.deprecated_call(deprecated_function)
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('*=== 2 passed in *===')
+
+
+class TestWarns(object):
+ def test_strings(self):
+ # different messages, b/c Python suppresses multiple identical warnings
+ source1 = "warnings.warn('w1', RuntimeWarning)"
+ source2 = "warnings.warn('w2', RuntimeWarning)"
+ source3 = "warnings.warn('w3', RuntimeWarning)"
+ pytest.warns(RuntimeWarning, source1)
+ pytest.raises(pytest.fail.Exception,
+ lambda: pytest.warns(UserWarning, source2))
+ pytest.warns(RuntimeWarning, source3)
+
+ def test_function(self):
+ pytest.warns(SyntaxWarning,
+ lambda msg: warnings.warn(msg, SyntaxWarning), "syntax")
+
+ def test_warning_tuple(self):
+ pytest.warns((RuntimeWarning, SyntaxWarning),
+ lambda: warnings.warn('w1', RuntimeWarning))
+ pytest.warns((RuntimeWarning, SyntaxWarning),
+ lambda: warnings.warn('w2', SyntaxWarning))
+ pytest.raises(pytest.fail.Exception,
+ lambda: pytest.warns(
+ (RuntimeWarning, SyntaxWarning),
+ lambda: warnings.warn('w3', UserWarning)))
+
+ def test_as_contextmanager(self):
+ with pytest.warns(RuntimeWarning):
+ warnings.warn("runtime", RuntimeWarning)
+
+ with pytest.raises(pytest.fail.Exception):
+ with pytest.warns(RuntimeWarning):
+ warnings.warn("user", UserWarning)
+
+ with pytest.raises(pytest.fail.Exception):
+ with pytest.warns(UserWarning):
+ warnings.warn("runtime", RuntimeWarning)
+
+ with pytest.warns(UserWarning):
+ warnings.warn("user", UserWarning)
+
+ def test_record(self):
+ with pytest.warns(UserWarning) as record:
+ warnings.warn("user", UserWarning)
+
+ assert len(record) == 1
+ assert str(record[0].message) == "user"
+
+ def test_record_only(self):
+ with pytest.warns(None) as record:
+ warnings.warn("user", UserWarning)
+ warnings.warn("runtime", RuntimeWarning)
+
+ assert len(record) == 2
+ assert str(record[0].message) == "user"
+ assert str(record[1].message) == "runtime"
+
+ def test_double_test(self, testdir):
+ """If a test is run again, the warning should still be raised"""
+ testdir.makepyfile('''
+ import pytest
+ import warnings
+
+ @pytest.mark.parametrize('run', [1, 2])
+ def test(run):
+ with pytest.warns(RuntimeWarning):
+ warnings.warn("runtime", RuntimeWarning)
+ ''')
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(['*2 passed in*'])
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_resultlog.py b/testing/web-platform/tests/tools/pytest/testing/test_resultlog.py
new file mode 100644
index 000000000..74d13f643
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_resultlog.py
@@ -0,0 +1,236 @@
+import os
+
+import _pytest._code
+import py
+import pytest
+from _pytest.main import Node, Item, FSCollector
+from _pytest.resultlog import generic_path, ResultLog, \
+ pytest_configure, pytest_unconfigure
+
+
+def test_generic_path(testdir):
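+ # generic_path() renders the node chain as 'dir/file:Class().test[param]'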
+ from _pytest.main import Session
+ config = testdir.parseconfig()
+ session = Session(config)
+ p1 = Node('a', config=config, session=session)
+ #assert p1.fspath is None
+ p2 = Node('B', parent=p1)
+ p3 = Node('()', parent = p2)
+ item = Item('c', parent = p3)
+
+ res = generic_path(item)
+ assert res == 'a.B().c'
+
+ p0 = FSCollector('proj/test', config=config, session=session)
+ p1 = FSCollector('proj/test/a', parent=p0)
+ p2 = Node('B', parent=p1)
+ p3 = Node('()', parent = p2)
+ p4 = Node('c', parent=p3)
+ item = Item('[1]', parent = p4)
+
+ res = generic_path(item)
+ assert res == 'test/a:B().c[1]'
+
+def test_write_log_entry():
+ reslog = ResultLog(None, None)
+ reslog.logfile = py.io.TextIO()
+ reslog.write_log_entry('name', '.', '')
+ entry = reslog.logfile.getvalue()
+ assert entry[-1] == '\n'
+ entry_lines = entry.splitlines()
+ assert len(entry_lines) == 1
+ assert entry_lines[0] == '. name'
+
+ reslog.logfile = py.io.TextIO()
+ reslog.write_log_entry('name', 's', 'Skipped')
+ entry = reslog.logfile.getvalue()
+ assert entry[-1] == '\n'
+ entry_lines = entry.splitlines()
+ assert len(entry_lines) == 2
+ assert entry_lines[0] == 's name'
+ assert entry_lines[1] == ' Skipped'
+
+ reslog.logfile = py.io.TextIO()
+ reslog.write_log_entry('name', 's', 'Skipped\n')
+ entry = reslog.logfile.getvalue()
+ assert entry[-1] == '\n'
+ entry_lines = entry.splitlines()
+ assert len(entry_lines) == 2
+ assert entry_lines[0] == 's name'
+ assert entry_lines[1] == ' Skipped'
+
+ reslog.logfile = py.io.TextIO()
+ longrepr = ' tb1\n tb 2\nE tb3\nSome Error'
+ reslog.write_log_entry('name', 'F', longrepr)
+ entry = reslog.logfile.getvalue()
+ assert entry[-1] == '\n'
+ entry_lines = entry.splitlines()
+ assert len(entry_lines) == 5
+ assert entry_lines[0] == 'F name'
+ assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()]
+
+
+class TestWithFunctionIntegration:
+ # XXX (hpk) I think that the resultlog plugin should
+ # provide a Parser object so that one can remain
+ # ignorant regarding formatting details.
+ def getresultlog(self, testdir, arg):
+ resultlog = testdir.tmpdir.join("resultlog")
+ testdir.plugins.append("resultlog")
+ args = ["--resultlog=%s" % resultlog] + [arg]
+ testdir.runpytest(*args)
+ return [x for x in resultlog.readlines(cr=0) if x]
+
+ def test_collection_report(self, testdir):
+ ok = testdir.makepyfile(test_collection_ok="")
+ skip = testdir.makepyfile(test_collection_skip=
+ "import pytest ; pytest.skip('hello')")
+ fail = testdir.makepyfile(test_collection_fail="XXX")
+ lines = self.getresultlog(testdir, ok)
+ assert not lines
+
+ lines = self.getresultlog(testdir, skip)
+ assert len(lines) == 2
+ assert lines[0].startswith("S ")
+ assert lines[0].endswith("test_collection_skip.py")
+ assert lines[1].startswith(" ")
+ assert lines[1].endswith("test_collection_skip.py:1: Skipped: hello")
+
+ lines = self.getresultlog(testdir, fail)
+ assert lines
+ assert lines[0].startswith("F ")
+ assert lines[0].endswith("test_collection_fail.py"), lines[0]
+ for x in lines[1:]:
+ assert x.startswith(" ")
+ assert "XXX" in "".join(lines[1:])
+
+ def test_log_test_outcomes(self, testdir):
+ mod = testdir.makepyfile(test_mod="""
+ import pytest
+ def test_pass(): pass
+ def test_skip(): pytest.skip("hello")
+ def test_fail(): raise ValueError("FAIL")
+
+ @pytest.mark.xfail
+ def test_xfail(): raise ValueError("XFAIL")
+ @pytest.mark.xfail
+ def test_xpass(): pass
+
+ """)
+ lines = self.getresultlog(testdir, mod)
+ assert len(lines) >= 3
+ assert lines[0].startswith(". ")
+ assert lines[0].endswith("test_pass")
+ assert lines[1].startswith("s "), lines[1]
+ assert lines[1].endswith("test_skip")
+ assert lines[2].find("hello") != -1
+
+ assert lines[3].startswith("F ")
+ assert lines[3].endswith("test_fail")
+ tb = "".join(lines[4:8])
+ assert tb.find('raise ValueError("FAIL")') != -1
+
+ assert lines[8].startswith('x ')
+ tb = "".join(lines[8:14])
+ assert tb.find('raise ValueError("XFAIL")') != -1
+
+ assert lines[14].startswith('X ')
+ assert len(lines) == 15
+
+ @pytest.mark.parametrize("style", ("native", "long", "short"))
+ def test_internal_exception(self, style):
+        # internal errors are produced, for example, by a teardown failing
+        # at the end of the run or by a failing hook invocation
+ try:
+ raise ValueError
+ except ValueError:
+ excinfo = _pytest._code.ExceptionInfo()
+ reslog = ResultLog(None, py.io.TextIO())
+ reslog.pytest_internalerror(excinfo.getrepr(style=style))
+ entry = reslog.logfile.getvalue()
+ entry_lines = entry.splitlines()
+
+ assert entry_lines[0].startswith('! ')
+ if style != "native":
+ assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class
+ assert entry_lines[-1][0] == ' '
+ assert 'ValueError' in entry
+
+
+def test_generic(testdir, LineMatcher):
+ testdir.plugins.append("resultlog")
+ testdir.makepyfile("""
+ import pytest
+ def test_pass():
+ pass
+ def test_fail():
+ assert 0
+ def test_skip():
+ pytest.skip("")
+ @pytest.mark.xfail
+ def test_xfail():
+ assert 0
+ @pytest.mark.xfail(run=False)
+ def test_xfail_norun():
+ assert 0
+ """)
+ testdir.runpytest("--resultlog=result.log")
+ lines = testdir.tmpdir.join("result.log").readlines(cr=0)
+ LineMatcher(lines).fnmatch_lines([
+ ". *:test_pass",
+ "F *:test_fail",
+ "s *:test_skip",
+ "x *:test_xfail",
+ "x *:test_xfail_norun",
+ ])
+
+def test_makedir_for_resultlog(testdir, LineMatcher):
+ """--resultlog should automatically create directories for the log file"""
+ testdir.plugins.append("resultlog")
+ testdir.makepyfile("""
+ import pytest
+ def test_pass():
+ pass
+ """)
+ testdir.runpytest("--resultlog=path/to/result.log")
+ lines = testdir.tmpdir.join("path/to/result.log").readlines(cr=0)
+ LineMatcher(lines).fnmatch_lines([
+ ". *:test_pass",
+ ])
+
+
+def test_no_resultlog_on_slaves(testdir):
+ config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog")
+
+ assert not hasattr(config, '_resultlog')
+ pytest_configure(config)
+ assert hasattr(config, '_resultlog')
+ pytest_unconfigure(config)
+ assert not hasattr(config, '_resultlog')
+
+ config.slaveinput = {}
+ pytest_configure(config)
+ assert not hasattr(config, '_resultlog')
+ pytest_unconfigure(config)
+ assert not hasattr(config, '_resultlog')
+
+
+def test_failure_issue380(testdir):
+ testdir.makeconftest("""
+ import pytest
+ class MyCollector(pytest.File):
+ def collect(self):
+ raise ValueError()
+ def repr_failure(self, excinfo):
+ return "somestring"
+ def pytest_collect_file(path, parent):
+ return MyCollector(parent=parent, fspath=path)
+ """)
+ testdir.makepyfile("""
+ def test_func():
+ pass
+ """)
+ result = testdir.runpytest("--resultlog=log")
+ assert result.ret == 1
+
+
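As an aside for readers of the tests above: the resultlog format they exercise is line-oriented, a status letter plus the test's generic path on one line, followed by the longrepr indented by a single space per line. A minimal sketch of that contract, reusing the same internal ResultLog and py.io.TextIO objects the tests construct (the test ids here are hypothetical; this snippet is illustrative only and not part of this patch):

    from _pytest.resultlog import ResultLog
    import py

    # config is unused for direct writes; the logfile is an in-memory buffer
    log = ResultLog(None, py.io.TextIO())
    log.write_log_entry('test_mod.py:test_ok', '.', '')
    log.write_log_entry('test_mod.py:test_fail', 'F', 'assert 0\nAssertionError')
    print(log.logfile.getvalue())
    # . test_mod.py:test_ok
    # F test_mod.py:test_fail
    #  assert 0
    #  AssertionError
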
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_runner.py b/testing/web-platform/tests/tools/pytest/testing/test_runner.py
new file mode 100644
index 000000000..4421c5d0d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_runner.py
@@ -0,0 +1,634 @@
+# -*- coding: utf-8 -*-
+from __future__ import with_statement
+
+import _pytest._code
+import os
+import py
+import pytest
+import sys
+from _pytest import runner, main
+
+class TestSetupState:
+ def test_setup(self, testdir):
+ ss = runner.SetupState()
+ item = testdir.getitem("def test_func(): pass")
+ l = [1]
+ ss.prepare(item)
+ ss.addfinalizer(l.pop, colitem=item)
+ assert l
+ ss._pop_and_teardown()
+ assert not l
+
+ def test_teardown_exact_stack_empty(self, testdir):
+ item = testdir.getitem("def test_func(): pass")
+ ss = runner.SetupState()
+ ss.teardown_exact(item, None)
+ ss.teardown_exact(item, None)
+ ss.teardown_exact(item, None)
+
+ def test_setup_fails_and_failure_is_cached(self, testdir):
+ item = testdir.getitem("""
+ def setup_module(mod):
+ raise ValueError(42)
+ def test_func(): pass
+ """) # noqa
+ ss = runner.SetupState()
+ pytest.raises(ValueError, lambda: ss.prepare(item))
+ pytest.raises(ValueError, lambda: ss.prepare(item))
+
+ def test_teardown_multiple_one_fails(self, testdir):
+ r = []
+ def fin1(): r.append('fin1')
+ def fin2(): raise Exception('oops')
+ def fin3(): r.append('fin3')
+ item = testdir.getitem("def test_func(): pass")
+ ss = runner.SetupState()
+ ss.addfinalizer(fin1, item)
+ ss.addfinalizer(fin2, item)
+ ss.addfinalizer(fin3, item)
+ with pytest.raises(Exception) as err:
+ ss._callfinalizers(item)
+ assert err.value.args == ('oops',)
+ assert r == ['fin3', 'fin1']
+
+ def test_teardown_multiple_fail(self, testdir):
+ # Ensure the first exception is the one which is re-raised.
+ # Ideally both would be reported however.
+ def fin1(): raise Exception('oops1')
+ def fin2(): raise Exception('oops2')
+ item = testdir.getitem("def test_func(): pass")
+ ss = runner.SetupState()
+ ss.addfinalizer(fin1, item)
+ ss.addfinalizer(fin2, item)
+ with pytest.raises(Exception) as err:
+ ss._callfinalizers(item)
+ assert err.value.args == ('oops2',)
+
+
+class BaseFunctionalTests:
+ def test_passfunction(self, testdir):
+ reports = testdir.runitem("""
+ def test_func():
+ pass
+ """)
+ rep = reports[1]
+ assert rep.passed
+ assert not rep.failed
+ assert rep.outcome == "passed"
+ assert not rep.longrepr
+
+ def test_failfunction(self, testdir):
+ reports = testdir.runitem("""
+ def test_func():
+ assert 0
+ """)
+ rep = reports[1]
+ assert not rep.passed
+ assert not rep.skipped
+ assert rep.failed
+ assert rep.when == "call"
+ assert rep.outcome == "failed"
+ #assert isinstance(rep.longrepr, ReprExceptionInfo)
+
+ def test_skipfunction(self, testdir):
+ reports = testdir.runitem("""
+ import pytest
+ def test_func():
+ pytest.skip("hello")
+ """)
+ rep = reports[1]
+ assert not rep.failed
+ assert not rep.passed
+ assert rep.skipped
+ assert rep.outcome == "skipped"
+ #assert rep.skipped.when == "call"
+        #assert rep.skipped.reason == "hello"
+ #assert rep.skipped.location.lineno == 3
+ #assert rep.skipped.location.path
+ #assert not rep.skipped.failurerepr
+
+ def test_skip_in_setup_function(self, testdir):
+ reports = testdir.runitem("""
+ import pytest
+ def setup_function(func):
+ pytest.skip("hello")
+ def test_func():
+ pass
+ """)
+ print(reports)
+ rep = reports[0]
+ assert not rep.failed
+ assert not rep.passed
+ assert rep.skipped
+ #assert rep.skipped.reason == "hello"
+        #assert rep.skipped.location.lineno == 3
+ assert len(reports) == 2
+ assert reports[1].passed # teardown
+
+ def test_failure_in_setup_function(self, testdir):
+ reports = testdir.runitem("""
+ import pytest
+ def setup_function(func):
+ raise ValueError(42)
+ def test_func():
+ pass
+ """)
+ rep = reports[0]
+ assert not rep.skipped
+ assert not rep.passed
+ assert rep.failed
+ assert rep.when == "setup"
+ assert len(reports) == 2
+
+ def test_failure_in_teardown_function(self, testdir):
+ reports = testdir.runitem("""
+ import pytest
+ def teardown_function(func):
+ raise ValueError(42)
+ def test_func():
+ pass
+ """)
+ print(reports)
+ assert len(reports) == 3
+ rep = reports[2]
+ assert not rep.skipped
+ assert not rep.passed
+ assert rep.failed
+ assert rep.when == "teardown"
+ #assert rep.longrepr.reprcrash.lineno == 3
+ #assert rep.longrepr.reprtraceback.reprentries
+
+ def test_custom_failure_repr(self, testdir):
+ testdir.makepyfile(conftest="""
+ import pytest
+ class Function(pytest.Function):
+ def repr_failure(self, excinfo):
+ return "hello"
+ """)
+ reports = testdir.runitem("""
+ import pytest
+ def test_func():
+ assert 0
+ """)
+ rep = reports[1]
+ assert not rep.skipped
+ assert not rep.passed
+ assert rep.failed
+ #assert rep.outcome.when == "call"
+ #assert rep.failed.where.lineno == 3
+ #assert rep.failed.where.path.basename == "test_func.py"
+ #assert rep.failed.failurerepr == "hello"
+
+ def test_teardown_final_returncode(self, testdir):
+ rec = testdir.inline_runsource("""
+ def test_func():
+ pass
+ def teardown_function(func):
+ raise ValueError(42)
+ """)
+ assert rec.ret == 1
+
+ def test_exact_teardown_issue90(self, testdir):
+ rec = testdir.inline_runsource("""
+ import pytest
+
+ class TestClass:
+ def test_method(self):
+ pass
+ def teardown_class(cls):
+ raise Exception()
+
+ def test_func():
+ import sys
+                # on python2 exc_info is kept until a function exits,
+                # so we would end up calling test functions while
+                # sys.exc_info would return the IndexError
+                # from guessing the last item
+ excinfo = sys.exc_info()
+ import traceback
+ assert excinfo[0] is None, \
+ traceback.format_exception(*excinfo)
+ def teardown_function(func):
+ raise ValueError(42)
+ """)
+ reps = rec.getreports("pytest_runtest_logreport")
+ print (reps)
+ for i in range(2):
+ assert reps[i].nodeid.endswith("test_method")
+ assert reps[i].passed
+ assert reps[2].when == "teardown"
+ assert reps[2].failed
+ assert len(reps) == 6
+ for i in range(3,5):
+ assert reps[i].nodeid.endswith("test_func")
+ assert reps[i].passed
+ assert reps[5].when == "teardown"
+ assert reps[5].nodeid.endswith("test_func")
+ assert reps[5].failed
+
+ def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
+ testdir.makepyfile(conftest="""
+ import pytest
+ class Function(pytest.Function):
+ def repr_failure(self, excinfo):
+ assert 0
+ """)
+ reports = testdir.runitem("""
+ def setup_function(func):
+ raise ValueError(42)
+ def test_func():
+ pass
+ """)
+ assert len(reports) == 2
+ rep = reports[0]
+ print(rep)
+ assert not rep.skipped
+ assert not rep.passed
+ assert rep.failed
+ #assert rep.outcome.when == "setup"
+ #assert rep.outcome.where.lineno == 3
+ #assert rep.outcome.where.path.basename == "test_func.py"
+        #assert isinstance(rep.failed.failurerepr, PythonFailureRepr)
+
+ def test_systemexit_does_not_bail_out(self, testdir):
+ try:
+ reports = testdir.runitem("""
+ def test_func():
+ raise SystemExit(42)
+ """)
+ except SystemExit:
+ pytest.fail("runner did not catch SystemExit")
+ rep = reports[1]
+ assert rep.failed
+ assert rep.when == "call"
+
+ def test_exit_propagates(self, testdir):
+ try:
+ testdir.runitem("""
+ import pytest
+ def test_func():
+ raise pytest.exit.Exception()
+ """)
+ except pytest.exit.Exception:
+ pass
+ else:
+ pytest.fail("did not raise")
+
+class TestExecutionNonForked(BaseFunctionalTests):
+ def getrunner(self):
+ def f(item):
+ return runner.runtestprotocol(item, log=False)
+ return f
+
+ def test_keyboardinterrupt_propagates(self, testdir):
+ try:
+ testdir.runitem("""
+ def test_func():
+ raise KeyboardInterrupt("fake")
+ """)
+ except KeyboardInterrupt:
+ pass
+ else:
+ pytest.fail("did not raise")
+
+class TestExecutionForked(BaseFunctionalTests):
+ pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')")
+
+ def getrunner(self):
+ # XXX re-arrange this test to live in pytest-xdist
+ boxed = pytest.importorskip("xdist.boxed")
+ return boxed.forked_run_report
+
+ def test_suicide(self, testdir):
+ reports = testdir.runitem("""
+ def test_func():
+ import os
+ os.kill(os.getpid(), 15)
+ """)
+ rep = reports[0]
+ assert rep.failed
+ assert rep.when == "???"
+
+class TestSessionReports:
+ def test_collect_result(self, testdir):
+ col = testdir.getmodulecol("""
+ def test_func1():
+ pass
+ class TestClass:
+ pass
+ """)
+ rep = runner.collect_one_node(col)
+ assert not rep.failed
+ assert not rep.skipped
+ assert rep.passed
+ locinfo = rep.location
+ assert locinfo[0] == col.fspath.basename
+ assert not locinfo[1]
+ assert locinfo[2] == col.fspath.basename
+ res = rep.result
+ assert len(res) == 2
+ assert res[0].name == "test_func1"
+ assert res[1].name == "TestClass"
+
+ def test_skip_at_module_scope(self, testdir):
+ col = testdir.getmodulecol("""
+ import pytest
+ pytest.skip("hello")
+ def test_func():
+ pass
+ """)
+ rep = main.collect_one_node(col)
+ assert not rep.failed
+ assert not rep.passed
+ assert rep.skipped
+
+
+reporttypes = [
+ runner.BaseReport,
+ runner.TestReport,
+ runner.TeardownErrorReport,
+ runner.CollectReport,
+]
+
+@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
+def test_report_extra_parameters(reporttype):
+ if hasattr(py.std.inspect, 'signature'):
+ args = list(py.std.inspect.signature(reporttype.__init__).parameters.keys())[1:]
+ else:
+ args = py.std.inspect.getargspec(reporttype.__init__)[0][1:]
+ basekw = dict.fromkeys(args, [])
+ report = reporttype(newthing=1, **basekw)
+ assert report.newthing == 1
+
+def test_callinfo():
+ ci = runner.CallInfo(lambda: 0, '123')
+ assert ci.when == "123"
+ assert ci.result == 0
+ assert "result" in repr(ci)
+ ci = runner.CallInfo(lambda: 0/0, '123')
+ assert ci.when == "123"
+ assert not hasattr(ci, 'result')
+ assert ci.excinfo
+ assert "exc" in repr(ci)
+
+# design question: do we want general hooks in python files?
+# then something like the following functional test makes sense
+@pytest.mark.xfail
+def test_runtest_in_module_ordering(testdir):
+ p1 = testdir.makepyfile("""
+ def pytest_runtest_setup(item): # runs after class-level!
+ item.function.mylist.append("module")
+ class TestClass:
+ def pytest_runtest_setup(self, item):
+ assert not hasattr(item.function, 'mylist')
+ item.function.mylist = ['class']
+ def pytest_funcarg__mylist(self, request):
+ return request.function.mylist
+ def pytest_runtest_call(self, item, __multicall__):
+ try:
+ __multicall__.execute()
+ except ValueError:
+ pass
+ def test_hello1(self, mylist):
+ assert mylist == ['class', 'module'], mylist
+ raise ValueError()
+ def test_hello2(self, mylist):
+ assert mylist == ['class', 'module'], mylist
+ def pytest_runtest_teardown(item):
+ del item.function.mylist
+ """)
+ result = testdir.runpytest(p1)
+ result.stdout.fnmatch_lines([
+ "*2 passed*"
+ ])
+
+
+def test_outcomeexception_exceptionattributes():
+ outcome = runner.OutcomeException('test')
+ assert outcome.args[0] == outcome.msg
+
+def test_pytest_exit():
+ try:
+ pytest.exit("hello")
+ except pytest.exit.Exception:
+ excinfo = _pytest._code.ExceptionInfo()
+ assert excinfo.errisinstance(KeyboardInterrupt)
+
+def test_pytest_fail():
+ try:
+ pytest.fail("hello")
+ except pytest.fail.Exception:
+ excinfo = _pytest._code.ExceptionInfo()
+ s = excinfo.exconly(tryshort=True)
+ assert s.startswith("Failed")
+
+def test_pytest_fail_notrace(testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_hello():
+ pytest.fail("hello", pytrace=False)
+ def teardown_function(function):
+ pytest.fail("world", pytrace=False)
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "world",
+ "hello",
+ ])
+ assert 'def teardown_function' not in result.stdout.str()
+
+
+@pytest.mark.parametrize('str_prefix', ['u', ''])
+def test_pytest_fail_notrace_non_ascii(testdir, str_prefix):
+ """Fix pytest.fail with pytrace=False with non-ascii characters (#1178).
+
+ This tests with native and unicode strings containing non-ascii chars.
+ """
+ testdir.makepyfile(u"""
+ # coding: utf-8
+ import pytest
+
+ def test_hello():
+ pytest.fail(%s'oh oh: ☺', pytrace=False)
+ """ % str_prefix)
+ result = testdir.runpytest()
+ if sys.version_info[0] >= 3:
+ result.stdout.fnmatch_lines(['*test_hello*', "oh oh: ☺"])
+ else:
+ result.stdout.fnmatch_lines(['*test_hello*', "oh oh: *"])
+ assert 'def test_hello' not in result.stdout.str()
+
+
+def test_pytest_no_tests_collected_exit_status(testdir):
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('*collected 0 items*')
+ assert result.ret == main.EXIT_NOTESTSCOLLECTED
+
+ testdir.makepyfile(test_foo="""
+ def test_foo():
+ assert 1
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines('*collected 1 items*')
+ result.stdout.fnmatch_lines('*1 passed*')
+ assert result.ret == main.EXIT_OK
+
+ result = testdir.runpytest('-k nonmatch')
+ result.stdout.fnmatch_lines('*collected 1 items*')
+ result.stdout.fnmatch_lines('*1 deselected*')
+ assert result.ret == main.EXIT_NOTESTSCOLLECTED
+
+
+def test_exception_printing_skip():
+ try:
+ pytest.skip("hello")
+ except pytest.skip.Exception:
+ excinfo = _pytest._code.ExceptionInfo()
+ s = excinfo.exconly(tryshort=True)
+ assert s.startswith("Skipped")
+
+def test_importorskip(monkeypatch):
+ importorskip = pytest.importorskip
+ def f():
+ importorskip("asdlkj")
+ try:
+ sys = importorskip("sys") # noqa
+ assert sys == py.std.sys
+ #path = pytest.importorskip("os.path")
+ #assert path == py.std.os.path
+ excinfo = pytest.raises(pytest.skip.Exception, f)
+ path = py.path.local(excinfo.getrepr().reprcrash.path)
+ # check that importorskip reports the actual call
+ # in this test the test_runner.py file
+ assert path.purebasename == "test_runner"
+ pytest.raises(SyntaxError, "pytest.importorskip('x y z')")
+ pytest.raises(SyntaxError, "pytest.importorskip('x=y')")
+ mod = py.std.types.ModuleType("hello123")
+ mod.__version__ = "1.3"
+ monkeypatch.setitem(sys.modules, "hello123", mod)
+ pytest.raises(pytest.skip.Exception, """
+ pytest.importorskip("hello123", minversion="1.3.1")
+ """)
+ mod2 = pytest.importorskip("hello123", minversion="1.3")
+ assert mod2 == mod
+ except pytest.skip.Exception:
+ print(_pytest._code.ExceptionInfo())
+ pytest.fail("spurious skip")
+
+def test_importorskip_imports_last_module_part():
+ ospath = pytest.importorskip("os.path")
+ assert os.path == ospath
+
+def test_importorskip_dev_module(monkeypatch):
+ try:
+ mod = py.std.types.ModuleType("mockmodule")
+ mod.__version__ = '0.13.0.dev-43290'
+ monkeypatch.setitem(sys.modules, 'mockmodule', mod)
+ mod2 = pytest.importorskip('mockmodule', minversion='0.12.0')
+ assert mod2 == mod
+ pytest.raises(pytest.skip.Exception, """
+ pytest.importorskip('mockmodule1', minversion='0.14.0')""")
+ except pytest.skip.Exception:
+ print(_pytest._code.ExceptionInfo())
+ pytest.fail("spurious skip")
+
+
+def test_pytest_cmdline_main(testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def test_hello():
+ assert 1
+ if __name__ == '__main__':
+ pytest.cmdline.main([__file__])
+ """)
+ import subprocess
+ popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
+ popen.communicate()
+ ret = popen.wait()
+ assert ret == 0
+
+
+def test_unicode_in_longrepr(testdir):
+ testdir.makeconftest("""
+ import py
+ def pytest_runtest_makereport(__multicall__):
+ rep = __multicall__.execute()
+ if rep.when == "call":
+ rep.longrepr = py.builtin._totext("\\xc3\\xa4", "utf8")
+ return rep
+ """)
+ testdir.makepyfile("""
+ def test_out():
+ assert 0
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 1
+ assert "UnicodeEncodeError" not in result.stderr.str()
+
+
+def test_failure_in_setup(testdir):
+ testdir.makepyfile("""
+ def setup_module():
+ 0/0
+ def test_func():
+ pass
+ """)
+ result = testdir.runpytest("--tb=line")
+ assert "def setup_module" not in result.stdout.str()
+
+
+def test_makereport_getsource(testdir):
+ testdir.makepyfile("""
+ def test_foo():
+ if False: pass
+ else: assert False
+ """)
+ result = testdir.runpytest()
+ assert 'INTERNALERROR' not in result.stdout.str()
+ result.stdout.fnmatch_lines(['*else: assert False*'])
+
+
+def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
+ """Test that exception in dynamically generated code doesn't break getting the source line."""
+ import inspect
+ original_findsource = inspect.findsource
+ def findsource(obj, *args, **kwargs):
+ # Can be triggered by dynamically created functions
+ if obj.__name__ == 'foo':
+ raise IndexError()
+ return original_findsource(obj, *args, **kwargs)
+ monkeypatch.setattr(inspect, 'findsource', findsource)
+
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture
+ def foo(missing):
+ pass
+
+ def test_fix(foo):
+ assert False
+ """)
+ result = testdir.runpytest('-vv')
+ assert 'INTERNALERROR' not in result.stdout.str()
+ result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"])
+
+
+def test_store_except_info_on_error():
+ """ Test that upon test failure, the exception info is stored on
+ sys.last_traceback and friends.
+ """
+ # Simulate item that raises a specific exception
+ class ItemThatRaises:
+ def runtest(self):
+ raise IndexError('TEST')
+ try:
+ runner.pytest_runtest_call(ItemThatRaises())
+ except IndexError:
+ pass
+ # Check that exception info is stored on sys
+ assert sys.last_type is IndexError
+ assert sys.last_value.args[0] == 'TEST'
+ assert sys.last_traceback
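The CallInfo contract asserted in test_callinfo() above (a successful call records .result; a raising call records .excinfo and leaves .result unset) can be reproduced in isolation. The two lambdas below are illustrative stand-ins; the rest is the runner API the test already uses:

    from _pytest import runner

    ok = runner.CallInfo(lambda: 42, 'call')
    assert ok.when == 'call' and ok.result == 42       # result captured on success

    bad = runner.CallInfo(lambda: 1 / 0, 'call')
    assert bad.excinfo and not hasattr(bad, 'result')  # excinfo captured, no result on failure
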
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_runner_xunit.py b/testing/web-platform/tests/tools/pytest/testing/test_runner_xunit.py
new file mode 100644
index 000000000..f32a1311b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_runner_xunit.py
@@ -0,0 +1,252 @@
+#
+# test correct setup/teardowns at
+# module, class, and instance level
+
+def test_module_and_function_setup(testdir):
+ reprec = testdir.inline_runsource("""
+ modlevel = []
+ def setup_module(module):
+ assert not modlevel
+ module.modlevel.append(42)
+
+ def teardown_module(module):
+ modlevel.pop()
+
+ def setup_function(function):
+ function.answer = 17
+
+ def teardown_function(function):
+ del function.answer
+
+ def test_modlevel():
+ assert modlevel[0] == 42
+ assert test_modlevel.answer == 17
+
+ class TestFromClass:
+ def test_module(self):
+ assert modlevel[0] == 42
+ assert not hasattr(test_modlevel, 'answer')
+ """)
+ rep = reprec.matchreport("test_modlevel")
+ assert rep.passed
+ rep = reprec.matchreport("test_module")
+ assert rep.passed
+
+def test_module_setup_failure_no_teardown(testdir):
+ reprec = testdir.inline_runsource("""
+ l = []
+ def setup_module(module):
+ l.append(1)
+ 0/0
+
+ def test_nothing():
+ pass
+
+ def teardown_module(module):
+ l.append(2)
+ """)
+ reprec.assertoutcome(failed=1)
+ calls = reprec.getcalls("pytest_runtest_setup")
+ assert calls[0].item.module.l == [1]
+
+def test_setup_function_failure_no_teardown(testdir):
+ reprec = testdir.inline_runsource("""
+ modlevel = []
+ def setup_function(function):
+ modlevel.append(1)
+ 0/0
+
+ def teardown_function(module):
+ modlevel.append(2)
+
+ def test_func():
+ pass
+ """)
+ calls = reprec.getcalls("pytest_runtest_setup")
+ assert calls[0].item.module.modlevel == [1]
+
+def test_class_setup(testdir):
+ reprec = testdir.inline_runsource("""
+ class TestSimpleClassSetup:
+ clslevel = []
+ def setup_class(cls):
+ cls.clslevel.append(23)
+
+ def teardown_class(cls):
+ cls.clslevel.pop()
+
+ def test_classlevel(self):
+ assert self.clslevel[0] == 23
+
+ class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
+ def test_classlevel_anothertime(self):
+ assert self.clslevel == [23]
+
+ def test_cleanup():
+ assert not TestSimpleClassSetup.clslevel
+ assert not TestInheritedClassSetupStillWorks.clslevel
+ """)
+ reprec.assertoutcome(passed=1+2+1)
+
+def test_class_setup_failure_no_teardown(testdir):
+ reprec = testdir.inline_runsource("""
+ class TestSimpleClassSetup:
+ clslevel = []
+ def setup_class(cls):
+ 0/0
+
+ def teardown_class(cls):
+ cls.clslevel.append(1)
+
+ def test_classlevel(self):
+ pass
+
+ def test_cleanup():
+ assert not TestSimpleClassSetup.clslevel
+ """)
+ reprec.assertoutcome(failed=1, passed=1)
+
+def test_method_setup(testdir):
+ reprec = testdir.inline_runsource("""
+ class TestSetupMethod:
+ def setup_method(self, meth):
+ self.methsetup = meth
+ def teardown_method(self, meth):
+ del self.methsetup
+
+ def test_some(self):
+ assert self.methsetup == self.test_some
+
+ def test_other(self):
+ assert self.methsetup == self.test_other
+ """)
+ reprec.assertoutcome(passed=2)
+
+def test_method_setup_failure_no_teardown(testdir):
+ reprec = testdir.inline_runsource("""
+ class TestMethodSetup:
+ clslevel = []
+ def setup_method(self, method):
+ self.clslevel.append(1)
+ 0/0
+
+ def teardown_method(self, method):
+ self.clslevel.append(2)
+
+ def test_method(self):
+ pass
+
+ def test_cleanup():
+ assert TestMethodSetup.clslevel == [1]
+ """)
+ reprec.assertoutcome(failed=1, passed=1)
+
+def test_method_generator_setup(testdir):
+ reprec = testdir.inline_runsource("""
+ class TestSetupTeardownOnInstance:
+ def setup_class(cls):
+ cls.classsetup = True
+
+ def setup_method(self, method):
+ self.methsetup = method
+
+ def test_generate(self):
+ assert self.classsetup
+ assert self.methsetup == self.test_generate
+ yield self.generated, 5
+ yield self.generated, 2
+
+ def generated(self, value):
+ assert self.classsetup
+ assert self.methsetup == self.test_generate
+ assert value == 5
+ """)
+ reprec.assertoutcome(passed=1, failed=1)
+
+def test_func_generator_setup(testdir):
+ reprec = testdir.inline_runsource("""
+ import sys
+
+ def setup_module(mod):
+ print ("setup_module")
+ mod.x = []
+
+ def setup_function(fun):
+ print ("setup_function")
+ x.append(1)
+
+ def teardown_function(fun):
+ print ("teardown_function")
+ x.pop()
+
+ def test_one():
+ assert x == [1]
+ def check():
+ print ("check")
+ sys.stderr.write("e\\n")
+ assert x == [1]
+ yield check
+ assert x == [1]
+ """)
+ rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
+ assert rep.passed
+
+def test_method_setup_uses_fresh_instances(testdir):
+ reprec = testdir.inline_runsource("""
+ class TestSelfState1:
+ memory = []
+ def test_hello(self):
+ self.memory.append(self)
+
+ def test_afterhello(self):
+ assert self != self.memory[0]
+ """)
+ reprec.assertoutcome(passed=2, failed=0)
+
+def test_setup_that_skips_calledagain(testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def setup_module(mod):
+ pytest.skip("x")
+ def test_function1():
+ pass
+ def test_function2():
+ pass
+ """)
+ reprec = testdir.inline_run(p)
+ reprec.assertoutcome(skipped=2)
+
+def test_setup_fails_again_on_all_tests(testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def setup_module(mod):
+ raise ValueError(42)
+ def test_function1():
+ pass
+ def test_function2():
+ pass
+ """)
+ reprec = testdir.inline_run(p)
+ reprec.assertoutcome(failed=2)
+
+def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def setup_module(mod):
+ raise ValueError(42)
+ def pytest_funcarg__hello(request):
+ raise ValueError("xyz43")
+ def test_function1(hello):
+ pass
+ def test_function2(hello):
+ pass
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*function1*",
+ "*ValueError*42*",
+ "*function2*",
+ "*ValueError*42*",
+ "*2 error*"
+ ])
+ assert "xyz43" not in result.stdout.str()
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_session.py b/testing/web-platform/tests/tools/pytest/testing/test_session.py
new file mode 100644
index 000000000..76f804b4f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_session.py
@@ -0,0 +1,244 @@
+import pytest
+
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+
+class SessionTests:
+ def test_basic_testitem_events(self, testdir):
+ tfile = testdir.makepyfile("""
+ def test_one():
+ pass
+ def test_one_one():
+ assert 0
+ def test_other():
+ raise ValueError(23)
+ class TestClass:
+ def test_two(self, someargs):
+ pass
+ """)
+ reprec = testdir.inline_run(tfile)
+ passed, skipped, failed = reprec.listoutcomes()
+ assert len(skipped) == 0
+ assert len(passed) == 1
+ assert len(failed) == 3
+ end = lambda x: x.nodeid.split("::")[-1]
+ assert end(failed[0]) == "test_one_one"
+ assert end(failed[1]) == "test_other"
+ itemstarted = reprec.getcalls("pytest_itemcollected")
+ assert len(itemstarted) == 4
+ # XXX check for failing funcarg setup
+ #colreports = reprec.getcalls("pytest_collectreport")
+ #assert len(colreports) == 4
+ #assert colreports[1].report.failed
+
+ def test_nested_import_error(self, testdir):
+ tfile = testdir.makepyfile("""
+ import import_fails
+ def test_this():
+ assert import_fails.a == 1
+ """, import_fails="""
+ import does_not_work
+ a = 1
+ """)
+ reprec = testdir.inline_run(tfile)
+ l = reprec.getfailedcollections()
+ assert len(l) == 1
+ out = l[0].longrepr.reprcrash.message
+ assert out.find('does_not_work') != -1
+
+ def test_raises_output(self, testdir):
+ reprec = testdir.inline_runsource("""
+ import pytest
+ def test_raises_doesnt():
+ pytest.raises(ValueError, int, "3")
+ """)
+ passed, skipped, failed = reprec.listoutcomes()
+ assert len(failed) == 1
+ out = failed[0].longrepr.reprcrash.message
+        if out.find("DID NOT RAISE") == -1:
+ print(out)
+ pytest.fail("incorrect raises() output")
+
+ def test_generator_yields_None(self, testdir):
+ reprec = testdir.inline_runsource("""
+ def test_1():
+ yield None
+ """)
+ failures = reprec.getfailedcollections()
+ out = failures[0].longrepr.reprcrash.message
+ i = out.find('TypeError')
+ assert i != -1
+
+ def test_syntax_error_module(self, testdir):
+ reprec = testdir.inline_runsource("this is really not python")
+ l = reprec.getfailedcollections()
+ assert len(l) == 1
+ out = str(l[0].longrepr)
+ assert out.find(str('not python')) != -1
+
+ def test_exit_first_problem(self, testdir):
+ reprec = testdir.inline_runsource("""
+ def test_one(): assert 0
+ def test_two(): assert 0
+ """, '--exitfirst')
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 1
+ assert passed == skipped == 0
+
+ def test_maxfail(self, testdir):
+ reprec = testdir.inline_runsource("""
+ def test_one(): assert 0
+ def test_two(): assert 0
+ def test_three(): assert 0
+ """, '--maxfail=2')
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 2
+ assert passed == skipped == 0
+
+ def test_broken_repr(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ class BrokenRepr1:
+ foo=0
+ def __repr__(self):
+ raise Exception("Ha Ha fooled you, I'm a broken repr().")
+
+ class TestBrokenClass:
+ def test_explicit_bad_repr(self):
+ t = BrokenRepr1()
+ pytest.raises(Exception, 'repr(t)')
+
+ def test_implicit_bad_repr1(self):
+ t = BrokenRepr1()
+ assert t.foo == 1
+
+ """)
+ reprec = testdir.inline_run(p)
+ passed, skipped, failed = reprec.listoutcomes()
+ assert len(failed) == 1
+ out = failed[0].longrepr.reprcrash.message
+ assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #'
+
+ def test_skip_file_by_conftest(self, testdir):
+ testdir.makepyfile(conftest="""
+ import pytest
+ def pytest_collect_file():
+ pytest.skip("intentional")
+ """, test_file="""
+ def test_one(): pass
+ """)
+ try:
+ reprec = testdir.inline_run(testdir.tmpdir)
+ except pytest.skip.Exception:
+ pytest.fail("wrong skipped caught")
+ reports = reprec.getreports("pytest_collectreport")
+ assert len(reports) == 1
+ assert reports[0].skipped
+
+class TestNewSession(SessionTests):
+
+ def test_order_of_execution(self, testdir):
+ reprec = testdir.inline_runsource("""
+ l = []
+ def test_1():
+ l.append(1)
+ def test_2():
+ l.append(2)
+ def test_3():
+ assert l == [1,2]
+ class Testmygroup:
+ reslist = l
+ def test_1(self):
+ self.reslist.append(1)
+ def test_2(self):
+ self.reslist.append(2)
+ def test_3(self):
+ self.reslist.append(3)
+ def test_4(self):
+ assert self.reslist == [1,2,1,2,3]
+ """)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == skipped == 0
+ assert passed == 7
+ # also test listnames() here ...
+
+ def test_collect_only_with_various_situations(self, testdir):
+ p = testdir.makepyfile(
+ test_one="""
+ def test_one():
+ raise ValueError()
+
+ class TestX:
+ def test_method_one(self):
+ pass
+
+ class TestY(TestX):
+ pass
+ """,
+ test_two="""
+ import pytest
+ pytest.skip('xxx')
+ """,
+ test_three="xxxdsadsadsadsa",
+ __init__=""
+ )
+ reprec = testdir.inline_run('--collect-only', p.dirpath())
+
+ itemstarted = reprec.getcalls("pytest_itemcollected")
+ assert len(itemstarted) == 3
+ assert not reprec.getreports("pytest_runtest_logreport")
+ started = reprec.getcalls("pytest_collectstart")
+ finished = reprec.getreports("pytest_collectreport")
+ assert len(started) == len(finished)
+ assert len(started) == 8 # XXX extra TopCollector
+ colfail = [x for x in finished if x.failed]
+ colskipped = [x for x in finished if x.skipped]
+ assert len(colfail) == 1
+ assert len(colskipped) == 1
+
+ def test_minus_x_import_error(self, testdir):
+ testdir.makepyfile(__init__="")
+ testdir.makepyfile(test_one="xxxx", test_two="yyyy")
+ reprec = testdir.inline_run("-x", testdir.tmpdir)
+ finished = reprec.getreports("pytest_collectreport")
+ colfail = [x for x in finished if x.failed]
+ assert len(colfail) == 1
+
+
+def test_plugin_specify(testdir):
+ pytest.raises(ImportError, """
+ testdir.parseconfig("-p", "nqweotexistent")
+ """)
+ #pytest.raises(ImportError,
+ # "config.do_configure(config)"
+ #)
+
+def test_plugin_already_exists(testdir):
+ config = testdir.parseconfig("-p", "terminal")
+ assert config.option.plugins == ['terminal']
+ config._do_configure()
+ config._ensure_unconfigure()
+
+def test_exclude(testdir):
+ hellodir = testdir.mkdir("hello")
+ hellodir.join("test_hello.py").write("x y syntaxerror")
+ hello2dir = testdir.mkdir("hello2")
+ hello2dir.join("test_hello2.py").write("x y syntaxerror")
+ testdir.makepyfile(test_ok="def test_pass(): pass")
+ result = testdir.runpytest("--ignore=hello", "--ignore=hello2")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+def test_sessionfinish_with_start(testdir):
+ testdir.makeconftest("""
+ import os
+ l = []
+ def pytest_sessionstart():
+ l.append(os.getcwd())
+ os.chdir("..")
+
+ def pytest_sessionfinish():
+ assert l[0] == os.getcwd()
+
+ """)
+ res = testdir.runpytest("--collect-only")
+ assert res.ret == EXIT_NOTESTSCOLLECTED
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_skipping.py b/testing/web-platform/tests/tools/pytest/testing/test_skipping.py
new file mode 100644
index 000000000..3464974e0
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_skipping.py
@@ -0,0 +1,917 @@
+import pytest
+import sys
+
+from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup
+from _pytest.runner import runtestprotocol
+
+
+class TestEvaluator:
+ def test_no_marker(self, testdir):
+ item = testdir.getitem("def test_func(): pass")
+ evalskipif = MarkEvaluator(item, 'skipif')
+ assert not evalskipif
+ assert not evalskipif.istrue()
+
+ def test_marked_no_args(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.xyz
+ def test_func():
+ pass
+ """)
+ ev = MarkEvaluator(item, 'xyz')
+ assert ev
+ assert ev.istrue()
+ expl = ev.getexplanation()
+ assert expl == ""
+ assert not ev.get("run", False)
+
+ def test_marked_one_arg(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.xyz("hasattr(os, 'sep')")
+ def test_func():
+ pass
+ """)
+ ev = MarkEvaluator(item, 'xyz')
+ assert ev
+ assert ev.istrue()
+ expl = ev.getexplanation()
+ assert expl == "condition: hasattr(os, 'sep')"
+
+ @pytest.mark.skipif('sys.version_info[0] >= 3')
+ def test_marked_one_arg_unicode(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.xyz(u"hasattr(os, 'sep')")
+ def test_func():
+ pass
+ """)
+ ev = MarkEvaluator(item, 'xyz')
+ assert ev
+ assert ev.istrue()
+ expl = ev.getexplanation()
+ assert expl == "condition: hasattr(os, 'sep')"
+
+ def test_marked_one_arg_with_reason(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
+ def test_func():
+ pass
+ """)
+ ev = MarkEvaluator(item, 'xyz')
+ assert ev
+ assert ev.istrue()
+ expl = ev.getexplanation()
+ assert expl == "hello world"
+ assert ev.get("attr") == 2
+
+ def test_marked_one_arg_twice(self, testdir):
+ lines = [
+ '''@pytest.mark.skipif("not hasattr(os, 'murks')")''',
+ '''@pytest.mark.skipif("hasattr(os, 'murks')")'''
+ ]
+ for i in range(0, 2):
+ item = testdir.getitem("""
+ import pytest
+ %s
+ %s
+ def test_func():
+ pass
+ """ % (lines[i], lines[(i+1) %2]))
+ ev = MarkEvaluator(item, 'skipif')
+ assert ev
+ assert ev.istrue()
+ expl = ev.getexplanation()
+ assert expl == "condition: not hasattr(os, 'murks')"
+
+ def test_marked_one_arg_twice2(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.skipif("hasattr(os, 'murks')")
+ @pytest.mark.skipif("not hasattr(os, 'murks')")
+ def test_func():
+ pass
+ """)
+ ev = MarkEvaluator(item, 'skipif')
+ assert ev
+ assert ev.istrue()
+ expl = ev.getexplanation()
+ assert expl == "condition: not hasattr(os, 'murks')"
+
+ def test_marked_skip_with_not_string(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.skipif(False)
+ def test_func():
+ pass
+ """)
+ ev = MarkEvaluator(item, 'skipif')
+ exc = pytest.raises(pytest.fail.Exception, ev.istrue)
+ assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in exc.value.msg
+
+ def test_skipif_class(self, testdir):
+ item, = testdir.getitems("""
+ import pytest
+ class TestClass:
+ pytestmark = pytest.mark.skipif("config._hackxyz")
+ def test_func(self):
+ pass
+ """)
+ item.config._hackxyz = 3
+ ev = MarkEvaluator(item, 'skipif')
+ assert ev.istrue()
+ expl = ev.getexplanation()
+ assert expl == "condition: config._hackxyz"
+
+
+class TestXFail:
+
+ @pytest.mark.parametrize('strict', [True, False])
+ def test_xfail_simple(self, testdir, strict):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.xfail(strict=%s)
+ def test_func():
+ assert 0
+ """ % strict)
+ reports = runtestprotocol(item, log=False)
+ assert len(reports) == 3
+ callreport = reports[1]
+ assert callreport.skipped
+ assert callreport.wasxfail == ""
+
+ def test_xfail_xpassed(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.xfail
+ def test_func():
+ assert 1
+ """)
+ reports = runtestprotocol(item, log=False)
+ assert len(reports) == 3
+ callreport = reports[1]
+ assert callreport.failed
+ assert callreport.wasxfail == ""
+
+ def test_xfail_run_anyway(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xfail
+ def test_func():
+ assert 0
+ def test_func2():
+ pytest.xfail("hello")
+ """)
+ result = testdir.runpytest("--runxfail")
+ result.stdout.fnmatch_lines([
+ "*def test_func():*",
+ "*assert 0*",
+ "*1 failed*1 pass*",
+ ])
+
+ def test_xfail_evalfalse_but_fails(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.xfail('False')
+ def test_func():
+ assert 0
+ """)
+ reports = runtestprotocol(item, log=False)
+ callreport = reports[1]
+ assert callreport.failed
+ assert not hasattr(callreport, "wasxfail")
+ assert 'xfail' in callreport.keywords
+
+ def test_xfail_not_report_default(self, testdir):
+ p = testdir.makepyfile(test_one="""
+ import pytest
+ @pytest.mark.xfail
+ def test_this():
+ assert 0
+ """)
+ testdir.runpytest(p, '-v')
+ #result.stdout.fnmatch_lines([
+ # "*HINT*use*-r*"
+ #])
+
+ def test_xfail_not_run_xfail_reporting(self, testdir):
+ p = testdir.makepyfile(test_one="""
+ import pytest
+ @pytest.mark.xfail(run=False, reason="noway")
+ def test_this():
+ assert 0
+ @pytest.mark.xfail("True", run=False)
+ def test_this_true():
+ assert 0
+ @pytest.mark.xfail("False", run=False, reason="huh")
+ def test_this_false():
+ assert 1
+ """)
+ result = testdir.runpytest(p, '--report=xfailed', )
+ result.stdout.fnmatch_lines([
+ "*test_one*test_this*",
+ "*NOTRUN*noway",
+ "*test_one*test_this_true*",
+ "*NOTRUN*condition:*True*",
+ "*1 passed*",
+ ])
+
+ def test_xfail_not_run_no_setup_run(self, testdir):
+ p = testdir.makepyfile(test_one="""
+ import pytest
+ @pytest.mark.xfail(run=False, reason="hello")
+ def test_this():
+ assert 0
+ def setup_module(mod):
+ raise ValueError(42)
+ """)
+ result = testdir.runpytest(p, '--report=xfailed', )
+ result.stdout.fnmatch_lines([
+ "*test_one*test_this*",
+ "*NOTRUN*hello",
+ "*1 xfailed*",
+ ])
+
+ def test_xfail_xpass(self, testdir):
+ p = testdir.makepyfile(test_one="""
+ import pytest
+ @pytest.mark.xfail
+ def test_that():
+ assert 1
+ """)
+ result = testdir.runpytest(p, '-rX')
+ result.stdout.fnmatch_lines([
+ "*XPASS*test_that*",
+ "*1 xpassed*"
+ ])
+ assert result.ret == 0
+
+ def test_xfail_imperative(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def test_this():
+ pytest.xfail("hello")
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*1 xfailed*",
+ ])
+ result = testdir.runpytest(p, "-rx")
+ result.stdout.fnmatch_lines([
+ "*XFAIL*test_this*",
+ "*reason:*hello*",
+ ])
+ result = testdir.runpytest(p, "--runxfail")
+ result.stdout.fnmatch_lines("*1 pass*")
+
+ def test_xfail_imperative_in_setup_function(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def setup_function(function):
+ pytest.xfail("hello")
+
+ def test_this():
+ assert 0
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*1 xfailed*",
+ ])
+ result = testdir.runpytest(p, "-rx")
+ result.stdout.fnmatch_lines([
+ "*XFAIL*test_this*",
+ "*reason:*hello*",
+ ])
+ result = testdir.runpytest(p, "--runxfail")
+ result.stdout.fnmatch_lines("""
+ *def test_this*
+ *1 fail*
+ """)
+
+ def xtest_dynamic_xfail_set_during_setup(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def setup_function(function):
+ pytest.mark.xfail(function)
+ def test_this():
+ assert 0
+ def test_that():
+ assert 1
+ """)
+ result = testdir.runpytest(p, '-rxX')
+ result.stdout.fnmatch_lines([
+ "*XFAIL*test_this*",
+ "*XPASS*test_that*",
+ ])
+
+ def test_dynamic_xfail_no_run(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def pytest_funcarg__arg(request):
+ request.applymarker(pytest.mark.xfail(run=False))
+ def test_this(arg):
+ assert 0
+ """)
+ result = testdir.runpytest(p, '-rxX')
+ result.stdout.fnmatch_lines([
+ "*XFAIL*test_this*",
+ "*NOTRUN*",
+ ])
+
+ def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ def pytest_funcarg__arg(request):
+ request.applymarker(pytest.mark.xfail)
+ def test_this2(arg):
+ assert 0
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*1 xfailed*",
+ ])
+
+
+ @pytest.mark.parametrize('expected, actual, matchline',
+ [('TypeError', 'TypeError', "*1 xfailed*"),
+ ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"),
+ ('TypeError', 'IndexError', "*1 failed*"),
+ ('(AttributeError, TypeError)', 'IndexError', "*1 failed*"),
+ ])
+ def test_xfail_raises(self, expected, actual, matchline, testdir):
+ p = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xfail(raises=%s)
+ def test_raises():
+ raise %s()
+ """ % (expected, actual))
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ matchline,
+ ])
+
+ def test_strict_sanity(self, testdir):
+ """sanity check for xfail(strict=True): a failing test should behave
+ exactly like a normal xfail.
+ """
+ p = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xfail(reason='unsupported feature', strict=True)
+ def test_foo():
+ assert 0
+ """)
+ result = testdir.runpytest(p, '-rxX')
+ result.stdout.fnmatch_lines([
+ '*XFAIL*',
+ '*unsupported feature*',
+ ])
+ assert result.ret == 0
+
+ @pytest.mark.parametrize('strict', [True, False])
+ def test_strict_xfail(self, testdir, strict):
+ p = testdir.makepyfile("""
+ import pytest
+
+ @pytest.mark.xfail(reason='unsupported feature', strict=%s)
+ def test_foo():
+ with open('foo_executed', 'w'): pass # make sure test executes
+ """ % strict)
+ result = testdir.runpytest(p, '-rxX')
+ if strict:
+ result.stdout.fnmatch_lines([
+ '*test_foo*',
+ '*XPASS(strict)*unsupported feature*',
+ ])
+ else:
+ result.stdout.fnmatch_lines([
+ '*test_strict_xfail*',
+ 'XPASS test_strict_xfail.py::test_foo unsupported feature',
+ ])
+ assert result.ret == (1 if strict else 0)
+ assert testdir.tmpdir.join('foo_executed').isfile()
+
+ @pytest.mark.parametrize('strict', [True, False])
+ def test_strict_xfail_condition(self, testdir, strict):
+ p = testdir.makepyfile("""
+ import pytest
+
+ @pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
+ def test_foo():
+ pass
+ """ % strict)
+ result = testdir.runpytest(p, '-rxX')
+ result.stdout.fnmatch_lines('*1 passed*')
+ assert result.ret == 0
+
+ @pytest.mark.parametrize('strict_val', ['true', 'false'])
+ def test_strict_xfail_default_from_file(self, testdir, strict_val):
+ testdir.makeini('''
+ [pytest]
+ xfail_strict = %s
+ ''' % strict_val)
+ p = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xfail(reason='unsupported feature')
+ def test_foo():
+ pass
+ """)
+ result = testdir.runpytest(p, '-rxX')
+ strict = strict_val == 'true'
+ result.stdout.fnmatch_lines('*1 failed*' if strict else '*1 xpassed*')
+ assert result.ret == (1 if strict else 0)
+
+
+class TestXFailwithSetupTeardown:
+ def test_failing_setup_issue9(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def setup_function(func):
+ assert 0
+
+ @pytest.mark.xfail
+ def test_func():
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*1 xfail*",
+ ])
+
+ def test_failing_teardown_issue9(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def teardown_function(func):
+ assert 0
+
+ @pytest.mark.xfail
+ def test_func():
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*1 xfail*",
+ ])
+
+
+class TestSkip:
+ def test_skip_class(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skip
+ class TestSomething(object):
+ def test_foo(self):
+ pass
+ def test_bar(self):
+ pass
+
+ def test_baz():
+ pass
+ """)
+ rec = testdir.inline_run()
+ rec.assertoutcome(skipped=2, passed=1)
+
+ def test_skips_on_false_string(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skip('False')
+ def test_foo():
+ pass
+ """)
+ rec = testdir.inline_run()
+ rec.assertoutcome(skipped=1)
+
+ def test_arg_as_reason(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skip('testing stuff')
+ def test_bar():
+ pass
+ """)
+ result = testdir.runpytest('-rs')
+ result.stdout.fnmatch_lines([
+ "*testing stuff*",
+ "*1 skipped*",
+ ])
+
+ def test_skip_no_reason(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skip
+ def test_foo():
+ pass
+ """)
+ result = testdir.runpytest('-rs')
+ result.stdout.fnmatch_lines([
+ "*unconditional skip*",
+ "*1 skipped*",
+ ])
+
+ def test_skip_with_reason(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skip(reason="for lolz")
+ def test_bar():
+ pass
+ """)
+ result = testdir.runpytest('-rs')
+ result.stdout.fnmatch_lines([
+ "*for lolz*",
+ "*1 skipped*",
+ ])
+
+ def test_only_skips_marked_test(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skip
+ def test_foo():
+ pass
+ @pytest.mark.skip(reason="nothing in particular")
+ def test_bar():
+ pass
+ def test_baz():
+ assert True
+ """)
+ result = testdir.runpytest('-rs')
+ result.stdout.fnmatch_lines([
+ "*nothing in particular*",
+ "*1 passed*2 skipped*",
+ ])
+
+class TestSkipif:
+ def test_skipif_conditional(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.skipif("hasattr(os, 'sep')")
+ def test_func():
+ pass
+ """) # noqa
+ x = pytest.raises(pytest.skip.Exception, lambda:
+ pytest_runtest_setup(item))
+ assert x.value.msg == "condition: hasattr(os, 'sep')"
+
+ @pytest.mark.parametrize('params', [
+ '"hasattr(sys, \'platform\')"',
+ 'True, reason="invalid platform"',
+ ])
+ def test_skipif_reporting(self, testdir, params):
+ p = testdir.makepyfile(test_foo="""
+ import pytest
+ @pytest.mark.skipif(%(params)s)
+ def test_that():
+ assert 0
+ """ % dict(params=params))
+ result = testdir.runpytest(p, '-s', '-rs')
+ result.stdout.fnmatch_lines([
+ "*SKIP*1*test_foo.py*platform*",
+ "*1 skipped*"
+ ])
+ assert result.ret == 0
+
+ @pytest.mark.parametrize('marker, msg1, msg2', [
+ ('skipif', 'SKIP', 'skipped'),
+ ('xfail', 'XPASS', 'xpassed'),
+ ])
+ def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2):
+ testdir.makepyfile(test_foo="""
+ import pytest
+ @pytest.mark.{marker}(False, reason='first_condition')
+ @pytest.mark.{marker}(True, reason='second_condition')
+ def test_foobar():
+ assert 1
+ """.format(marker=marker))
+ result = testdir.runpytest('-s', '-rsxX')
+ result.stdout.fnmatch_lines([
+ "*{msg1}*test_foo.py*second_condition*".format(msg1=msg1),
+ "*1 {msg2}*".format(msg2=msg2),
+ ])
+ assert result.ret == 0
+
+
+def test_skip_not_report_default(testdir):
+ p = testdir.makepyfile(test_one="""
+ import pytest
+ def test_this():
+ pytest.skip("hello")
+ """)
+ result = testdir.runpytest(p, '-v')
+ result.stdout.fnmatch_lines([
+ #"*HINT*use*-r*",
+ "*1 skipped*",
+ ])
+
+
+def test_skipif_class(testdir):
+ p = testdir.makepyfile("""
+ import pytest
+
+ class TestClass:
+ pytestmark = pytest.mark.skipif("True")
+ def test_that(self):
+ assert 0
+ def test_though(self):
+ assert 0
+ """)
+ result = testdir.runpytest(p)
+ result.stdout.fnmatch_lines([
+ "*2 skipped*"
+ ])
+
+
+def test_skip_reasons_folding():
+ path = 'xyz'
+ lineno = 3
+ message = "justso"
+ longrepr = (path, lineno, message)
+
+ class X:
+ pass
+ ev1 = X()
+ ev1.when = "execute"
+ ev1.skipped = True
+ ev1.longrepr = longrepr
+
+ ev2 = X()
+ ev2.longrepr = longrepr
+ ev2.skipped = True
+
+ l = folded_skips([ev1, ev2])
+ assert len(l) == 1
+    num, fspath, reported_lineno, reason = l[0]
+ assert num == 2
+ assert fspath == path
+    assert reported_lineno == lineno
+ assert reason == message
+
+def test_skipped_reasons_functional(testdir):
+ testdir.makepyfile(
+ test_one="""
+ from conftest import doskip
+ def setup_function(func):
+ doskip()
+ def test_func():
+ pass
+ class TestClass:
+ def test_method(self):
+ doskip()
+ """,
+ test_two = """
+ from conftest import doskip
+ doskip()
+ """,
+ conftest = """
+ import pytest
+ def doskip():
+ pytest.skip('test')
+ """
+ )
+ result = testdir.runpytest('--report=skipped')
+ result.stdout.fnmatch_lines([
+ "*SKIP*3*conftest.py:3: test",
+ ])
+ assert result.ret == 0
+
+def test_reportchars(testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_1():
+ assert 0
+ @pytest.mark.xfail
+ def test_2():
+ assert 0
+ @pytest.mark.xfail
+ def test_3():
+ pass
+ def test_4():
+ pytest.skip("four")
+ """)
+ result = testdir.runpytest("-rfxXs")
+ result.stdout.fnmatch_lines([
+ "FAIL*test_1*",
+ "XFAIL*test_2*",
+ "XPASS*test_3*",
+ "SKIP*four*",
+ ])
+
+def test_reportchars_error(testdir):
+ testdir.makepyfile(
+ conftest="""
+ def pytest_runtest_teardown():
+ assert 0
+ """,
+ test_simple="""
+ def test_foo():
+ pass
+ """)
+ result = testdir.runpytest('-rE')
+ result.stdout.fnmatch_lines([
+ 'ERROR*test_foo*',
+ ])
+
+def test_reportchars_all(testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_1():
+ assert 0
+ @pytest.mark.xfail
+ def test_2():
+ assert 0
+ @pytest.mark.xfail
+ def test_3():
+ pass
+ def test_4():
+ pytest.skip("four")
+ """)
+ result = testdir.runpytest("-ra")
+ result.stdout.fnmatch_lines([
+ "FAIL*test_1*",
+ "SKIP*four*",
+ "XFAIL*test_2*",
+ "XPASS*test_3*",
+ ])
+
+def test_reportchars_all_error(testdir):
+ testdir.makepyfile(
+ conftest="""
+ def pytest_runtest_teardown():
+ assert 0
+ """,
+ test_simple="""
+ def test_foo():
+ pass
+ """)
+ result = testdir.runpytest('-ra')
+ result.stdout.fnmatch_lines([
+ 'ERROR*test_foo*',
+ ])
+
+@pytest.mark.xfail("hasattr(sys, 'pypy_version_info')")
+def test_errors_in_xfail_skip_expressions(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skipif("asd")
+ def test_nameerror():
+ pass
+ @pytest.mark.xfail("syntax error")
+ def test_syntax():
+ pass
+
+ def test_func():
+ pass
+ """)
+ result = testdir.runpytest()
+ markline = " ^"
+ if sys.platform.startswith("java"):
+ # XXX report this to java
+ markline = "*" + markline[8:]
+ result.stdout.fnmatch_lines([
+ "*ERROR*test_nameerror*",
+ "*evaluating*skipif*expression*",
+ "*asd*",
+ "*ERROR*test_syntax*",
+ "*evaluating*xfail*expression*",
+ " syntax error",
+ markline,
+ "SyntaxError: invalid syntax",
+ "*1 pass*2 error*",
+ ])
+
+def test_xfail_skipif_with_globals(testdir):
+ testdir.makepyfile("""
+ import pytest
+ x = 3
+ @pytest.mark.skipif("x == 3")
+ def test_skip1():
+ pass
+ @pytest.mark.xfail("x == 3")
+ def test_boolean():
+ assert 0
+ """)
+ result = testdir.runpytest("-rsx")
+ result.stdout.fnmatch_lines([
+ "*SKIP*x == 3*",
+ "*XFAIL*test_boolean*",
+ "*x == 3*",
+ ])
+
+def test_direct_gives_error(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skipif(True)
+ def test_skip1():
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*1 error*",
+ ])
+
+
+def test_default_markers(testdir):
+ result = testdir.runpytest("--markers")
+ result.stdout.fnmatch_lines([
+ "*skipif(*condition)*skip*",
+ "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
+ ])
+
+def test_xfail_test_setup_exception(testdir):
+ testdir.makeconftest("""
+ def pytest_runtest_setup():
+ 0 / 0
+ """)
+ p = testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xfail
+ def test_func():
+ assert 0
+ """)
+ result = testdir.runpytest(p)
+ assert result.ret == 0
+ assert 'xfailed' in result.stdout.str()
+ assert 'xpassed' not in result.stdout.str()
+
+def test_imperativeskip_on_xfail_test(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xfail
+ def test_that_fails():
+ assert 0
+
+ @pytest.mark.skipif("True")
+ def test_hello():
+ pass
+ """)
+ testdir.makeconftest("""
+ import pytest
+ def pytest_runtest_setup(item):
+ pytest.skip("abc")
+ """)
+ result = testdir.runpytest("-rsxX")
+ result.stdout.fnmatch_lines_random("""
+ *SKIP*abc*
+ *SKIP*condition: True*
+ *2 skipped*
+ """)
+
+class TestBooleanCondition:
+ def test_skipif(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skipif(True, reason="True123")
+ def test_func1():
+ pass
+ @pytest.mark.skipif(False, reason="True123")
+ def test_func2():
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("""
+ *1 passed*1 skipped*
+ """)
+
+ def test_skipif_noreason(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.skipif(True)
+ def test_func():
+ pass
+ """)
+ result = testdir.runpytest("-rs")
+ result.stdout.fnmatch_lines("""
+ *1 error*
+ """)
+
+ def test_xfail(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.xfail(True, reason="True123")
+ def test_func():
+ assert 0
+ """)
+ result = testdir.runpytest("-rxs")
+ result.stdout.fnmatch_lines("""
+ *XFAIL*
+ *True123*
+ *1 xfail*
+ """)
+
+
+def test_xfail_item(testdir):
+ # Ensure pytest.xfail works with non-Python Item
+ testdir.makeconftest("""
+ import pytest
+
+ class MyItem(pytest.Item):
+ nodeid = 'foo'
+ def runtest(self):
+ pytest.xfail("Expected Failure")
+
+ def pytest_collect_file(path, parent):
+ return MyItem("foo", parent)
+ """)
+ result = testdir.inline_run()
+ passed, skipped, failed = result.listoutcomes()
+ assert not failed
+ xfailed = [r for r in skipped if hasattr(r, 'wasxfail')]
+ assert xfailed
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_terminal.py b/testing/web-platform/tests/tools/pytest/testing/test_terminal.py
new file mode 100644
index 000000000..b43d6b379
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_terminal.py
@@ -0,0 +1,880 @@
+"""
+terminal reporting of the full testing process.
+"""
+import collections
+import sys
+
+import _pytest._pluggy as pluggy
+import _pytest._code
+import py
+import pytest
+from _pytest import runner
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt
+from _pytest.terminal import build_summary_stats_line, _plugin_nameversions
+
+
+def basic_run_report(item):
+ runner.call_and_report(item, "setup", log=False)
+ return runner.call_and_report(item, "call", log=False)
+
+DistInfo = collections.namedtuple('DistInfo', ['project_name', 'version'])
+
+
+class Option:
+ def __init__(self, verbose=False, fulltrace=False):
+ self.verbose = verbose
+ self.fulltrace = fulltrace
+
+ @property
+ def args(self):
+ l = []
+ if self.verbose:
+ l.append('-v')
+ if self.fulltrace:
+ l.append('--fulltrace')
+ return l
+
+def pytest_generate_tests(metafunc):
+ if "option" in metafunc.fixturenames:
+ metafunc.addcall(id="default",
+ funcargs={'option': Option(verbose=False)})
+ metafunc.addcall(id="verbose",
+ funcargs={'option': Option(verbose=True)})
+ metafunc.addcall(id="quiet",
+ funcargs={'option': Option(verbose= -1)})
+ metafunc.addcall(id="fulltrace",
+ funcargs={'option': Option(fulltrace=True)})
+
+
+@pytest.mark.parametrize('input,expected', [
+ ([DistInfo(project_name='test', version=1)], ['test-1']),
+ ([DistInfo(project_name='pytest-test', version=1)], ['test-1']),
+ ([
+ DistInfo(project_name='test', version=1),
+ DistInfo(project_name='test', version=1)
+ ], ['test-1']),
+], ids=['normal', 'prefix-strip', 'deduplicate'])
+def test_plugin_nameversion(input, expected):
+ pluginlist = [(None, x) for x in input]
+ result = _plugin_nameversions(pluginlist)
+ assert result == expected
+
+
+class TestTerminal:
+ def test_pass_skip_fail(self, testdir, option):
+ testdir.makepyfile("""
+ import pytest
+ def test_ok():
+ pass
+ def test_skip():
+ pytest.skip("xx")
+ def test_func():
+ assert 0
+ """)
+ result = testdir.runpytest(*option.args)
+ if option.verbose:
+ result.stdout.fnmatch_lines([
+ "*test_pass_skip_fail.py::test_ok PASS*",
+ "*test_pass_skip_fail.py::test_skip SKIP*",
+ "*test_pass_skip_fail.py::test_func FAIL*",
+ ])
+ else:
+ result.stdout.fnmatch_lines([
+ "*test_pass_skip_fail.py .sF"
+ ])
+ result.stdout.fnmatch_lines([
+ " def test_func():",
+ "> assert 0",
+ "E assert 0",
+ ])
+
+ def test_internalerror(self, testdir, linecomp):
+ modcol = testdir.getmodulecol("def test_one(): pass")
+ rep = TerminalReporter(modcol.config, file=linecomp.stringio)
+ excinfo = pytest.raises(ValueError, "raise ValueError('hello')")
+ rep.pytest_internalerror(excinfo.getrepr())
+ linecomp.assert_contains_lines([
+ "INTERNALERROR> *ValueError*hello*"
+ ])
+
+ def test_writeline(self, testdir, linecomp):
+ modcol = testdir.getmodulecol("def test_one(): pass")
+ rep = TerminalReporter(modcol.config, file=linecomp.stringio)
+ rep.write_fspath_result(modcol.nodeid, ".")
+ rep.write_line("hello world")
+ lines = linecomp.stringio.getvalue().split('\n')
+ assert not lines[0]
+ assert lines[1].endswith(modcol.name + " .")
+ assert lines[2] == "hello world"
+
+ def test_show_runtest_logstart(self, testdir, linecomp):
+ item = testdir.getitem("def test_func(): pass")
+ tr = TerminalReporter(item.config, file=linecomp.stringio)
+ item.config.pluginmanager.register(tr)
+ location = item.reportinfo()
+ tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid,
+ location=location, fspath=str(item.fspath))
+ linecomp.assert_contains_lines([
+ "*test_show_runtest_logstart.py*"
+ ])
+
+ def test_runtest_location_shown_before_test_starts(self, testdir):
+ testdir.makepyfile("""
+ def test_1():
+ import time
+ time.sleep(20)
+ """)
+ child = testdir.spawn_pytest("")
+ child.expect(".*test_runtest_location.*py")
+ child.sendeof()
+ child.kill(15)
+
+ def test_itemreport_subclasses_show_subclassed_file(self, testdir):
+ testdir.makepyfile(test_p1="""
+ class BaseTests:
+ def test_p1(self):
+ pass
+ class TestClass(BaseTests):
+ pass
+ """)
+ p2 = testdir.makepyfile(test_p2="""
+ from test_p1 import BaseTests
+ class TestMore(BaseTests):
+ pass
+ """)
+ result = testdir.runpytest(p2)
+ result.stdout.fnmatch_lines([
+ "*test_p2.py .",
+ "*1 passed*",
+ ])
+ result = testdir.runpytest("-v", p2)
+ result.stdout.fnmatch_lines([
+ "*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED",
+ ])
+
+ def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
+ a = testdir.mkpydir("a123")
+ a.join("test_hello123.py").write(_pytest._code.Source("""
+ class TestClass:
+ def test_method(self):
+ pass
+ """))
+ result = testdir.runpytest("-v")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*a123/test_hello123.py*PASS*",
+ ])
+ assert " <- " not in result.stdout.str()
+
+ def test_keyboard_interrupt(self, testdir, option):
+ testdir.makepyfile("""
+ def test_foobar():
+ assert 0
+ def test_spamegg():
+ import py; pytest.skip('skip me please!')
+ def test_interrupt_me():
+ raise KeyboardInterrupt # simulating the user
+ """)
+
+ result = testdir.runpytest(*option.args, no_reraise_ctrlc=True)
+ result.stdout.fnmatch_lines([
+ " def test_foobar():",
+ "> assert 0",
+ "E assert 0",
+ "*_keyboard_interrupt.py:6: KeyboardInterrupt*",
+ ])
+ if option.fulltrace:
+ result.stdout.fnmatch_lines([
+ "*raise KeyboardInterrupt # simulating the user*",
+ ])
+ else:
+ result.stdout.fnmatch_lines([
+ "to show a full traceback on KeyboardInterrupt use --fulltrace"
+ ])
+ result.stdout.fnmatch_lines(['*KeyboardInterrupt*'])
+
+ def test_keyboard_in_sessionstart(self, testdir):
+ testdir.makeconftest("""
+ def pytest_sessionstart():
+ raise KeyboardInterrupt
+ """)
+ testdir.makepyfile("""
+ def test_foobar():
+ pass
+ """)
+
+ result = testdir.runpytest(no_reraise_ctrlc=True)
+ assert result.ret == 2
+ result.stdout.fnmatch_lines(['*KeyboardInterrupt*'])
+
+
+class TestCollectonly:
+ def test_collectonly_basic(self, testdir):
+ testdir.makepyfile("""
+ def test_func():
+ pass
+ """)
+ result = testdir.runpytest("--collect-only",)
+ result.stdout.fnmatch_lines([
+ "<Module 'test_collectonly_basic.py'>",
+ " <Function 'test_func'>",
+ ])
+
+ def test_collectonly_skipped_module(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ pytest.skip("hello")
+ """)
+ result = testdir.runpytest("--collect-only", "-rs")
+ result.stdout.fnmatch_lines([
+ "SKIP*hello*",
+ "*1 skip*",
+ ])
+
+ def test_collectonly_failed_module(self, testdir):
+ testdir.makepyfile("""raise ValueError(0)""")
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*raise ValueError*",
+ "*1 error*",
+ ])
+
+ def test_collectonly_fatal(self, testdir):
+ testdir.makeconftest("""
+ def pytest_collectstart(collector):
+ assert 0, "urgs"
+ """)
+ result = testdir.runpytest("--collect-only")
+ result.stdout.fnmatch_lines([
+ "*INTERNAL*args*"
+ ])
+ assert result.ret == 3
+
+ def test_collectonly_simple(self, testdir):
+ p = testdir.makepyfile("""
+ def test_func1():
+ pass
+ class TestClass:
+ def test_method(self):
+ pass
+ """)
+ result = testdir.runpytest("--collect-only", p)
+ #assert stderr.startswith("inserting into sys.path")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*<Module '*.py'>",
+ "* <Function 'test_func1'*>",
+ "* <Class 'TestClass'>",
+ #"* <Instance '()'>",
+ "* <Function 'test_method'*>",
+ ])
+
+ def test_collectonly_error(self, testdir):
+ p = testdir.makepyfile("import Errlkjqweqwe")
+ result = testdir.runpytest("--collect-only", p)
+ assert result.ret == 1
+ result.stdout.fnmatch_lines(_pytest._code.Source("""
+ *ERROR*
+ *import Errlk*
+ *ImportError*
+ *1 error*
+ """).strip())
+
+ def test_collectonly_missing_path(self, testdir):
+ """this checks issue 115,
+ failure in parseargs will cause session
+ not to have the items attribute
+ """
+ result = testdir.runpytest("--collect-only", "uhm_missing_path")
+ assert result.ret == 4
+ result.stderr.fnmatch_lines([
+ '*ERROR: file not found*',
+ ])
+
+ def test_collectonly_quiet(self, testdir):
+ testdir.makepyfile("def test_foo(): pass")
+ result = testdir.runpytest("--collect-only", "-q")
+ result.stdout.fnmatch_lines([
+ '*test_foo*',
+ ])
+
+ def test_collectonly_more_quiet(self, testdir):
+ testdir.makepyfile(test_fun="def test_foo(): pass")
+ result = testdir.runpytest("--collect-only", "-qq")
+ result.stdout.fnmatch_lines([
+ '*test_fun.py: 1*',
+ ])
+
+
+def test_repr_python_version(monkeypatch):
+ try:
+ monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0))
+ assert repr_pythonversion() == "2.5.1-final-0"
+ py.std.sys.version_info = x = (2, 3)
+ assert repr_pythonversion() == str(x)
+ finally:
+ monkeypatch.undo() # do this early as pytest can get confused
+
+class TestFixtureReporting:
+ def test_setup_fixture_error(self, testdir):
+ testdir.makepyfile("""
+ def setup_function(function):
+ print ("setup func")
+ assert 0
+ def test_nada():
+ pass
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*ERROR at setup of test_nada*",
+ "*setup_function(function):*",
+ "*setup func*",
+ "*assert 0*",
+ "*1 error*",
+ ])
+ assert result.ret != 0
+
+ def test_teardown_fixture_error(self, testdir):
+ testdir.makepyfile("""
+ def test_nada():
+ pass
+ def teardown_function(function):
+ print ("teardown func")
+ assert 0
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*ERROR at teardown*",
+ "*teardown_function(function):*",
+ "*assert 0*",
+ "*Captured stdout*",
+ "*teardown func*",
+ "*1 passed*1 error*",
+ ])
+
+ def test_teardown_fixture_error_and_test_failure(self, testdir):
+ testdir.makepyfile("""
+ def test_fail():
+ assert 0, "failingfunc"
+
+ def teardown_function(function):
+ print ("teardown func")
+ assert False
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*ERROR at teardown of test_fail*",
+ "*teardown_function(function):*",
+ "*assert False*",
+ "*Captured stdout*",
+ "*teardown func*",
+
+ "*test_fail*",
+ "*def test_fail():",
+ "*failingfunc*",
+ "*1 failed*1 error*",
+ ])
+
+class TestTerminalFunctional:
+ def test_deselected(self, testdir):
+ testpath = testdir.makepyfile("""
+ def test_one():
+ pass
+ def test_two():
+ pass
+ def test_three():
+ pass
+ """
+ )
+ result = testdir.runpytest("-k", "test_two:", testpath)
+ result.stdout.fnmatch_lines([
+ "*test_deselected.py ..",
+ "=* 1 test*deselected by*test_two:*=",
+ ])
+ assert result.ret == 0
+
+ def test_no_skip_summary_if_failure(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_ok():
+ pass
+ def test_fail():
+ assert 0
+ def test_skip():
+ pytest.skip("dontshow")
+ """)
+ result = testdir.runpytest()
+ assert result.stdout.str().find("skip test summary") == -1
+ assert result.ret == 1
+
+ def test_passes(self, testdir):
+ p1 = testdir.makepyfile("""
+ def test_passes():
+ pass
+ class TestClass:
+ def test_method(self):
+ pass
+ """)
+ old = p1.dirpath().chdir()
+ try:
+ result = testdir.runpytest()
+ finally:
+ old.chdir()
+ result.stdout.fnmatch_lines([
+ "test_passes.py ..",
+ "* 2 pass*",
+ ])
+ assert result.ret == 0
+
+ def test_header_trailer_info(self, testdir):
+ testdir.makepyfile("""
+ def test_passes():
+ pass
+ """)
+ result = testdir.runpytest()
+ verinfo = ".".join(map(str, py.std.sys.version_info[:3]))
+ result.stdout.fnmatch_lines([
+ "*===== test session starts ====*",
+ "platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s" % (
+ py.std.sys.platform, verinfo,
+ pytest.__version__, py.__version__, pluggy.__version__),
+ "*test_header_trailer_info.py .",
+ "=* 1 passed*in *.[0-9][0-9] seconds *=",
+ ])
+ if pytest.config.pluginmanager.list_plugin_distinfo():
+ result.stdout.fnmatch_lines([
+ "plugins: *",
+ ])
+
+ def test_showlocals(self, testdir):
+ p1 = testdir.makepyfile("""
+ def test_showlocals():
+ x = 3
+ y = "x" * 5000
+ assert 0
+ """)
+ result = testdir.runpytest(p1, '-l')
+ result.stdout.fnmatch_lines([
+ #"_ _ * Locals *",
+ "x* = 3",
+ "y* = 'xxxxxx*"
+ ])
+
+ def test_verbose_reporting(self, testdir, pytestconfig):
+ p1 = testdir.makepyfile("""
+ import pytest
+ def test_fail():
+ raise ValueError()
+ def test_pass():
+ pass
+ class TestClass:
+ def test_skip(self):
+ pytest.skip("hello")
+ def test_gen():
+ def check(x):
+ assert x == 1
+ yield check, 0
+ """)
+ result = testdir.runpytest(p1, '-v')
+ result.stdout.fnmatch_lines([
+ "*test_verbose_reporting.py::test_fail *FAIL*",
+ "*test_verbose_reporting.py::test_pass *PASS*",
+ "*test_verbose_reporting.py::TestClass::test_skip *SKIP*",
+ "*test_verbose_reporting.py::test_gen*0* *FAIL*",
+ ])
+ assert result.ret == 1
+
+ if not pytestconfig.pluginmanager.get_plugin("xdist"):
+ pytest.skip("xdist plugin not installed")
+
+ result = testdir.runpytest(p1, '-v', '-n 1')
+ result.stdout.fnmatch_lines([
+ "*FAIL*test_verbose_reporting.py::test_fail*",
+ ])
+ assert result.ret == 1
+
+ def test_quiet_reporting(self, testdir):
+ p1 = testdir.makepyfile("def test_pass(): pass")
+ result = testdir.runpytest(p1, '-q')
+ s = result.stdout.str()
+ assert 'test session starts' not in s
+ assert p1.basename not in s
+ assert "===" not in s
+ assert "passed" in s
+
+ def test_more_quiet_reporting(self, testdir):
+ p1 = testdir.makepyfile("def test_pass(): pass")
+ result = testdir.runpytest(p1, '-qq')
+ s = result.stdout.str()
+ assert 'test session starts' not in s
+ assert p1.basename not in s
+ assert "===" not in s
+ assert "passed" not in s
+
+
+def test_fail_extra_reporting(testdir):
+ testdir.makepyfile("def test_this(): assert 0")
+ result = testdir.runpytest()
+ assert 'short test summary' not in result.stdout.str()
+ result = testdir.runpytest('-rf')
+ result.stdout.fnmatch_lines([
+ "*test summary*",
+ "FAIL*test_fail_extra_reporting*",
+ ])
+
+def test_fail_reporting_on_pass(testdir):
+ testdir.makepyfile("def test_this(): assert 1")
+ result = testdir.runpytest('-rf')
+ assert 'short test summary' not in result.stdout.str()
+
+def test_pass_extra_reporting(testdir):
+ testdir.makepyfile("def test_this(): assert 1")
+ result = testdir.runpytest()
+ assert 'short test summary' not in result.stdout.str()
+ result = testdir.runpytest('-rp')
+ result.stdout.fnmatch_lines([
+ "*test summary*",
+ "PASS*test_pass_extra_reporting*",
+ ])
+
+def test_pass_reporting_on_fail(testdir):
+ testdir.makepyfile("def test_this(): assert 0")
+ result = testdir.runpytest('-rp')
+ assert 'short test summary' not in result.stdout.str()
+
+def test_pass_output_reporting(testdir):
+ testdir.makepyfile("""
+ def test_pass_output():
+ print("Four score and seven years ago...")
+ """)
+ result = testdir.runpytest()
+ assert 'Four score and seven years ago...' not in result.stdout.str()
+ result = testdir.runpytest('-rP')
+ result.stdout.fnmatch_lines([
+ "Four score and seven years ago...",
+ ])
+
+def test_color_yes(testdir):
+ testdir.makepyfile("def test_this(): assert 1")
+ result = testdir.runpytest('--color=yes')
+ assert 'test session starts' in result.stdout.str()
+ assert '\x1b[1m' in result.stdout.str()
+
+
+def test_color_no(testdir):
+ testdir.makepyfile("def test_this(): assert 1")
+ result = testdir.runpytest('--color=no')
+ assert 'test session starts' in result.stdout.str()
+ assert '\x1b[1m' not in result.stdout.str()
+
+
+@pytest.mark.parametrize('verbose', [True, False])
+def test_color_yes_collection_on_non_atty(testdir, verbose):
+ """skip collect progress report when working on non-terminals.
+ #1397
+ """
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize('i', range(10))
+ def test_this(i):
+ assert 1
+ """)
+ args = ['--color=yes']
+ if verbose:
+ args.append('-vv')
+ result = testdir.runpytest(*args)
+ assert 'test session starts' in result.stdout.str()
+ assert '\x1b[1m' in result.stdout.str()
+ assert 'collecting 10 items' not in result.stdout.str()
+ if verbose:
+ assert 'collecting ...' in result.stdout.str()
+ assert 'collected 10 items' in result.stdout.str()
+
+
+def test_getreportopt():
+ class config:
+ class option:
+ reportchars = ""
+ config.option.report = "xfailed"
+ assert getreportopt(config) == "x"
+
+ config.option.report = "xfailed,skipped"
+ assert getreportopt(config) == "xs"
+
+ config.option.report = "skipped,xfailed"
+ assert getreportopt(config) == "sx"
+
+ config.option.report = "skipped"
+ config.option.reportchars = "sf"
+ assert getreportopt(config) == "sf"
+
+ config.option.reportchars = "sfx"
+ assert getreportopt(config) == "sfx"
+
+def test_terminalreporter_reportopt_addopts(testdir):
+ testdir.makeini("[pytest]\naddopts=-rs")
+ testdir.makepyfile("""
+ def pytest_funcarg__tr(request):
+ tr = request.config.pluginmanager.getplugin("terminalreporter")
+ return tr
+ def test_opt(tr):
+ assert tr.hasopt('skipped')
+ assert not tr.hasopt('qwe')
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*1 passed*"
+ ])
+
+def test_tbstyle_short(testdir):
+ p = testdir.makepyfile("""
+ def pytest_funcarg__arg(request):
+ return 42
+ def test_opt(arg):
+ x = 0
+ assert x
+ """)
+ result = testdir.runpytest("--tb=short")
+ s = result.stdout.str()
+ assert 'arg = 42' not in s
+ assert 'x = 0' not in s
+ result.stdout.fnmatch_lines([
+ "*%s:5*" % p.basename,
+ " assert x",
+ "E assert*",
+ ])
+ result = testdir.runpytest()
+ s = result.stdout.str()
+ assert 'x = 0' in s
+ assert 'assert x' in s
+
+def test_traceconfig(testdir, monkeypatch):
+ result = testdir.runpytest("--traceconfig")
+ result.stdout.fnmatch_lines([
+ "*active plugins*"
+ ])
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+
+
+class TestGenericReporting:
+ """ this test class can be subclassed with a different option
+ provider to run e.g. distributed tests.
+ """
+ def test_collect_fail(self, testdir, option):
+ testdir.makepyfile("import xyz\n")
+ result = testdir.runpytest(*option.args)
+ result.stdout.fnmatch_lines([
+ "? import xyz",
+ "E ImportError: No module named *xyz*",
+ "*1 error*",
+ ])
+
+ def test_maxfailures(self, testdir, option):
+ testdir.makepyfile("""
+ def test_1():
+ assert 0
+ def test_2():
+ assert 0
+ def test_3():
+ assert 0
+ """)
+ result = testdir.runpytest("--maxfail=2", *option.args)
+ result.stdout.fnmatch_lines([
+ "*def test_1():*",
+ "*def test_2():*",
+ "*!! Interrupted: stopping after 2 failures*!!*",
+ "*2 failed*",
+ ])
+
+
+ def test_tb_option(self, testdir, option):
+ testdir.makepyfile("""
+ import pytest
+ def g():
+ raise IndexError
+ def test_func():
+ print (6*7)
+ g() # --calling--
+ """)
+ for tbopt in ["long", "short", "no"]:
+ print('testing --tb=%s...' % tbopt)
+ result = testdir.runpytest('--tb=%s' % tbopt)
+ s = result.stdout.str()
+ if tbopt == "long":
+ assert 'print (6*7)' in s
+ else:
+ assert 'print (6*7)' not in s
+ if tbopt != "no":
+ assert '--calling--' in s
+ assert 'IndexError' in s
+ else:
+ assert 'FAILURES' not in s
+ assert '--calling--' not in s
+ assert 'IndexError' not in s
+
+ def test_tb_crashline(self, testdir, option):
+ p = testdir.makepyfile("""
+ import pytest
+ def g():
+ raise IndexError
+ def test_func1():
+ print (6*7)
+ g() # --calling--
+ def test_func2():
+ assert 0, "hello"
+ """)
+ result = testdir.runpytest("--tb=line")
+ bn = p.basename
+ result.stdout.fnmatch_lines([
+ "*%s:3: IndexError*" % bn,
+ "*%s:8: AssertionError: hello*" % bn,
+ ])
+ s = result.stdout.str()
+ assert "def test_func2" not in s
+
+ def test_pytest_report_header(self, testdir, option):
+ testdir.makeconftest("""
+ def pytest_sessionstart(session):
+ session.config._somevalue = 42
+ def pytest_report_header(config):
+ return "hello: %s" % config._somevalue
+ """)
+ testdir.mkdir("a").join("conftest.py").write("""
+def pytest_report_header(config, startdir):
+ return ["line1", str(startdir)]
+""")
+ result = testdir.runpytest("a")
+ result.stdout.fnmatch_lines([
+ "*hello: 42*",
+ "line1",
+ str(testdir.tmpdir),
+ ])
+
+@pytest.mark.xfail("not hasattr(os, 'dup')")
+def test_fdopen_kept_alive_issue124(testdir):
+ testdir.makepyfile("""
+ import os, sys
+ k = []
+ def test_open_file_and_keep_alive(capfd):
+ stdout = os.fdopen(1, 'w', 1)
+ k.append(stdout)
+
+ def test_close_kept_alive_file():
+ stdout = k.pop()
+ stdout.close()
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*2 passed*"
+ ])
+
+def test_tbstyle_native_setup_error(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture
+ def setup_error_fixture():
+ raise Exception("error in exception")
+
+ def test_error_fixture(setup_error_fixture):
+ pass
+ """)
+ result = testdir.runpytest("--tb=native")
+ result.stdout.fnmatch_lines([
+ '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*'
+ ])
+
+def test_terminal_summary(testdir):
+ testdir.makeconftest("""
+ def pytest_terminal_summary(terminalreporter):
+ w = terminalreporter
+ w.section("hello")
+ w.line("world")
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("""
+ *==== hello ====*
+ world
+ """)
+
+
+def test_terminal_summary_warnings_are_displayed(testdir):
+ """Test that warnings emitted during pytest_terminal_summary are displayed.
+ (#1305).
+ """
+ testdir.makeconftest("""
+ def pytest_terminal_summary(terminalreporter):
+ config = terminalreporter.config
+ config.warn('C1', 'internal warning')
+ """)
+ result = testdir.runpytest('-rw')
+ result.stdout.fnmatch_lines([
+ '*C1*internal warning',
+ '*== 1 pytest-warnings in *',
+ ])
+
+
+@pytest.mark.parametrize("exp_color, exp_line, stats_arg", [
+ # The method under test only cares about the length of each
+ # dict value, not the actual contents, so tuples of anything
+ # suffice
+
+ # Important statuses -- the highest priority of these always wins
+ ("red", "1 failed", {"failed": (1,)}),
+ ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
+
+ ("red", "1 error", {"error": (1,)}),
+ ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
+
+ # (a status that's not known to the code)
+ ("yellow", "1 weird", {"weird": (1,)}),
+ ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
+
+ ("yellow", "1 pytest-warnings", {"warnings": (1,)}),
+ ("yellow", "1 passed, 1 pytest-warnings", {"warnings": (1,),
+ "passed": (1,)}),
+
+ ("green", "5 passed", {"passed": (1,2,3,4,5)}),
+
+
+ # "Boring" statuses. These have no effect on the color of the summary
+ # line. Thus, if *every* test has a boring status, the summary line stays
+ # at its default color, i.e. yellow, to warn the user that the test run
+ # produced no useful information
+ ("yellow", "1 skipped", {"skipped": (1,)}),
+ ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),
+
+ ("yellow", "1 deselected", {"deselected": (1,)}),
+ ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),
+
+ ("yellow", "1 xfailed", {"xfailed": (1,)}),
+ ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),
+
+ ("yellow", "1 xpassed", {"xpassed": (1,)}),
+ ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),
+
+ # Likewise if no tests were found at all
+ ("yellow", "no tests ran", {}),
+
+ # Test the empty-key special case
+ ("yellow", "no tests ran", {"": (1,)}),
+ ("green", "1 passed", {"": (1,), "passed": (1,)}),
+
+
+ # A couple more complex combinations
+ ("red", "1 failed, 2 passed, 3 xfailed",
+ {"passed": (1,2), "failed": (1,), "xfailed": (1,2,3)}),
+
+ ("green", "1 passed, 2 skipped, 3 deselected, 2 xfailed",
+ {"passed": (1,),
+ "skipped": (1,2),
+ "deselected": (1,2,3),
+ "xfailed": (1,2)}),
+])
+def test_summary_stats(exp_line, exp_color, stats_arg):
+ print("Based on stats: %s" % stats_arg)
+ print("Expect summary: \"%s\"; with color \"%s\"" % (exp_line, exp_color))
+ (line, color) = build_summary_stats_line(stats_arg)
+ print("Actually got: \"%s\"; with color \"%s\"" % (line, color))
+ assert line == exp_line
+ assert color == exp_color
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_tmpdir.py b/testing/web-platform/tests/tools/pytest/testing/test_tmpdir.py
new file mode 100644
index 000000000..d514e722e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_tmpdir.py
@@ -0,0 +1,183 @@
+import sys
+import py
+import pytest
+
+from _pytest.tmpdir import tmpdir
+
+def test_funcarg(testdir):
+ testdir.makepyfile("""
+ def pytest_generate_tests(metafunc):
+ metafunc.addcall(id='a')
+ metafunc.addcall(id='b')
+ def test_func(tmpdir): pass
+ """)
+ from _pytest.tmpdir import TempdirFactory
+ reprec = testdir.inline_run()
+ calls = reprec.getcalls("pytest_runtest_setup")
+ item = calls[0].item
+ config = item.config
+ tmpdirhandler = TempdirFactory(config)
+ item._initrequest()
+ p = tmpdir(item._request, tmpdirhandler)
+ assert p.check()
+ bn = p.basename.strip("0123456789")
+ assert bn.endswith("test_func_a_")
+ item.name = "qwe/\\abc"
+ p = tmpdir(item._request, tmpdirhandler)
+ assert p.check()
+ bn = p.basename.strip("0123456789")
+ assert bn == "qwe__abc"
+
+def test_ensuretemp(recwarn):
+ #pytest.deprecated_call(pytest.ensuretemp, 'hello')
+ d1 = pytest.ensuretemp('hello')
+ d2 = pytest.ensuretemp('hello')
+ assert d1 == d2
+ assert d1.check(dir=1)
+
+class TestTempdirHandler:
+ def test_mktemp(self, testdir):
+ from _pytest.tmpdir import TempdirFactory
+ config = testdir.parseconfig()
+ config.option.basetemp = testdir.mkdir("hello")
+ t = TempdirFactory(config)
+ tmp = t.mktemp("world")
+ assert tmp.relto(t.getbasetemp()) == "world0"
+ tmp = t.mktemp("this")
+ assert tmp.relto(t.getbasetemp()).startswith("this")
+ tmp2 = t.mktemp("this")
+ assert tmp2.relto(t.getbasetemp()).startswith("this")
+ assert tmp2 != tmp
+
+class TestConfigTmpdir:
+ def test_getbasetemp_custom_removes_old(self, testdir):
+ mytemp = testdir.tmpdir.join("xyz")
+ p = testdir.makepyfile("""
+ def test_1(tmpdir):
+ pass
+ """)
+ testdir.runpytest(p, '--basetemp=%s' % mytemp)
+ mytemp.check()
+ mytemp.ensure("hello")
+
+ testdir.runpytest(p, '--basetemp=%s' % mytemp)
+ mytemp.check()
+ assert not mytemp.join("hello").check()
+
+
+def test_basetemp(testdir):
+ mytemp = testdir.tmpdir.mkdir("mytemp")
+ p = testdir.makepyfile("""
+ import pytest
+ def test_1():
+ pytest.ensuretemp("hello")
+ """)
+ result = testdir.runpytest(p, '--basetemp=%s' % mytemp)
+ assert result.ret == 0
+ assert mytemp.join('hello').check()
+
+@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
+ reason="symlink not available on this platform")
+def test_tmpdir_always_is_realpath(testdir):
+    # tmpdir should be a realpath because when you cd into it
+    # and call os.getcwd() you get the realpath anyway; using
+    # the symlinked path can thus easily result in
+    # path inequality.
+    # XXX if that proves to be a problem, consider using
+    # os.environ["PWD"]
+ realtemp = testdir.tmpdir.mkdir("myrealtemp")
+ linktemp = testdir.tmpdir.join("symlinktemp")
+ linktemp.mksymlinkto(realtemp)
+ p = testdir.makepyfile("""
+ def test_1(tmpdir):
+ import os
+ assert os.path.realpath(str(tmpdir)) == str(tmpdir)
+ """)
+ result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp)
+ assert not result.ret
+
+
+def test_tmpdir_too_long_on_parametrization(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.mark.parametrize("arg", ["1"*1000])
+ def test_some(arg, tmpdir):
+ tmpdir.ensure("hello")
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+
+def test_tmpdir_factory(testdir):
+ testdir.makepyfile("""
+ import pytest
+ @pytest.fixture(scope='session')
+ def session_dir(tmpdir_factory):
+ return tmpdir_factory.mktemp('data', numbered=False)
+ def test_some(session_dir):
+ session_dir.isdir()
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+
+def test_tmpdir_fallback_tox_env(testdir, monkeypatch):
+ """Test that tmpdir works even if environment variables required by getpass
+ module are missing (#1010).
+ """
+ monkeypatch.delenv('USER', raising=False)
+ monkeypatch.delenv('USERNAME', raising=False)
+ testdir.makepyfile("""
+ import pytest
+ def test_some(tmpdir):
+ assert tmpdir.isdir()
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+
+@pytest.fixture
+def break_getuser(monkeypatch):
+ monkeypatch.setattr('os.getuid', lambda: -1)
+ # taken from python 2.7/3.4
+ for envvar in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
+ monkeypatch.delenv(envvar, raising=False)
+
+
+@pytest.mark.usefixtures("break_getuser")
+@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows')
+def test_tmpdir_fallback_uid_not_found(testdir):
+ """Test that tmpdir works even if the current process's user id does not
+ correspond to a valid user.
+ """
+
+ testdir.makepyfile("""
+ import pytest
+ def test_some(tmpdir):
+ assert tmpdir.isdir()
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+
+@pytest.mark.usefixtures("break_getuser")
+@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows')
+def test_get_user_uid_not_found():
+ """Test that get_user() function works even if the current process's
+ user id does not correspond to a valid user (e.g. running pytest in a
+ Docker container with 'docker run -u'.
+ """
+ from _pytest.tmpdir import get_user
+ assert get_user() is None
+
+
+@pytest.mark.skipif(not sys.platform.startswith('win'), reason='win only')
+def test_get_user(monkeypatch):
+ """Test that get_user() function works even if environment variables
+ required by getpass module are missing from the environment on Windows
+ (#1010).
+ """
+ from _pytest.tmpdir import get_user
+ monkeypatch.delenv('USER', raising=False)
+ monkeypatch.delenv('USERNAME', raising=False)
+ assert get_user() is None
diff --git a/testing/web-platform/tests/tools/pytest/testing/test_unittest.py b/testing/web-platform/tests/tools/pytest/testing/test_unittest.py
new file mode 100644
index 000000000..144aad79b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/testing/test_unittest.py
@@ -0,0 +1,737 @@
+from _pytest.main import EXIT_NOTESTSCOLLECTED
+import pytest
+
+def test_simple_unittest(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ def testpassing(self):
+ self.assertEquals('foo', 'foo')
+ def test_failing(self):
+ self.assertEquals('foo', 'bar')
+ """)
+ reprec = testdir.inline_run(testpath)
+ assert reprec.matchreport("testpassing").passed
+ assert reprec.matchreport("test_failing").failed
+
+def test_runTest_method(testdir):
+ testdir.makepyfile("""
+ import unittest
+ class MyTestCaseWithRunTest(unittest.TestCase):
+ def runTest(self):
+ self.assertEquals('foo', 'foo')
+ class MyTestCaseWithoutRunTest(unittest.TestCase):
+ def runTest(self):
+ self.assertEquals('foo', 'foo')
+ def test_something(self):
+ pass
+ """)
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines("""
+ *MyTestCaseWithRunTest::runTest*
+ *MyTestCaseWithoutRunTest::test_something*
+ *2 passed*
+ """)
+
+def test_isclasscheck_issue53(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ class _E(object):
+ def __getattr__(self, tag):
+ pass
+ E = _E()
+ """)
+ result = testdir.runpytest(testpath)
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+
+def test_setup(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ def setUp(self):
+ self.foo = 1
+ def setup_method(self, method):
+ self.foo2 = 1
+ def test_both(self):
+ self.assertEquals(1, self.foo)
+ assert self.foo2 == 1
+ def teardown_method(self, method):
+ assert 0, "42"
+
+ """)
+ reprec = testdir.inline_run("-s", testpath)
+ assert reprec.matchreport("test_both", when="call").passed
+ rep = reprec.matchreport("test_both", when="teardown")
+ assert rep.failed and '42' in str(rep.longrepr)
+
+def test_setUpModule(testdir):
+ testpath = testdir.makepyfile("""
+ l = []
+
+ def setUpModule():
+ l.append(1)
+
+ def tearDownModule():
+ del l[0]
+
+ def test_hello():
+ assert l == [1]
+
+ def test_world():
+ assert l == [1]
+ """)
+ result = testdir.runpytest(testpath)
+ result.stdout.fnmatch_lines([
+ "*2 passed*",
+ ])
+
+def test_setUpModule_failing_no_teardown(testdir):
+ testpath = testdir.makepyfile("""
+ l = []
+
+ def setUpModule():
+ 0/0
+
+ def tearDownModule():
+ l.append(1)
+
+ def test_hello():
+ pass
+ """)
+ reprec = testdir.inline_run(testpath)
+ reprec.assertoutcome(passed=0, failed=1)
+ call = reprec.getcalls("pytest_runtest_setup")[0]
+ assert not call.item.module.l
+
+def test_new_instances(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ def test_func1(self):
+ self.x = 2
+ def test_func2(self):
+ assert not hasattr(self, 'x')
+ """)
+ reprec = testdir.inline_run(testpath)
+ reprec.assertoutcome(passed=2)
+
+def test_teardown(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ l = []
+ def test_one(self):
+ pass
+ def tearDown(self):
+ self.l.append(None)
+ class Second(unittest.TestCase):
+ def test_check(self):
+ self.assertEquals(MyTestCase.l, [None])
+ """)
+ reprec = testdir.inline_run(testpath)
+ passed, skipped, failed = reprec.countoutcomes()
+ assert failed == 0, failed
+ assert passed == 2
+ assert passed + skipped + failed == 2
+
+@pytest.mark.skipif("sys.version_info < (2,7)")
+def test_unittest_skip_issue148(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+
+ @unittest.skip("hello")
+ class MyTestCase(unittest.TestCase):
+ @classmethod
+ def setUpClass(self):
+ xxx
+ def test_one(self):
+ pass
+ @classmethod
+ def tearDownClass(self):
+ xxx
+ """)
+ reprec = testdir.inline_run(testpath)
+ reprec.assertoutcome(skipped=1)
+
+def test_method_and_teardown_failing_reporting(testdir):
+ testdir.makepyfile("""
+ import unittest, pytest
+ class TC(unittest.TestCase):
+ def tearDown(self):
+ assert 0, "down1"
+ def test_method(self):
+ assert False, "down2"
+ """)
+ result = testdir.runpytest("-s")
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*tearDown*",
+ "*assert 0*",
+ "*test_method*",
+ "*assert False*",
+ "*1 failed*1 error*",
+ ])
+
+def test_setup_failure_is_shown(testdir):
+ testdir.makepyfile("""
+ import unittest
+ import pytest
+ class TC(unittest.TestCase):
+ def setUp(self):
+ assert 0, "down1"
+ def test_method(self):
+ print ("never42")
+ xyz
+ """)
+ result = testdir.runpytest("-s")
+ assert result.ret == 1
+ result.stdout.fnmatch_lines([
+ "*setUp*",
+ "*assert 0*down1*",
+ "*1 failed*",
+ ])
+ assert 'never42' not in result.stdout.str()
+
+def test_setup_setUpClass(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ import pytest
+ class MyTestCase(unittest.TestCase):
+ x = 0
+ @classmethod
+ def setUpClass(cls):
+ cls.x += 1
+ def test_func1(self):
+ assert self.x == 1
+ def test_func2(self):
+ assert self.x == 1
+ @classmethod
+ def tearDownClass(cls):
+ cls.x -= 1
+ def test_teareddown():
+ assert MyTestCase.x == 0
+ """)
+ reprec = testdir.inline_run(testpath)
+ reprec.assertoutcome(passed=3)
+
+def test_setup_class(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ import pytest
+ class MyTestCase(unittest.TestCase):
+ x = 0
+ def setup_class(cls):
+ cls.x += 1
+ def test_func1(self):
+ assert self.x == 1
+ def test_func2(self):
+ assert self.x == 1
+ def teardown_class(cls):
+ cls.x -= 1
+ def test_teareddown():
+ assert MyTestCase.x == 0
+ """)
+ reprec = testdir.inline_run(testpath)
+ reprec.assertoutcome(passed=3)
+
+
+@pytest.mark.parametrize("type", ['Error', 'Failure'])
+def test_testcase_adderrorandfailure_defers(testdir, type):
+ testdir.makepyfile("""
+ from unittest import TestCase
+ import pytest
+ class MyTestCase(TestCase):
+ def run(self, result):
+ excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
+ try:
+ result.add%s(self, excinfo._excinfo)
+ except KeyboardInterrupt:
+ raise
+ except:
+ pytest.fail("add%s should not raise")
+ def test_hello(self):
+ pass
+ """ % (type, type))
+ result = testdir.runpytest()
+ assert 'should not raise' not in result.stdout.str()
+
+@pytest.mark.parametrize("type", ['Error', 'Failure'])
+def test_testcase_custom_exception_info(testdir, type):
+ testdir.makepyfile("""
+ from unittest import TestCase
+ import py, pytest
+ import _pytest._code
+ class MyTestCase(TestCase):
+ def run(self, result):
+ excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
+ # we fake an incompatible exception info
+ from _pytest.monkeypatch import monkeypatch
+ mp = monkeypatch()
+ def t(*args):
+ mp.undo()
+ raise TypeError()
+ mp.setattr(_pytest._code, 'ExceptionInfo', t)
+ try:
+ excinfo = excinfo._excinfo
+ result.add%(type)s(self, excinfo)
+ finally:
+ mp.undo()
+ def test_hello(self):
+ pass
+ """ % locals())
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "NOTE: Incompatible Exception Representation*",
+ "*ZeroDivisionError*",
+ "*1 failed*",
+ ])
+
+def test_testcase_totally_incompatible_exception_info(testdir):
+ item, = testdir.getitems("""
+ from unittest import TestCase
+ class MyTestCase(TestCase):
+ def test_hello(self):
+ pass
+ """)
+ item.addError(None, 42)
+ excinfo = item._excinfo.pop(0)
+ assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr())
+
+def test_module_level_pytestmark(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+ import pytest
+ pytestmark = pytest.mark.xfail
+ class MyTestCase(unittest.TestCase):
+ def test_func1(self):
+ assert 0
+ """)
+ reprec = testdir.inline_run(testpath, "-s")
+ reprec.assertoutcome(skipped=1)
+
+
+def test_trial_testcase_skip_property(testdir):
+ pytest.importorskip('twisted.trial.unittest')
+ testpath = testdir.makepyfile("""
+ from twisted.trial import unittest
+ class MyTestCase(unittest.TestCase):
+ skip = 'dont run'
+ def test_func(self):
+ pass
+ """)
+ reprec = testdir.inline_run(testpath, "-s")
+ reprec.assertoutcome(skipped=1)
+
+
+def test_trial_testfunction_skip_property(testdir):
+ pytest.importorskip('twisted.trial.unittest')
+ testpath = testdir.makepyfile("""
+ from twisted.trial import unittest
+ class MyTestCase(unittest.TestCase):
+ def test_func(self):
+ pass
+ test_func.skip = 'dont run'
+ """)
+ reprec = testdir.inline_run(testpath, "-s")
+ reprec.assertoutcome(skipped=1)
+
+
+def test_trial_testcase_todo_property(testdir):
+ pytest.importorskip('twisted.trial.unittest')
+ testpath = testdir.makepyfile("""
+ from twisted.trial import unittest
+ class MyTestCase(unittest.TestCase):
+ todo = 'dont run'
+ def test_func(self):
+ assert 0
+ """)
+ reprec = testdir.inline_run(testpath, "-s")
+ reprec.assertoutcome(skipped=1)
+
+
+def test_trial_testfunction_todo_property(testdir):
+ pytest.importorskip('twisted.trial.unittest')
+ testpath = testdir.makepyfile("""
+ from twisted.trial import unittest
+ class MyTestCase(unittest.TestCase):
+ def test_func(self):
+ assert 0
+ test_func.todo = 'dont run'
+ """)
+ reprec = testdir.inline_run(testpath, "-s")
+ reprec.assertoutcome(skipped=1)
+
+
+class TestTrialUnittest:
+ def setup_class(cls):
+ cls.ut = pytest.importorskip("twisted.trial.unittest")
+
+ def test_trial_testcase_runtest_not_collected(self, testdir):
+ testdir.makepyfile("""
+ from twisted.trial.unittest import TestCase
+
+ class TC(TestCase):
+ def test_hello(self):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+ testdir.makepyfile("""
+ from twisted.trial.unittest import TestCase
+
+ class TC(TestCase):
+ def runTest(self):
+ pass
+ """)
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(passed=1)
+
+ def test_trial_exceptions_with_skips(self, testdir):
+ testdir.makepyfile("""
+ from twisted.trial import unittest
+ import pytest
+ class TC(unittest.TestCase):
+ def test_hello(self):
+ pytest.skip("skip_in_method")
+ @pytest.mark.skipif("sys.version_info != 1")
+ def test_hello2(self):
+ pass
+ @pytest.mark.xfail(reason="iwanto")
+ def test_hello3(self):
+ assert 0
+ def test_hello4(self):
+ pytest.xfail("i2wanto")
+ def test_trial_skip(self):
+ pass
+ test_trial_skip.skip = "trialselfskip"
+
+ def test_trial_todo(self):
+ assert 0
+ test_trial_todo.todo = "mytodo"
+
+ def test_trial_todo_success(self):
+ pass
+ test_trial_todo_success.todo = "mytodo"
+
+ class TC2(unittest.TestCase):
+ def setup_class(cls):
+ pytest.skip("skip_in_setup_class")
+ def test_method(self):
+ pass
+ """)
+ result = testdir.runpytest("-rxs")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines_random([
+ "*XFAIL*test_trial_todo*",
+ "*trialselfskip*",
+ "*skip_in_setup_class*",
+ "*iwanto*",
+ "*i2wanto*",
+ "*sys.version_info*",
+ "*skip_in_method*",
+ "*4 skipped*3 xfail*1 xpass*",
+ ])
+
+ def test_trial_error(self, testdir):
+ testdir.makepyfile("""
+ from twisted.trial.unittest import TestCase
+ from twisted.internet.defer import Deferred
+ from twisted.internet import reactor
+
+ class TC(TestCase):
+ def test_one(self):
+ crash
+
+ def test_two(self):
+ def f(_):
+ crash
+
+ d = Deferred()
+ d.addCallback(f)
+ reactor.callLater(0.3, d.callback, None)
+ return d
+
+ def test_three(self):
+ def f():
+ pass # will never get called
+ reactor.callLater(0.3, f)
+ # will crash at teardown
+
+ def test_four(self):
+ def f(_):
+ reactor.callLater(0.3, f)
+ crash
+
+ d = Deferred()
+ d.addCallback(f)
+ reactor.callLater(0.3, d.callback, None)
+ return d
+ # will crash both at test time and at teardown
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*ERRORS*",
+ "*DelayedCalls*",
+ "*test_four*",
+ "*NameError*crash*",
+ "*test_one*",
+ "*NameError*crash*",
+ "*test_three*",
+ "*DelayedCalls*",
+ "*test_two*",
+ "*crash*",
+ ])
+
+ def test_trial_pdb(self, testdir):
+ p = testdir.makepyfile("""
+ from twisted.trial import unittest
+ import pytest
+ class TC(unittest.TestCase):
+ def test_hello(self):
+ assert 0, "hellopdb"
+ """)
+ child = testdir.spawn_pytest(p)
+ child.expect("hellopdb")
+ child.sendeof()
+
+def test_djangolike_testcase(testdir):
+    # contributed by Morten Breekevold
+ testdir.makepyfile("""
+ from unittest import TestCase, main
+
+ class DjangoLikeTestCase(TestCase):
+
+ def setUp(self):
+ print ("setUp()")
+
+ def test_presetup_has_been_run(self):
+ print ("test_thing()")
+ self.assertTrue(hasattr(self, 'was_presetup'))
+
+ def tearDown(self):
+ print ("tearDown()")
+
+ def __call__(self, result=None):
+ try:
+ self._pre_setup()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except Exception:
+ import sys
+ result.addError(self, sys.exc_info())
+ return
+ super(DjangoLikeTestCase, self).__call__(result)
+ try:
+ self._post_teardown()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except Exception:
+ import sys
+ result.addError(self, sys.exc_info())
+ return
+
+ def _pre_setup(self):
+ print ("_pre_setup()")
+ self.was_presetup = True
+
+ def _post_teardown(self):
+ print ("_post_teardown()")
+ """)
+ result = testdir.runpytest("-s")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*_pre_setup()*",
+ "*setUp()*",
+ "*test_thing()*",
+ "*tearDown()*",
+ "*_post_teardown()*",
+ ])
+
+
+def test_unittest_not_shown_in_traceback(testdir):
+ testdir.makepyfile("""
+ import unittest
+ class t(unittest.TestCase):
+ def test_hello(self):
+ x = 3
+ self.assertEquals(x, 4)
+ """)
+ res = testdir.runpytest()
+ assert "failUnlessEqual" not in res.stdout.str()
+
+def test_unorderable_types(testdir):
+ testdir.makepyfile("""
+ import unittest
+ class TestJoinEmpty(unittest.TestCase):
+ pass
+
+ def make_test():
+ class Test(unittest.TestCase):
+ pass
+ Test.__name__ = "TestFoo"
+ return Test
+ TestFoo = make_test()
+ """)
+ result = testdir.runpytest()
+ assert "TypeError" not in result.stdout.str()
+ assert result.ret == EXIT_NOTESTSCOLLECTED
+
+def test_unittest_typerror_traceback(testdir):
+ testdir.makepyfile("""
+ import unittest
+ class TestJoinEmpty(unittest.TestCase):
+ def test_hello(self, arg1):
+ pass
+ """)
+ result = testdir.runpytest()
+ assert "TypeError" in result.stdout.str()
+ assert result.ret == 1
+
+@pytest.mark.skipif("sys.version_info < (2,7)")
+def test_unittest_unexpected_failure(testdir):
+ testdir.makepyfile("""
+ import unittest
+ class MyTestCase(unittest.TestCase):
+ @unittest.expectedFailure
+ def test_func1(self):
+ assert 0
+ @unittest.expectedFailure
+ def test_func2(self):
+ assert 1
+ """)
+ result = testdir.runpytest("-rxX")
+ result.stdout.fnmatch_lines([
+ "*XFAIL*MyTestCase*test_func1*",
+ "*XPASS*MyTestCase*test_func2*",
+ "*1 xfailed*1 xpass*",
+ ])
+
+
+@pytest.mark.parametrize('fix_type, stmt', [
+ ('fixture', 'return'),
+ ('yield_fixture', 'yield'),
+])
+def test_unittest_setup_interaction(testdir, fix_type, stmt):
+ testdir.makepyfile("""
+ import unittest
+ import pytest
+ class MyTestCase(unittest.TestCase):
+ @pytest.{fix_type}(scope="class", autouse=True)
+ def perclass(self, request):
+ request.cls.hello = "world"
+ {stmt}
+ @pytest.{fix_type}(scope="function", autouse=True)
+ def perfunction(self, request):
+ request.instance.funcname = request.function.__name__
+ {stmt}
+
+ def test_method1(self):
+ assert self.funcname == "test_method1"
+ assert self.hello == "world"
+
+ def test_method2(self):
+ assert self.funcname == "test_method2"
+
+ def test_classattr(self):
+ assert self.__class__.hello == "world"
+ """.format(fix_type=fix_type, stmt=stmt))
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines("*3 passed*")
+
+
+def test_non_unittest_no_setupclass_support(testdir):
+ testpath = testdir.makepyfile("""
+ class TestFoo:
+ x = 0
+
+ @classmethod
+ def setUpClass(cls):
+ cls.x = 1
+
+ def test_method1(self):
+ assert self.x == 0
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.x = 1
+
+ def test_not_teareddown():
+ assert TestFoo.x == 0
+
+ """)
+ reprec = testdir.inline_run(testpath)
+ reprec.assertoutcome(passed=2)
+
+
+def test_no_teardown_if_setupclass_failed(testdir):
+ testpath = testdir.makepyfile("""
+ import unittest
+
+ class MyTestCase(unittest.TestCase):
+ x = 0
+
+ @classmethod
+ def setUpClass(cls):
+ cls.x = 1
+ assert False
+
+ def test_func1(self):
+ cls.x = 10
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.x = 100
+
+ def test_notTornDown():
+ assert MyTestCase.x == 1
+ """)
+ reprec = testdir.inline_run(testpath)
+ reprec.assertoutcome(passed=1, failed=1)
+
+
+def test_issue333_result_clearing(testdir):
+ testdir.makeconftest("""
+ def pytest_runtest_call(__multicall__, item):
+ __multicall__.execute()
+ assert 0
+ """)
+ testdir.makepyfile("""
+ import unittest
+ class TestIt(unittest.TestCase):
+ def test_func(self):
+ 0/0
+ """)
+
+ reprec = testdir.inline_run()
+ reprec.assertoutcome(failed=1)
+
+@pytest.mark.skipif("sys.version_info < (2,7)")
+def test_unittest_raise_skip_issue748(testdir):
+ testdir.makepyfile(test_foo="""
+ import unittest
+
+ class MyTestCase(unittest.TestCase):
+ def test_one(self):
+ raise unittest.SkipTest('skipping due to reasons')
+ """)
+ result = testdir.runpytest("-v", '-rs')
+ result.stdout.fnmatch_lines("""
+ *SKIP*[1]*test_foo.py*skipping due to reasons*
+ *1 skipped*
+ """)
+
+@pytest.mark.skipif("sys.version_info < (2,7)")
+def test_unittest_skip_issue1169(testdir):
+ testdir.makepyfile(test_foo="""
+ import unittest
+
+ class MyTestCase(unittest.TestCase):
+ @unittest.skip("skipping due to reasons")
+ def test_skip(self):
+ self.fail()
+ """)
+ result = testdir.runpytest("-v", '-rs')
+ result.stdout.fnmatch_lines("""
+ *SKIP*[1]*skipping due to reasons*
+ *1 skipped*
+ """)
diff --git a/testing/web-platform/tests/tools/pytest/tox.ini b/testing/web-platform/tests/tools/pytest/tox.ini
new file mode 100644
index 000000000..5f65446e4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/tox.ini
@@ -0,0 +1,160 @@
+[tox]
+minversion=2.0
+distshare={homedir}/.tox/distshare
+envlist=
+ linting,py26,py27,py33,py34,py35,pypy,
+ {py27,py35}-{pexpect,xdist,trial},
+ py27-nobyte,doctesting,py27-cxfreeze
+
+[testenv]
+commands= py.test --lsof -rfsxX {posargs:testing}
+passenv = USER USERNAME
+deps=
+ nose
+ mock
+ requests
+
+[testenv:py26]
+commands= py.test --lsof -rfsxX {posargs:testing}
+deps=
+ nose
+ mock<1.1 # last supported version for py26
+
+[testenv:py27-subprocess]
+changedir=.
+basepython=python2.7
+deps=pytest-xdist>=1.13
+ mock
+ nose
+commands=
+ py.test -n3 -rfsxX --runpytest=subprocess {posargs:testing}
+
+[testenv:genscript]
+commands= py.test --genscript=pytest1
+
+[testenv:linting]
+basepython = python2.7
+deps = flake8
+ restructuredtext_lint
+commands = flake8 pytest.py _pytest testing
+ rst-lint CHANGELOG.rst
+
+[testenv:py27-xdist]
+deps=pytest-xdist>=1.13
+ mock
+ nose
+commands=
+ py.test -n1 -rfsxX {posargs:testing}
+
+[testenv:py35-xdist]
+deps={[testenv:py27-xdist]deps}
+commands=
+ py.test -n3 -rfsxX {posargs:testing}
+
+[testenv:py27-pexpect]
+changedir=testing
+platform=linux|darwin
+deps=pexpect
+commands=
+ py.test -rfsxX test_pdb.py test_terminal.py test_unittest.py
+
+[testenv:py35-pexpect]
+changedir=testing
+platform=linux|darwin
+deps={[testenv:py27-pexpect]deps}
+commands=
+ py.test -rfsxX test_pdb.py test_terminal.py test_unittest.py
+
+[testenv:py27-nobyte]
+deps=pytest-xdist>=1.13
+distribute=true
+setenv=
+ PYTHONDONTWRITEBYTECODE=1
+commands=
+ py.test -n3 -rfsxX {posargs:testing}
+
+[testenv:py27-trial]
+deps=twisted
+commands=
+ py.test -rsxf {posargs:testing/test_unittest.py}
+
+[testenv:py35-trial]
+platform=linux|darwin
+deps={[testenv:py27-trial]deps}
+commands=
+ py.test -rsxf {posargs:testing/test_unittest.py}
+
+[testenv:doctest]
+commands=py.test --doctest-modules _pytest
+deps=
+
+[testenv:doc]
+basepython=python
+changedir=doc/en
+deps=sphinx
+ PyYAML
+
+commands=
+ make clean
+ make html
+
+[testenv:doctesting]
+basepython = python3.4
+changedir=doc/en
+deps=PyYAML
+commands= py.test -rfsxX {posargs}
+
+[testenv:regen]
+changedir=doc/en
+basepython = python3.4
+deps=sphinx
+ PyYAML
+ regendoc>=0.6.1
+whitelist_externals=
+ rm
+ make
+commands=
+ rm -rf /tmp/doc-exec*
+ make regen
+
+[testenv:jython]
+changedir=testing
+commands=
+ {envpython} {envbindir}/py.test-jython -rfsxX {posargs}
+
+[testenv:py27-cxfreeze]
+changedir=testing/cx_freeze
+platform=linux|darwin
+commands=
+ {envpython} install_cx_freeze.py
+ {envpython} runtests_setup.py build --build-exe build
+ {envpython} tox_run.py
+
+
+[testenv:coveralls]
+passenv = TRAVIS TRAVIS_JOB_ID TRAVIS_BRANCH COVERALLS_REPO_TOKEN
+usedevelop=True
+basepython=python3.4
+changedir=.
+deps =
+ {[testenv]deps}
+ coveralls
+commands=
+ coverage run --source=_pytest -m pytest testing
+ coverage report -m
+ coveralls
+
+[pytest]
+minversion=2.0
+plugins=pytester
+#--pyargs --doctest-modules --ignore=.tox
+addopts= -rxsX -p pytester --ignore=testing/cx_freeze
+rsyncdirs=tox.ini pytest.py _pytest testing
+python_files=test_*.py *_test.py testing/*/*.py
+python_classes=Test Acceptance
+python_functions=test
+norecursedirs = .tox ja .hg cx_freeze_source
+
+
+[flake8]
+ignore =E401,E225,E261,E128,E124,E301,E302,E121,E303,W391,E501,E231,E126,E701,E265,E241,E251,E226,E101,W191,E131,E203,E122,E123,E271,E712,E222,E127,E125,E221,W292,E111,E113,E293,E262,W293,E129,E702,E201,E272,E202,E704,E731,E402
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/COPYING b/testing/web-platform/tests/tools/pywebsocket/src/COPYING
new file mode 100644
index 000000000..989d02e4c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/COPYING
@@ -0,0 +1,28 @@
+Copyright 2012, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/MANIFEST.in b/testing/web-platform/tests/tools/pywebsocket/src/MANIFEST.in
new file mode 100644
index 000000000..19256882c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/MANIFEST.in
@@ -0,0 +1,6 @@
+include COPYING
+include MANIFEST.in
+include README
+recursive-include example *.py
+recursive-include mod_pywebsocket *.py
+recursive-include test *.py
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/README b/testing/web-platform/tests/tools/pywebsocket/src/README
new file mode 100644
index 000000000..c8c758f5e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/README
@@ -0,0 +1,17 @@
+INSTALL
+
+To install this package to the system, run this:
+$ python setup.py build
+$ sudo python setup.py install
+
+To install this package as a normal user, run this instead:
+$ python setup.py build
+$ python setup.py install --user
+
+LAUNCH
+
+To use pywebsocket as an Apache module, run this to read the documentation:
+$ pydoc mod_pywebsocket
+
+To use pywebsocket as a standalone server, run this to read the documentation:
+$ pydoc mod_pywebsocket.standalone
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/abort_handshake_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/abort_handshake_wsh.py
new file mode 100644
index 000000000..008023a1f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/abort_handshake_wsh.py
@@ -0,0 +1,43 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import handshake
+
+
+def web_socket_do_extra_handshake(request):
+ raise handshake.AbortedByUserException(
+ "Aborted in web_socket_do_extra_handshake")
+
+
+def web_socket_transfer_data(request):
+ pass
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/abort_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/abort_wsh.py
new file mode 100644
index 000000000..2bbf005f6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/abort_wsh.py
@@ -0,0 +1,43 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import handshake
+
+
+def web_socket_do_extra_handshake(request):
+ pass
+
+
+def web_socket_transfer_data(request):
+ raise handshake.AbortedByUserException(
+ "Aborted in web_socket_transfer_data")
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/arraybuffer_benchmark.html b/testing/web-platform/tests/tools/pywebsocket/src/example/arraybuffer_benchmark.html
new file mode 100644
index 000000000..869cd7e1e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/arraybuffer_benchmark.html
@@ -0,0 +1,134 @@
+<!--
+Copyright 2013, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<html>
+<head>
+<title>ArrayBuffer benchmark</title>
+<script src="util.js"></script>
+<script>
+var PRINT_SIZE = true;
+
+// Initial size of arrays.
+var START_SIZE = 10 * 1024;
+// Stops the benchmark when the size of an array exceeds this threshold.
+var STOP_THRESHOLD = 100000 * 1024;
+// If the size of each array is small, write/read the array multiple times
+// until the sum of sizes reaches this threshold.
+var MIN_TOTAL = 100000 * 1024;
+var MULTIPLIERS = [5, 2];
+
+// Repeat the benchmark several times to measure the performance of
+// optimized (e.g. JIT-compiled) runs.
+var REPEAT_FOR_WARMUP = 3;
+
+function writeBenchmark(size, minTotal) {
+ var totalSize = 0;
+ while (totalSize < minTotal) {
+ var arrayBuffer = new ArrayBuffer(size);
+
+ // Write 'a's.
+ fillArrayBuffer(arrayBuffer, 0x61);
+
+ totalSize += size;
+ }
+ return totalSize;
+}
+
+function readBenchmark(size, minTotal) {
+ var totalSize = 0;
+ while (totalSize < minTotal) {
+ var arrayBuffer = new ArrayBuffer(size);
+
+ if (!verifyArrayBuffer(arrayBuffer, 0x00)) {
+ queueLog('Verification failed');
+ return -1;
+ }
+
+ totalSize += size;
+ }
+ return totalSize;
+}
+
+function runBenchmark(benchmarkFunction,
+ size,
+ stopThreshold,
+ minTotal,
+ multipliers,
+ multiplierIndex) {
+ while (size <= stopThreshold) {
+ var maxSpeed = 0;
+
+ for (var i = 0; i < REPEAT_FOR_WARMUP; ++i) {
+ var startTimeInMs = getTimeStamp();
+
+ var totalSize = benchmarkFunction(size, minTotal);
+
+ maxSpeed = Math.max(maxSpeed,
+ calculateSpeedInKB(totalSize, startTimeInMs));
+ }
+ queueLog(formatResultInKiB(size, maxSpeed, PRINT_SIZE));
+
+ size *= multipliers[multiplierIndex];
+ multiplierIndex = (multiplierIndex + 1) % multipliers.length;
+ }
+}
+
+function runBenchmarks() {
+ queueLog('Message size in KiB, Speed in kB/s');
+
+ queueLog('Write benchmark');
+ runBenchmark(
+ writeBenchmark, START_SIZE, STOP_THRESHOLD, MIN_TOTAL, MULTIPLIERS, 0);
+ queueLog('Finished');
+
+ queueLog('Read benchmark');
+ runBenchmark(
+ readBenchmark, START_SIZE, STOP_THRESHOLD, MIN_TOTAL, MULTIPLIERS, 0);
+ addToLog('Finished');
+}
+
+function init() {
+ logBox = document.getElementById('log');
+
+ queueLog(window.navigator.userAgent.toLowerCase());
+
+ addToLog('Started...');
+
+ setTimeout(runBenchmarks, 0);
+}
+
+</script>
+</head>
+<body onload="init()">
+<textarea
+ id="log" rows="50" style="width: 100%" readonly></textarea>
+</body>
+</html>
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/bench_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/bench_wsh.py
new file mode 100644
index 000000000..5067ca7d8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/bench_wsh.py
@@ -0,0 +1,60 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""A simple load tester for WebSocket clients.
+
+A client program sends a message formatted as "<time> <count> <message>" to
+this handler. The handler then sends a total of <count> WebSocket messages
+containing <message>, one every <time> seconds. <time> can be a floating
+point value. <count> must be an integer value.
+"""
+
+
+import time
+
+
+def web_socket_do_extra_handshake(request):
+ pass # Always accept.
+
+
+def web_socket_transfer_data(request):
+ line = request.ws_stream.receive_message()
+ parts = line.split(' ')
+ if len(parts) != 3:
+ raise ValueError('Bad parameter format')
+ wait = float(parts[0])
+ count = int(parts[1])
+ message = parts[2]
+ for i in xrange(count):
+ request.ws_stream.send_message(message)
+ time.sleep(wait)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark.html b/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark.html
new file mode 100644
index 000000000..3a218173a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark.html
@@ -0,0 +1,203 @@
+<!--
+Copyright 2013, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<html>
+<head>
+<title>WebSocket benchmark</title>
+<script src="util_main.js"></script>
+<script src="util.js"></script>
+<script src="benchmark.js"></script>
+<script>
+var addressBox = null;
+
+function getConfig() {
+ return {
+ prefixUrl: addressBox.value,
+ printSize: getBoolFromCheckBox('printsize'),
+ numSockets: getIntFromInput('numsockets'),
+    numIterations: getIntFromInput('numiterations'),
+    numWarmUpIterations: getIntFromInput('numwarmupiterations'),
+    // Initial size of messages.
+    startSize: getIntFromInput('startsize'),
+    // Stops the benchmark when the message size exceeds this threshold.
+ stopThreshold: getIntFromInput('stopthreshold'),
+ // If the size of each message is small, send/receive multiple messages
+ // until the sum of sizes reaches this threshold.
+ minTotal: getIntFromInput('mintotal'),
+ multipliers: getIntArrayFromInput('multipliers'),
+ verifyData: getBoolFromCheckBox('verifydata')
+ };
+}
+
+var worker = new Worker('benchmark.js');
+worker.onmessage = onMessage;
+
+function onSendBenchmark() {
+ var config = getConfig();
+
+ if (getBoolFromCheckBox('worker')) {
+ worker.postMessage({type: 'sendBenchmark', config: config});
+ } else {
+ config.addToLog = addToLog;
+ config.addToSummary = addToSummary;
+ config.measureValue = measureValue;
+ sendBenchmark(config);
+ }
+}
+
+function onReceiveBenchmark() {
+ var config = getConfig();
+
+ if (getBoolFromCheckBox('worker')) {
+ worker.postMessage({type: 'receiveBenchmark', config: config});
+ } else {
+ config.addToLog = addToLog;
+ config.addToSummary = addToSummary;
+ config.measureValue = measureValue;
+ receiveBenchmark(config);
+ }
+}
+
+function onBatchBenchmark() {
+ var config = getConfig();
+
+ if (getBoolFromCheckBox('worker')) {
+ worker.postMessage({type: 'batchBenchmark', config: config});
+ } else {
+ config.addToLog = addToLog;
+ config.addToSummary = addToSummary;
+ config.measureValue = measureValue;
+ batchBenchmark(config);
+ }
+}
+
+function onStop() {
+ var config = getConfig();
+
+ if (getBoolFromCheckBox('worker')) {
+ worker.postMessage({type: 'stop', config: config});
+ } else {
+ config.addToLog = addToLog;
+ config.addToSummary = addToSummary;
+ config.measureValue = measureValue;
+ stop(config);
+ }
+}
+
+function init() {
+ addressBox = document.getElementById('address');
+ logBox = document.getElementById('log');
+
+ summaryBox = document.getElementById('summary');
+
+ var scheme = window.location.protocol == 'https:' ? 'wss://' : 'ws://';
+ var defaultAddress = scheme + window.location.host + '/benchmark_helper';
+
+ addressBox.value = defaultAddress;
+
+ addToLog(window.navigator.userAgent.toLowerCase());
+ addToSummary(window.navigator.userAgent.toLowerCase());
+
+ if (!('WebSocket' in window)) {
+ addToLog('WebSocket is not available');
+ }
+}
+</script>
+</head>
+<body onload="init()">
+
+<div id="benchmark_div">
+ url <input type="text" id="address" size="40">
+ <input type="button" value="send" onclick="onSendBenchmark()">
+ <input type="button" value="receive" onclick="onReceiveBenchmark()">
+ <input type="button" value="batch" onclick="onBatchBenchmark()">
+ <input type="button" value="stop" onclick="onStop()">
+
+ <br/>
+
+ <input type="checkbox" id="printsize" checked>
+ <label for="printsize">Print size and time per message</label>
+ <input type="checkbox" id="verifydata" checked>
+ <label for="verifydata">Verify data</label>
+ <input type="checkbox" id="worker">
+ <label for="worker">Run on worker</label>
+
+ <br/>
+
+ Parameters:
+
+ <br/>
+
+ <table>
+ <tr>
+ <td>Num sockets</td>
+ <td><input type="text" id="numsockets" value="1"></td>
+ </tr>
+ <tr>
+ <td>Number of iterations</td>
+ <td><input type="text" id="numiterations" value="1"></td>
+ </tr>
+ <tr>
+ <td>Number of warm-up iterations</td>
+ <td><input type="text" id="numwarmupiterations" value="0"></td>
+ </tr>
+ <tr>
+ <td>Start size</td>
+ <td><input type="text" id="startsize" value="10240"></td>
+ </tr>
+ <tr>
+ <td>Stop threshold</td>
+ <td><input type="text" id="stopthreshold" value="102400000"></td>
+ </tr>
+ <tr>
+ <td>Minimum total</td>
+ <td><input type="text" id="mintotal" value="102400000"></td>
+ </tr>
+ <tr>
+ <td>Multipliers</td>
+ <td><input type="text" id="multipliers" value="5, 2"></td>
+ </tr>
+ </table>
+</div>
+
+<div id="log_div">
+ <textarea
+ id="log" rows="20" style="width: 100%" readonly></textarea>
+</div>
+<div id="summary_div">
+ Summary
+ <textarea
+ id="summary" rows="20" style="width: 100%" readonly></textarea>
+</div>
+
+Note: The effect of RTT is not eliminated.
+
+</body>
+</html>
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark.js b/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark.js
new file mode 100644
index 000000000..d347ae9e1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark.js
@@ -0,0 +1,309 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the COPYING file or at
+// https://developers.google.com/open-source/licenses/bsd
+
+if (typeof importScripts !== "undefined") {
+ // Running on a worker
+ importScripts('util.js', 'util_worker.js');
+}
+
+// Namespace for holding globals.
+var benchmark = {startTimeInMs: 0};
+
+var sockets = [];
+var numEstablishedSockets = 0;
+
+var timerID = null;
+
+function destroySocket(socket) {
+ socket.onopen = null;
+ socket.onmessage = null;
+ socket.onerror = null;
+ socket.onclose = null;
+ socket.close();
+}
+
+function destroyAllSockets() {
+ for (var i = 0; i < sockets.length; ++i) {
+ destroySocket(sockets[i]);
+ }
+ sockets = [];
+}
+
+function sendBenchmarkStep(size, config) {
+ timerID = null;
+
+ var totalSize = 0;
+ var totalReplied = 0;
+
+ var onMessageHandler = function(event) {
+ if (!verifyAcknowledgement(config, event.data, size)) {
+ destroyAllSockets();
+ return;
+ }
+
+ totalReplied += size;
+
+ if (totalReplied < totalSize) {
+ return;
+ }
+
+ calculateAndLogResult(config, size, benchmark.startTimeInMs, totalSize);
+
+ runNextTask(config);
+ };
+
+ for (var i = 0; i < sockets.length; ++i) {
+ var socket = sockets[i];
+ socket.onmessage = onMessageHandler;
+ }
+
+ var dataArray = [];
+
+ while (totalSize < config.minTotal) {
+ var buffer = new ArrayBuffer(size);
+
+ fillArrayBuffer(buffer, 0x61);
+
+ dataArray.push(buffer);
+ totalSize += size;
+ }
+
+ benchmark.startTimeInMs = getTimeStamp();
+
+ totalSize = 0;
+
+ var socketIndex = 0;
+ var dataIndex = 0;
+ while (totalSize < config.minTotal) {
+ var command = ['send'];
+ command.push(config.verifyData ? '1' : '0');
+ sockets[socketIndex].send(command.join(' '));
+ sockets[socketIndex].send(dataArray[dataIndex]);
+ socketIndex = (socketIndex + 1) % sockets.length;
+
+ totalSize += size;
+ ++dataIndex;
+ }
+}
+
+function receiveBenchmarkStep(size, config) {
+ timerID = null;
+
+ var totalSize = 0;
+ var totalReplied = 0;
+
+ var onMessageHandler = function(event) {
+ var bytesReceived = event.data.byteLength;
+ if (bytesReceived != size) {
+ config.addToLog('Expected ' + size + 'B but received ' +
+ bytesReceived + 'B');
+ destroyAllSockets();
+ return;
+ }
+
+ if (config.verifyData && !verifyArrayBuffer(event.data, 0x61)) {
+ config.addToLog('Response verification failed');
+ destroyAllSockets();
+ return;
+ }
+
+ totalReplied += bytesReceived;
+
+ if (totalReplied < totalSize) {
+ return;
+ }
+
+ calculateAndLogResult(config, size, benchmark.startTimeInMs, totalSize);
+
+ runNextTask(config);
+ };
+
+ for (var i = 0; i < sockets.length; ++i) {
+ var socket = sockets[i];
+ socket.binaryType = 'arraybuffer';
+ socket.onmessage = onMessageHandler;
+ }
+
+ benchmark.startTimeInMs = getTimeStamp();
+
+ var socketIndex = 0;
+ while (totalSize < config.minTotal) {
+ sockets[socketIndex].send('receive ' + size);
+ socketIndex = (socketIndex + 1) % sockets.length;
+
+ totalSize += size;
+ }
+}
+
+function createSocket(config) {
+ // TODO(tyoshino): Add TCP warm up.
+ var url = config.prefixUrl;
+
+ config.addToLog('Connect ' + url);
+
+ var socket = new WebSocket(url);
+ socket.onmessage = function(event) {
+ config.addToLog('Unexpected message received. Aborting.');
+ };
+ socket.onerror = function() {
+ config.addToLog('Error');
+ };
+ socket.onclose = function(event) {
+ config.addToLog('Closed');
+ };
+ return socket;
+}
+
+var tasks = [];
+
+function startBenchmark(config) {
+ clearTimeout(timerID);
+ destroyAllSockets();
+
+ numEstablishedSockets = 0;
+
+ for (var i = 0; i < config.numSockets; ++i) {
+ var socket = createSocket(config);
+ socket.onopen = function() {
+ config.addToLog('Opened');
+
+ ++numEstablishedSockets;
+
+ if (numEstablishedSockets == sockets.length) {
+ runNextTask(config);
+ }
+ };
+ sockets.push(socket);
+ }
+}
+
+function runNextTask(config) {
+ var task = tasks.shift();
+ if (task == undefined) {
+ config.addToLog('Finished');
+ destroyAllSockets();
+ return;
+ }
+ timerID = setTimeout(task, 0);
+}
+
+function buildLegendString(config) {
+  var legend = '';
+ if (config.printSize)
+ legend = 'Message size in KiB, Time/message in ms, ';
+ legend += 'Speed in kB/s';
+ return legend;
+}
+
+function getConfigString(config) {
+ return '(WebSocket' +
+ ', ' + (typeof importScripts !== "undefined" ? 'Worker' : 'Main') +
+ ', numSockets=' + config.numSockets +
+ ', numIterations=' + config.numIterations +
+ ', verifyData=' + config.verifyData +
+ ', minTotal=' + config.minTotal +
+ ', numWarmUpIterations=' + config.numWarmUpIterations +
+ ')';
+}
+
+function addTasks(config, stepFunc) {
+ for (var i = 0;
+ i < config.numWarmUpIterations + config.numIterations; ++i) {
+ // Ignore the first |config.numWarmUpIterations| iterations.
+ if (i == config.numWarmUpIterations)
+ addResultClearingTask(config);
+
+ var multiplierIndex = 0;
+ for (var size = config.startSize;
+ size <= config.stopThreshold;
+ ++multiplierIndex) {
+ var task = stepFunc.bind(
+ null,
+ size,
+ config);
+ tasks.push(task);
+ size *= config.multipliers[
+ multiplierIndex % config.multipliers.length];
+ }
+ }
+}
+
+function addResultReportingTask(config, title) {
+ tasks.push(function(){
+ timerID = null;
+ config.addToSummary(title);
+ reportAverageData(config);
+ clearAverageData();
+ runNextTask(config);
+ });
+}
+
+function addResultClearingTask(config) {
+ tasks.push(function(){
+ timerID = null;
+ clearAverageData();
+ runNextTask(config);
+ });
+}
+
+function sendBenchmark(config) {
+ config.addToLog('Send benchmark');
+ config.addToLog(buildLegendString(config));
+
+ tasks = [];
+ clearAverageData();
+ addTasks(config, sendBenchmarkStep);
+ addResultReportingTask(config, 'Send Benchmark ' + getConfigString(config));
+ startBenchmark(config);
+}
+
+function receiveBenchmark(config) {
+ config.addToLog('Receive benchmark');
+ config.addToLog(buildLegendString(config));
+
+ tasks = [];
+ clearAverageData();
+ addTasks(config, receiveBenchmarkStep);
+ addResultReportingTask(config,
+ 'Receive Benchmark ' + getConfigString(config));
+ startBenchmark(config);
+}
+
+function batchBenchmark(config) {
+ config.addToLog('Batch benchmark');
+ config.addToLog(buildLegendString(config));
+
+ tasks = [];
+ clearAverageData();
+ addTasks(config, sendBenchmarkStep);
+ addResultReportingTask(config, 'Send Benchmark ' + getConfigString(config));
+ addTasks(config, receiveBenchmarkStep);
+ addResultReportingTask(config, 'Receive Benchmark ' +
+ getConfigString(config));
+ startBenchmark(config);
+}
+
+function stop(config) {
+ clearTimeout(timerID);
+ timerID = null;
+ config.addToLog('Stopped');
+ destroyAllSockets();
+}
+
+onmessage = function (message) {
+ var config = message.data.config;
+ config.addToLog = workerAddToLog;
+ config.addToSummary = workerAddToSummary;
+ config.measureValue = workerMeasureValue;
+ if (message.data.type === 'sendBenchmark')
+ sendBenchmark(config);
+ else if (message.data.type === 'receiveBenchmark')
+ receiveBenchmark(config);
+ else if (message.data.type === 'batchBenchmark')
+ batchBenchmark(config);
+ else if (message.data.type === 'stop')
+ stop(config);
+};
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark_helper_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark_helper_wsh.py
new file mode 100644
index 000000000..44ad0bfee
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/benchmark_helper_wsh.py
@@ -0,0 +1,85 @@
+# Copyright 2013, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Handler for benchmark.html."""
+
+
+def web_socket_do_extra_handshake(request):
+ # Turn off compression.
+ request.ws_extension_processors = []
+
+
+def web_socket_transfer_data(request):
+ data = ''
+
+ while True:
+ command = request.ws_stream.receive_message()
+ if command is None:
+ return
+
+ if not isinstance(command, unicode):
+            raise ValueError('Invalid command data: ' + command)
+ commands = command.split(' ')
+ if len(commands) == 0:
+ raise ValueError('Invalid command data: ' + command)
+
+ if commands[0] == 'receive':
+ if len(commands) != 2:
+ raise ValueError(
+                    'Illegal number of arguments for receive command: ' +
+ command)
+ size = int(commands[1])
+
+ # Reuse data if possible.
+ if len(data) != size:
+ data = 'a' * size
+ request.ws_stream.send_message(data, binary=True)
+ elif commands[0] == 'send':
+ if len(commands) != 2:
+ raise ValueError(
+                    'Illegal number of arguments for send command: ' +
+ command)
+ verify_data = commands[1] == '1'
+
+ data = request.ws_stream.receive_message()
+ if data is None:
+ raise ValueError('Payload not received')
+ size = len(data)
+
+ if verify_data:
+ if data != 'a' * size:
+ raise ValueError('Payload verification failed')
+
+ request.ws_stream.send_message(str(size))
+ else:
+ raise ValueError('Invalid command: ' + commands[0])
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/close_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/close_wsh.py
new file mode 100644
index 000000000..26b083840
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/close_wsh.py
@@ -0,0 +1,69 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import struct
+
+from mod_pywebsocket import common
+from mod_pywebsocket import stream
+
+
+def web_socket_do_extra_handshake(request):
+ pass
+
+
+def web_socket_transfer_data(request):
+ while True:
+ line = request.ws_stream.receive_message()
+ if line is None:
+ return
+ code, reason = line.split(' ', 1)
+ if code is None or reason is None:
+ return
+ request.ws_stream.close_connection(int(code), reason)
+        # close_connection() initiates the closing handshake. It validates
+        # the code and reason. If you want to send a broken close frame for
+        # a test, the following code will be useful.
+        # > data = struct.pack('!H', int(code)) + reason.encode('UTF-8')
+        # > request.connection.write(stream.create_close_frame(data))
+        # > # Suppress responding to the client's close frame.
+        # > raise Exception("customized server initiated closing handshake")
+
+
+def web_socket_passive_closing_handshake(request):
+ # Simply echo a close status code
+ code, reason = request.ws_close_code, request.ws_close_reason
+
+    # pywebsocket sets a pseudo close code when it receives a close frame
+    # with an empty body.
+ if code == common.STATUS_NO_STATUS_RECEIVED:
+ code = None
+ reason = ''
+ return code, reason
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/console.html b/testing/web-platform/tests/tools/pywebsocket/src/example/console.html
new file mode 100644
index 000000000..ccd6d8f80
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/console.html
@@ -0,0 +1,317 @@
+<!--
+Copyright 2011, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<!--
+A simple console for testing a WebSocket server.
+
+Type an address into the top text input and click connect to establish a
+WebSocket connection. Then, type a message into the bottom text input and
+click send to send the message. Received/sent messages and the connection
+state are shown in the middle textarea.
+-->
+
+<html>
+<head>
+<title>WebSocket console</title>
+<script>
+var socket = null;
+
+var showTimeStamp = false;
+
+var addressBox = null;
+var protocolsBox = null;
+var logBox = null;
+var messageBox = null;
+var fileBox = null;
+var codeBox = null;
+var reasonBox = null;
+
+function getTimeStamp() {
+ return new Date().getTime();
+}
+
+function addToLog(log) {
+ if (showTimeStamp) {
+ logBox.value += '[' + getTimeStamp() + '] ';
+ }
+  logBox.value += log + '\n';
+ // Large enough to keep showing the latest message.
+ logBox.scrollTop = 1000000;
+}
+
+function setbinarytype(binaryType) {
+ if (!socket) {
+ addToLog('Not connected');
+ return;
+ }
+
+ socket.binaryType = binaryType;
+ addToLog('Set binaryType to ' + binaryType);
+}
+
+function send() {
+ if (!socket) {
+ addToLog('Not connected');
+ return;
+ }
+
+ socket.send(messageBox.value);
+ addToLog('> ' + messageBox.value);
+ messageBox.value = '';
+}
+
+function sendfile() {
+ if (!socket) {
+ addToLog('Not connected');
+ return;
+ }
+
+ var files = fileBox.files;
+
+ if (files.length == 0) {
+ addToLog('File not selected');
+ return;
+ }
+
+ socket.send(files[0]);
+ addToLog('> Send ' + files[0].name);
+}
+
+function parseProtocols(protocolsText) {
+ var protocols = protocolsText.split(',');
+ for (var i = 0; i < protocols.length; ++i) {
+ protocols[i] = protocols[i].trim();
+ }
+
+ if (protocols.length == 0) {
+ // Don't pass.
+ protocols = null;
+ } else if (protocols.length == 1) {
+ if (protocols[0].length == 0) {
+ // Don't pass.
+ protocols = null;
+ } else {
+ // Pass as a string.
+ protocols = protocols[0];
+ }
+ }
+
+ return protocols;
+}
+
+function connect() {
+ var url = addressBox.value;
+ var protocols = parseProtocols(protocolsBox.value);
+
+ if ('WebSocket' in window) {
+ if (protocols) {
+ socket = new WebSocket(url, protocols);
+ } else {
+ socket = new WebSocket(url);
+ }
+ } else {
+ return;
+ }
+
+ socket.onopen = function () {
+ var extraInfo = [];
+ if (('protocol' in socket) && socket.protocol) {
+ extraInfo.push('protocol = ' + socket.protocol);
+ }
+ if (('extensions' in socket) && socket.extensions) {
+ extraInfo.push('extensions = ' + socket.extensions);
+ }
+
+ var logMessage = 'Opened';
+ if (extraInfo.length > 0) {
+ logMessage += ' (' + extraInfo.join(', ') + ')';
+ }
+ addToLog(logMessage);
+ };
+ socket.onmessage = function (event) {
+ if (('ArrayBuffer' in window) && (event.data instanceof ArrayBuffer)) {
+ addToLog('< Received an ArrayBuffer of ' + event.data.byteLength +
+ ' bytes')
+ } else if (('Blob' in window) && (event.data instanceof Blob)) {
+ addToLog('< Received a Blob of ' + event.data.size + ' bytes')
+ } else {
+ addToLog('< ' + event.data);
+ }
+ };
+ socket.onerror = function () {
+ addToLog('Error');
+ };
+ socket.onclose = function (event) {
+ var logMessage = 'Closed (';
+ if ((arguments.length == 1) && ('CloseEvent' in window) &&
+ (event instanceof CloseEvent)) {
+ logMessage += 'wasClean = ' + event.wasClean;
+ // code and reason are present only for
+ // draft-ietf-hybi-thewebsocketprotocol-06 and later
+ if ('code' in event) {
+ logMessage += ', code = ' + event.code;
+ }
+ if ('reason' in event) {
+ logMessage += ', reason = ' + event.reason;
+ }
+ } else {
+ logMessage += 'CloseEvent is not available';
+ }
+ addToLog(logMessage + ')');
+ };
+
+ if (protocols) {
+ addToLog('Connect ' + url + ' (protocols = ' + protocols + ')');
+ } else {
+ addToLog('Connect ' + url);
+ }
+}
+
+function closeSocket() {
+ if (!socket) {
+ addToLog('Not connected');
+ return;
+ }
+
+ if (codeBox.value || reasonBox.value) {
+ socket.close(codeBox.value, reasonBox.value);
+ } else {
+ socket.close();
+ }
+}
+
+function printState() {
+ if (!socket) {
+ addToLog('Not connected');
+ return;
+ }
+
+ addToLog(
+ 'url = ' + socket.url +
+ ', readyState = ' + socket.readyState +
+ ', bufferedAmount = ' + socket.bufferedAmount);
+}
+
+function init() {
+ var scheme = window.location.protocol == 'https:' ? 'wss://' : 'ws://';
+ var defaultAddress = scheme + window.location.host + '/echo';
+
+ addressBox = document.getElementById('address');
+ protocolsBox = document.getElementById('protocols');
+ logBox = document.getElementById('log');
+ messageBox = document.getElementById('message');
+ fileBox = document.getElementById('file');
+ codeBox = document.getElementById('code');
+ reasonBox = document.getElementById('reason');
+
+ addressBox.value = defaultAddress;
+
+ if (!('WebSocket' in window)) {
+ addToLog('WebSocket is not available');
+ }
+}
+</script>
+<style type="text/css">
+form {
+ margin: 0px;
+}
+
+#connect_div, #log_div, #send_div, #sendfile_div, #close_div, #printstate_div {
+ padding: 5px;
+ margin: 5px;
+ border-width: 0px 0px 0px 10px;
+ border-style: solid;
+ border-color: silver;
+}
+</style>
+</head>
+<body onload="init()">
+
+<div>
+
+<div id="connect_div">
+ <form action="#" onsubmit="connect(); return false;">
+ url <input type="text" id="address" size="40">
+ <input type="submit" value="connect">
+ <br/>
+ protocols <input type="text" id="protocols" size="20">
+ </form>
+</div>
+
+<div id="log_div">
+ <textarea id="log" rows="10" cols="40" readonly></textarea>
+ <br/>
+ <input type="checkbox"
+ name="showtimestamp"
+ value="showtimestamp"
+ onclick="showTimeStamp = this.checked">Show time stamp
+</div>
+
+<div id="send_div">
+ <form action="#" onsubmit="send(); return false;">
+ data <input type="text" id="message" size="40">
+ <input type="submit" value="send">
+ </form>
+</div>
+
+<div id="sendfile_div">
+ <form action="#" onsubmit="sendfile(); return false;">
+ <input type="file" id="file" size="40">
+ <input type="submit" value="send file">
+ </form>
+
+ Set binaryType
+ <input type="radio"
+ name="binarytype"
+ value="blob"
+ onclick="setbinarytype('blob')" checked>blob
+ <input type="radio"
+ name="binarytype"
+ value="arraybuffer"
+ onclick="setbinarytype('arraybuffer')">arraybuffer
+</div>
+
+<div id="close_div">
+ <form action="#" onsubmit="closeSocket(); return false;">
+ code <input type="text" id="code" size="10">
+ reason <input type="text" id="reason" size="20">
+ <input type="submit" value="close">
+ </form>
+</div>
+
+<div id="printstate_div">
+ <input type="button" value="print state" onclick="printState();">
+</div>
+
+</div>
+
+</body>
+</html>
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/cookie_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/cookie_wsh.py
new file mode 100644
index 000000000..8b327152e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/cookie_wsh.py
@@ -0,0 +1,32 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the COPYING file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+
+import urlparse
+
+
+def _add_set_cookie(request, value):
+ request.extra_headers.append(('Set-Cookie', value))
+
+
+def web_socket_do_extra_handshake(request):
+ components = urlparse.urlparse(request.uri)
+ command = components[4]
+
+ ONE_DAY_LIFE = 'Max-Age=86400'
+
+ if command == 'set':
+ _add_set_cookie(request, '; '.join(['foo=bar', ONE_DAY_LIFE]))
+ elif command == 'set_httponly':
+ _add_set_cookie(request,
+ '; '.join(['httpOnlyFoo=bar', ONE_DAY_LIFE, 'httpOnly']))
+ elif command == 'clear':
+ _add_set_cookie(request, 'foo=0; Max-Age=0')
+ _add_set_cookie(request, 'httpOnlyFoo=0; Max-Age=0')
+
+
+def web_socket_transfer_data(request):
+ pass
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/echo_client.py b/testing/web-platform/tests/tools/pywebsocket/src/example/echo_client.py
new file mode 100755
index 000000000..943ce64e8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/echo_client.py
@@ -0,0 +1,1128 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Simple WebSocket client named echo_client just because of historical reason.
+
+mod_pywebsocket directory must be in PYTHONPATH.
+
+Example Usage:
+
+# server setup
+ % cd $pywebsocket
+ % PYTHONPATH=$cwd/src python ./mod_pywebsocket/standalone.py -p 8880 \
+ -d $cwd/src/example
+
+# run client
+ % PYTHONPATH=$cwd/src python ./src/example/echo_client.py -p 8880 \
+ -s localhost \
+ -o http://localhost -r /echo -m test
+
+or
+
+# run echo client to test IETF HyBi 00 protocol
+ run with --protocol-version=hybi00
+"""
+
+
+import base64
+import codecs
+import logging
+from optparse import OptionParser
+import os
+import random
+import re
+import socket
+import struct
+import sys
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import DeflateFrameExtensionProcessor
+from mod_pywebsocket.extensions import PerMessageDeflateExtensionProcessor
+from mod_pywebsocket.extensions import _PerMessageDeflateFramer
+from mod_pywebsocket.extensions import _parse_window_bits
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+
+
+_TIMEOUT_SEC = 10
+_UNDEFINED_PORT = -1
+
+_UPGRADE_HEADER = 'Upgrade: websocket\r\n'
+_UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n'
+_CONNECTION_HEADER = 'Connection: Upgrade\r\n'
+
+# Special message that tells the echo server to start the closing handshake
+_GOODBYE_MESSAGE = 'Goodbye'
+
+_PROTOCOL_VERSION_HYBI13 = 'hybi13'
+_PROTOCOL_VERSION_HYBI08 = 'hybi08'
+_PROTOCOL_VERSION_HYBI00 = 'hybi00'
+_PROTOCOL_VERSION_HIXIE75 = 'hixie75'
+
+# Constants for the --tls_module flag.
+_TLS_BY_STANDARD_MODULE = 'ssl'
+_TLS_BY_PYOPENSSL = 'pyopenssl'
+
+# Values used by the --tls-version flag.
+_TLS_VERSION_SSL23 = 'ssl23'
+_TLS_VERSION_SSL3 = 'ssl3'
+_TLS_VERSION_TLS1 = 'tls1'
+
+
+class ClientHandshakeError(Exception):
+ pass
+
+
+def _build_method_line(resource):
+ return 'GET %s HTTP/1.1\r\n' % resource
+
+
+def _origin_header(header, origin):
+ # 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character,
+ # and the /origin/ value, converted to ASCII lowercase, to /fields/.
+ return '%s: %s\r\n' % (header, origin.lower())
+
+
+def _format_host_header(host, port, secure):
+ # 4.1 9. Let /hostport/ be an empty string.
+ # 4.1 10. Append the /host/ value, converted to ASCII lowercase, to
+ # /hostport/
+ hostport = host.lower()
+ # 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/
+ # is true, and /port/ is not 443, then append a U+003A COLON character
+ # (:) followed by the value of /port/, expressed as a base-ten integer,
+ # to /hostport/
+ if ((not secure and port != common.DEFAULT_WEB_SOCKET_PORT) or
+ (secure and port != common.DEFAULT_WEB_SOCKET_SECURE_PORT)):
+ hostport += ':' + str(port)
+ # 4.1 12. concatenation of the string "Host:", a U+0020 SPACE
+ # character, and /hostport/, to /fields/.
+ return '%s: %s\r\n' % (common.HOST_HEADER, hostport)
+
+
+def _receive_bytes(socket, length):
+ bytes = []
+ remaining = length
+ while remaining > 0:
+ received_bytes = socket.recv(remaining)
+ if not received_bytes:
+ raise IOError(
+ 'Connection closed before receiving requested length '
+ '(requested %d bytes but received only %d bytes)' %
+ (length, length - remaining))
+ bytes.append(received_bytes)
+ remaining -= len(received_bytes)
+ return ''.join(bytes)
+
+
+def _get_mandatory_header(fields, name):
+ """Gets the value of the header specified by name from fields.
+
+ This function expects that there's only one header with the specified name
+    in fields. Otherwise, raises a ClientHandshakeError.
+ """
+
+ values = fields.get(name.lower())
+ if values is None or len(values) == 0:
+ raise ClientHandshakeError(
+ '%s header not found: %r' % (name, values))
+ if len(values) > 1:
+ raise ClientHandshakeError(
+ 'Multiple %s headers found: %r' % (name, values))
+ return values[0]
+
+
+def _validate_mandatory_header(fields, name,
+ expected_value, case_sensitive=False):
+ """Gets and validates the value of the header specified by name from
+ fields.
+
+    If expected_value is specified, compares the expected and actual values
+    and raises a ClientHandshakeError on failure. You can control the case
+    sensitivity of this comparison with the case_sensitive parameter. This
+    function expects that there's only one header with the specified name in
+    fields. Otherwise, raises a ClientHandshakeError.
+ """
+
+ value = _get_mandatory_header(fields, name)
+
+ if ((case_sensitive and value != expected_value) or
+ (not case_sensitive and value.lower() != expected_value.lower())):
+ raise ClientHandshakeError(
+ 'Illegal value for header %s: %r (expected) vs %r (actual)' %
+ (name, expected_value, value))
+
+
+class _TLSSocket(object):
+ """Wrapper for a TLS connection."""
+
+ def __init__(self,
+ raw_socket, tls_module, tls_version, disable_tls_compression):
+ self._logger = util.get_class_logger(self)
+
+ if tls_module == _TLS_BY_STANDARD_MODULE:
+ if tls_version == _TLS_VERSION_SSL23:
+ version = ssl.PROTOCOL_SSLv23
+ elif tls_version == _TLS_VERSION_SSL3:
+ version = ssl.PROTOCOL_SSLv3
+ elif tls_version == _TLS_VERSION_TLS1:
+ version = ssl.PROTOCOL_TLSv1
+ else:
+ raise ValueError(
+ 'Invalid --tls-version flag: %r' % tls_version)
+
+ if disable_tls_compression:
+ raise ValueError(
+ '--disable-tls-compression is not available for ssl '
+ 'module')
+
+ self._tls_socket = ssl.wrap_socket(raw_socket, ssl_version=version)
+
+ # Print cipher in use. Handshake is done on wrap_socket call.
+ self._logger.info("Cipher: %s", self._tls_socket.cipher())
+ elif tls_module == _TLS_BY_PYOPENSSL:
+ if tls_version == _TLS_VERSION_SSL23:
+ version = OpenSSL.SSL.SSLv23_METHOD
+ elif tls_version == _TLS_VERSION_SSL3:
+ version = OpenSSL.SSL.SSLv3_METHOD
+ elif tls_version == _TLS_VERSION_TLS1:
+ version = OpenSSL.SSL.TLSv1_METHOD
+ else:
+ raise ValueError(
+ 'Invalid --tls-version flag: %r' % tls_version)
+
+ context = OpenSSL.SSL.Context(version)
+
+ if disable_tls_compression:
+ # OP_NO_COMPRESSION is not defined in OpenSSL module.
+ context.set_options(0x00020000)
+
+ self._tls_socket = OpenSSL.SSL.Connection(context, raw_socket)
+ # Client mode.
+ self._tls_socket.set_connect_state()
+ self._tls_socket.setblocking(True)
+
+            # Do the handshake now (not strictly necessary).
+ self._tls_socket.do_handshake()
+ else:
+ raise ValueError('No TLS support module is available')
+
+ def send(self, data):
+ return self._tls_socket.write(data)
+
+ def sendall(self, data):
+ return self._tls_socket.sendall(data)
+
+ def recv(self, size=-1):
+ return self._tls_socket.read(size)
+
+ def close(self):
+ return self._tls_socket.close()
+
+ def getpeername(self):
+ return self._tls_socket.getpeername()
+
+
+class ClientHandshakeBase(object):
+ """A base class for WebSocket opening handshake processors for each
+ protocol version.
+ """
+
+ def __init__(self):
+ self._logger = util.get_class_logger(self)
+
+ def _read_fields(self):
+ # 4.1 32. let /fields/ be a list of name-value pairs, initially empty.
+ fields = {}
+ while True: # "Field"
+ # 4.1 33. let /name/ and /value/ be empty byte arrays
+ name = ''
+ value = ''
+ # 4.1 34. read /name/
+ name = self._read_name()
+ if name is None:
+ break
+ # 4.1 35. read spaces
+ # TODO(tyoshino): Skip only one space as described in the spec.
+ ch = self._skip_spaces()
+ # 4.1 36. read /value/
+ value = self._read_value(ch)
+ # 4.1 37. read a byte from the server
+ ch = _receive_bytes(self._socket, 1)
+ if ch != '\n': # 0x0A
+ raise ClientHandshakeError(
+ 'Expected LF but found %r while reading value %r for '
+ 'header %r' % (ch, value, name))
+ self._logger.debug('Received %r header', name)
+ # 4.1 38. append an entry to the /fields/ list that has the name
+ # given by the string obtained by interpreting the /name/ byte
+ # array as a UTF-8 stream and the value given by the string
+ # obtained by interpreting the /value/ byte array as a UTF-8 byte
+ # stream.
+ fields.setdefault(name, []).append(value)
+ # 4.1 39. return to the "Field" step above
+ return fields
+
+ def _read_name(self):
+ # 4.1 33. let /name/ be empty byte arrays
+ name = ''
+ while True:
+ # 4.1 34. read a byte from the server
+ ch = _receive_bytes(self._socket, 1)
+ if ch == '\r': # 0x0D
+ return None
+ elif ch == '\n': # 0x0A
+ raise ClientHandshakeError(
+ 'Unexpected LF when reading header name %r' % name)
+ elif ch == ':': # 0x3A
+ return name
+            elif ch >= 'A' and ch <= 'Z':  # Range 0x41 to 0x5A
+ ch = chr(ord(ch) + 0x20)
+ name += ch
+ else:
+ name += ch
+
+ def _skip_spaces(self):
+ # 4.1 35. read a byte from the server
+ while True:
+ ch = _receive_bytes(self._socket, 1)
+ if ch == ' ': # 0x20
+ continue
+ return ch
+
+ def _read_value(self, ch):
+ # 4.1 33. let /value/ be empty byte arrays
+ value = ''
+ # 4.1 36. read a byte from server.
+ while True:
+ if ch == '\r': # 0x0D
+ return value
+ elif ch == '\n': # 0x0A
+ raise ClientHandshakeError(
+ 'Unexpected LF when reading header value %r' % value)
+ else:
+ value += ch
+ ch = _receive_bytes(self._socket, 1)
+
+
+def _get_permessage_deflate_framer(extension_response):
+ """Validate the response and return a framer object using the parameters in
+ the response. This method doesn't accept the server_.* parameters.
+ """
+
+ client_max_window_bits = None
+ client_no_context_takeover = None
+
+ client_max_window_bits_name = (
+ PerMessageDeflateExtensionProcessor.
+ _CLIENT_MAX_WINDOW_BITS_PARAM)
+ client_no_context_takeover_name = (
+ PerMessageDeflateExtensionProcessor.
+ _CLIENT_NO_CONTEXT_TAKEOVER_PARAM)
+
+ # We didn't send any server_.* parameter.
+ # Handle those parameters as invalid if found in the response.
+
+ for param_name, param_value in extension_response.get_parameters():
+ if param_name == client_max_window_bits_name:
+ if client_max_window_bits is not None:
+ raise ClientHandshakeError(
+ 'Multiple %s found' % client_max_window_bits_name)
+
+ parsed_value = _parse_window_bits(param_value)
+ if parsed_value is None:
+ raise ClientHandshakeError(
+ 'Bad %s: %r' %
+ (client_max_window_bits_name, param_value))
+ client_max_window_bits = parsed_value
+ elif param_name == client_no_context_takeover_name:
+ if client_no_context_takeover is not None:
+ raise ClientHandshakeError(
+ 'Multiple %s found' % client_no_context_takeover_name)
+
+ if param_value is not None:
+ raise ClientHandshakeError(
+ 'Bad %s: Has value %r' %
+ (client_no_context_takeover_name, param_value))
+ client_no_context_takeover = True
+
+ if client_no_context_takeover is None:
+ client_no_context_takeover = False
+
+ return _PerMessageDeflateFramer(client_max_window_bits,
+ client_no_context_takeover)
+
+
+class ClientHandshakeProcessor(ClientHandshakeBase):
+ """WebSocket opening handshake processor for
+ draft-ietf-hybi-thewebsocketprotocol-06 and later.
+ """
+
+ def __init__(self, socket, options):
+ super(ClientHandshakeProcessor, self).__init__()
+
+ self._socket = socket
+ self._options = options
+
+ self._logger = util.get_class_logger(self)
+
+ def handshake(self):
+ """Performs opening handshake on the specified socket.
+
+ Raises:
+ ClientHandshakeError: handshake failed.
+ """
+
+ request_line = _build_method_line(self._options.resource)
+ self._logger.debug('Client\'s opening handshake Request-Line: %r',
+ request_line)
+ self._socket.sendall(request_line)
+
+ fields = []
+ fields.append(_format_host_header(
+ self._options.server_host,
+ self._options.server_port,
+ self._options.use_tls))
+ fields.append(_UPGRADE_HEADER)
+ fields.append(_CONNECTION_HEADER)
+ if self._options.origin is not None:
+ if self._options.protocol_version == _PROTOCOL_VERSION_HYBI08:
+ fields.append(_origin_header(
+ common.SEC_WEBSOCKET_ORIGIN_HEADER,
+ self._options.origin))
+ else:
+ fields.append(_origin_header(common.ORIGIN_HEADER,
+ self._options.origin))
+
+ original_key = os.urandom(16)
+ self._key = base64.b64encode(original_key)
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_KEY_HEADER,
+ self._key,
+ util.hexify(original_key))
+ fields.append(
+ '%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY_HEADER, self._key))
+
+ if self._options.version_header > 0:
+ fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
+ self._options.version_header))
+ elif self._options.protocol_version == _PROTOCOL_VERSION_HYBI08:
+ fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
+ common.VERSION_HYBI08))
+ else:
+ fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
+ common.VERSION_HYBI_LATEST))
+
+ extensions_to_request = []
+
+ if self._options.deflate_frame:
+ extensions_to_request.append(
+ common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION))
+
+ if self._options.use_permessage_deflate:
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ # Accept the client_max_window_bits extension parameter by default.
+ extension.add_parameter(
+ PerMessageDeflateExtensionProcessor.
+ _CLIENT_MAX_WINDOW_BITS_PARAM,
+ None)
+ extensions_to_request.append(extension)
+
+ if len(extensions_to_request) != 0:
+ fields.append(
+ '%s: %s\r\n' %
+ (common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+ common.format_extensions(extensions_to_request)))
+
+ for field in fields:
+ self._socket.sendall(field)
+
+ self._socket.sendall('\r\n')
+
+ self._logger.debug('Sent client\'s opening handshake headers: %r',
+ fields)
+ self._logger.debug('Start reading Status-Line')
+
+ status_line = ''
+ while True:
+ ch = _receive_bytes(self._socket, 1)
+ status_line += ch
+ if ch == '\n':
+ break
+
+ m = re.match('HTTP/\\d+\.\\d+ (\\d\\d\\d) .*\r\n', status_line)
+ if m is None:
+ raise ClientHandshakeError(
+ 'Wrong status line format: %r' % status_line)
+ status_code = m.group(1)
+ if status_code != '101':
+ self._logger.debug('Unexpected status code %s with following '
+ 'headers: %r', status_code, self._read_fields())
+ raise ClientHandshakeError(
+ 'Expected HTTP status code 101 but found %r' % status_code)
+
+ self._logger.debug('Received valid Status-Line')
+ self._logger.debug('Start reading headers until we see an empty line')
+
+ fields = self._read_fields()
+
+ ch = _receive_bytes(self._socket, 1)
+ if ch != '\n': # 0x0A
+ raise ClientHandshakeError(
+                'Expected LF but found %r while reading the empty line that '
+                'terminates the header fields' % ch)
+
+ self._logger.debug('Received an empty line')
+ self._logger.debug('Server\'s opening handshake headers: %r', fields)
+
+ _validate_mandatory_header(
+ fields,
+ common.UPGRADE_HEADER,
+ common.WEBSOCKET_UPGRADE_TYPE,
+ False)
+
+ _validate_mandatory_header(
+ fields,
+ common.CONNECTION_HEADER,
+ common.UPGRADE_CONNECTION_TYPE,
+ False)
+
+ accept = _get_mandatory_header(
+ fields, common.SEC_WEBSOCKET_ACCEPT_HEADER)
+
+ # Validate
+ try:
+ binary_accept = base64.b64decode(accept)
+ except TypeError, e:
+            raise ClientHandshakeError(
+ 'Illegal value for header %s: %r' %
+ (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
+
+ if len(binary_accept) != 20:
+ raise ClientHandshakeError(
+                'Decoded value of %s is not 20 bytes long' %
+ common.SEC_WEBSOCKET_ACCEPT_HEADER)
+
+ self._logger.debug(
+            'Response for challenge: %r (%s)',
+ accept, util.hexify(binary_accept))
+
+ binary_expected_accept = util.sha1_hash(
+ self._key + common.WEBSOCKET_ACCEPT_UUID).digest()
+ expected_accept = base64.b64encode(binary_expected_accept)
+
+ self._logger.debug(
+ 'Expected response for challenge: %r (%s)',
+ expected_accept, util.hexify(binary_expected_accept))
+
+ if accept != expected_accept:
+ raise ClientHandshakeError(
+ 'Invalid %s header: %r (expected: %s)' %
+ (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept, expected_accept))
+
+ deflate_frame_accepted = False
+ permessage_deflate_accepted = False
+
+ extensions_header = fields.get(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER.lower())
+ accepted_extensions = []
+ if extensions_header is not None and len(extensions_header) != 0:
+ accepted_extensions = common.parse_extensions(extensions_header[0])
+
+ # TODO(bashi): Support the new style perframe compression extension.
+ for extension in accepted_extensions:
+ extension_name = extension.name()
+ if (extension_name == common.DEFLATE_FRAME_EXTENSION and
+ self._options.deflate_frame):
+ deflate_frame_accepted = True
+ processor = DeflateFrameExtensionProcessor(extension)
+ unused_extension_response = processor.get_extension_response()
+ self._options.deflate_frame = processor
+ continue
+ elif (extension_name == common.PERMESSAGE_DEFLATE_EXTENSION and
+ self._options.use_permessage_deflate):
+ permessage_deflate_accepted = True
+
+ framer = _get_permessage_deflate_framer(extension)
+ framer.set_compress_outgoing_enabled(True)
+ self._options.use_permessage_deflate = framer
+ continue
+
+ raise ClientHandshakeError(
+ 'Unexpected extension %r' % extension_name)
+
+ if (self._options.deflate_frame and not deflate_frame_accepted):
+ raise ClientHandshakeError(
+ 'Requested %s, but the server rejected it' %
+ common.DEFLATE_FRAME_EXTENSION)
+
+ if (self._options.use_permessage_deflate and
+ not permessage_deflate_accepted):
+ raise ClientHandshakeError(
+ 'Requested %s, but the server rejected it' %
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+
+ # TODO(tyoshino): Handle Sec-WebSocket-Protocol
+ # TODO(tyoshino): Handle Cookie, etc.
+
+
+class ClientHandshakeProcessorHybi00(ClientHandshakeBase):
+ """WebSocket opening handshake processor for
+ draft-ietf-hybi-thewebsocketprotocol-00 (equivalent to
+ draft-hixie-thewebsocketprotocol-76).
+ """
+
+ def __init__(self, socket, options):
+ super(ClientHandshakeProcessorHybi00, self).__init__()
+
+ self._socket = socket
+ self._options = options
+
+ self._logger = util.get_class_logger(self)
+
+ if (self._options.deflate_frame or
+ self._options.use_permessage_deflate):
+ logging.critical('HyBi 00 doesn\'t support extensions.')
+ sys.exit(1)
+
+ def handshake(self):
+ """Performs opening handshake on the specified socket.
+
+ Raises:
+ ClientHandshakeError: handshake failed.
+ """
+
+ # 4.1 5. send request line.
+ self._socket.sendall(_build_method_line(self._options.resource))
+ # 4.1 6. Let /fields/ be an empty list of strings.
+ fields = []
+ # 4.1 7. Add the string "Upgrade: WebSocket" to /fields/.
+ fields.append(_UPGRADE_HEADER_HIXIE75)
+ # 4.1 8. Add the string "Connection: Upgrade" to /fields/.
+ fields.append(_CONNECTION_HEADER)
+ # 4.1 9-12. Add Host: field to /fields/.
+ fields.append(_format_host_header(
+ self._options.server_host,
+ self._options.server_port,
+ self._options.use_tls))
+ # 4.1 13. Add Origin: field to /fields/.
+ if not self._options.origin:
+ raise ClientHandshakeError(
+ 'Specify the origin of the connection by --origin flag')
+ fields.append(_origin_header(common.ORIGIN_HEADER,
+ self._options.origin))
+ # TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/.
+ # TODO: 4.1 15 Add cookie headers to /fields/.
+
+ # 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/.
+ self._number1, key1 = self._generate_sec_websocket_key()
+ self._logger.debug('Number1: %d', self._number1)
+ fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY1_HEADER, key1))
+ self._number2, key2 = self._generate_sec_websocket_key()
+ self._logger.debug('Number2: %d', self._number2)
+ fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY2_HEADER, key2))
+
+ fields.append('%s: 0\r\n' % common.SEC_WEBSOCKET_DRAFT_HEADER)
+
+ # 4.1 24. For each string in /fields/, in a random order: send the
+ # string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE
+ # RETURN U+000A LINE FEED character pair (CRLF).
+ random.shuffle(fields)
+ for field in fields:
+ self._socket.sendall(field)
+ # 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED
+ # character pair (CRLF).
+ self._socket.sendall('\r\n')
+ # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
+ # equivalently, a random 64 bit integer encoded in a big-endian order).
+ self._key3 = self._generate_key3()
+ # 4.1 27. send /key3/ to the server.
+ self._socket.sendall(self._key3)
+ self._logger.debug(
+ 'Key3: %r (%s)', self._key3, util.hexify(self._key3))
+
+ self._logger.info('Sent handshake')
+
+ # 4.1 28. Read bytes from the server until either the connection
+ # closes, or a 0x0A byte is read. let /field/ be these bytes, including
+ # the 0x0A bytes.
+ field = ''
+ while True:
+ ch = _receive_bytes(self._socket, 1)
+ field += ch
+ if ch == '\n':
+ break
+ # if /field/ is not at least seven bytes long, or if the last
+ # two bytes aren't 0x0D and 0x0A respectively, or if it does not
+ # contain at least two 0x20 bytes, then fail the WebSocket connection
+ # and abort these steps.
+ if len(field) < 7 or not field.endswith('\r\n'):
+ raise ClientHandshakeError('Wrong status line: %r' % field)
+ m = re.match('[^ ]* ([^ ]*) .*', field)
+ if m is None:
+ raise ClientHandshakeError(
+ 'No HTTP status code found in status line: %r' % field)
+ # 4.1 29. let /code/ be the substring of /field/ that starts from the
+ # byte after the first 0x20 byte, and ends with the byte before the
+ # second 0x20 byte.
+ code = m.group(1)
+ # 4.1 30. if /code/ is not three bytes long, or if any of the bytes in
+        # /code/ are not in the range 0x30 to 0x39, then fail the WebSocket
+ # connection and abort these steps.
+ if not re.match('[0-9][0-9][0-9]', code):
+ raise ClientHandshakeError(
+                'HTTP status code %r is not three digits in status line: %r' %
+ (code, field))
+ # 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the
+ # next step.
+ if code != '101':
+ raise ClientHandshakeError(
+ 'Expected HTTP status code 101 but found %r in status line: '
+ '%r' % (code, field))
+ # 4.1 32-39. read fields into /fields/
+ fields = self._read_fields()
+ # 4.1 40. _Fields processing_
+ # read a byte from server
+ ch = _receive_bytes(self._socket, 1)
+ if ch != '\n': # 0x0A
+ raise ClientHandshakeError('Expected LF but found %r' % ch)
+ # 4.1 41. check /fields/
+ # TODO(ukai): protocol
+ # if the entry's name is "upgrade"
+ # if the value is not exactly equal to the string "WebSocket",
+ # then fail the WebSocket connection and abort these steps.
+ _validate_mandatory_header(
+ fields,
+ common.UPGRADE_HEADER,
+ common.WEBSOCKET_UPGRADE_TYPE_HIXIE75,
+ True)
+ # if the entry's name is "connection"
+ # if the value, converted to ASCII lowercase, is not exactly equal
+ # to the string "upgrade", then fail the WebSocket connection and
+ # abort these steps.
+ _validate_mandatory_header(
+ fields,
+ common.CONNECTION_HEADER,
+ common.UPGRADE_CONNECTION_TYPE,
+ False)
+
+ origin = _get_mandatory_header(
+ fields, common.SEC_WEBSOCKET_ORIGIN_HEADER)
+
+ location = _get_mandatory_header(
+ fields, common.SEC_WEBSOCKET_LOCATION_HEADER)
+
+ # TODO(ukai): check origin, location, cookie, ..
+
+ # 4.1 42. let /challenge/ be the concatenation of /number_1/,
+ # expressed as a big endian 32 bit integer, /number_2/, expressed
+ # as big endian 32 bit integer, and the eight bytes of /key_3/ in the
+ # order they were sent on the wire.
+ challenge = struct.pack('!I', self._number1)
+ challenge += struct.pack('!I', self._number2)
+ challenge += self._key3
+
+ self._logger.debug(
+ 'Challenge: %r (%s)', challenge, util.hexify(challenge))
+
+ # 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a
+ # big-endian 128 bit string.
+ expected = util.md5_hash(challenge).digest()
+ self._logger.debug(
+ 'Expected challenge response: %r (%s)',
+ expected, util.hexify(expected))
+
+ # 4.1 44. read sixteen bytes from the server.
+ # let /reply/ be those bytes.
+ reply = _receive_bytes(self._socket, 16)
+ self._logger.debug(
+ 'Actual challenge response: %r (%s)', reply, util.hexify(reply))
+
+ # 4.1 45. if /reply/ does not exactly equal /expected/, then fail
+ # the WebSocket connection and abort these steps.
+ if expected != reply:
+ raise ClientHandshakeError(
+ 'Bad challenge response: %r (expected) != %r (actual)' %
+ (expected, reply))
+ # 4.1 46. The *WebSocket connection is established*.
+
+ def _generate_sec_websocket_key(self):
+ # 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive.
+ spaces = random.randint(1, 12)
+ # 4.1 17. let /max_n/ be the largest integer not greater than
+ # 4,294,967,295 divided by /spaces_n/.
+ maxnum = 4294967295 / spaces
+ # 4.1 18. let /number_n/ be a random integer from 0 to /max_n/
+ # inclusive.
+ number = random.randint(0, maxnum)
+ # 4.1 19. let /product_n/ be the result of multiplying /number_n/ and
+ # /spaces_n/ together.
+ product = number * spaces
+ # 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed
+ # in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to
+ # U+0039 DIGIT NINE (9).
+ key = str(product)
+ # 4.1 21. insert between one and twelve random characters from the
+ # range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random
+ # positions.
+ available_chars = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
+ n = random.randint(1, 12)
+ for _ in xrange(n):
+ ch = random.choice(available_chars)
+ pos = random.randint(0, len(key))
+ key = key[0:pos] + chr(ch) + key[pos:]
+ # 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at
+ # random positions other than start or end of the string.
+ for _ in xrange(spaces):
+ pos = random.randint(1, len(key) - 1)
+ key = key[0:pos] + ' ' + key[pos:]
+ return number, key
+
+ def _generate_key3(self):
+ # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
+ # equivalently, a random 64 bit integer encoded in a big-endian order).
+ return ''.join([chr(random.randint(0, 255)) for _ in xrange(8)])
+
+
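To make the HyBi-00 arithmetic above concrete: each Sec-WebSocket-Key<n> hides /number_n/ (keep the digits to get /product_n/, divide by the number of spaces), and the server's 16-byte reply must be the MD5 of /number_1/ and /number_2/ as big-endian 32-bit integers followed by /key_3/, which is exactly what handshake() recomputes and compares. A minimal sketch in the same Python 2 style, standard library only:

    import struct
    from hashlib import md5


    def recover_number(key):
        """Invert _generate_sec_websocket_key(): the inserted characters are
        never digits, so the digits spell /product_n/; dividing by the space
        count gives back /number_n/."""
        product = int(''.join(c for c in key if c.isdigit()))
        return product / key.count(' ')  # Python 2 integer division


    def expected_challenge_response(number1, number2, key3):
        """The 16-byte reply checked in steps 4.1 42-45 above."""
        challenge = (struct.pack('!I', number1) +
                     struct.pack('!I', number2) +
                     key3)
        return md5(challenge).digest()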
+class ClientConnection(object):
+ """A wrapper for socket object to provide the mp_conn interface.
+ mod_pywebsocket library is designed to be working on Apache mod_python's
+ mp_conn object.
+ """
+
+ def __init__(self, socket):
+ self._socket = socket
+
+ def write(self, data):
+ self._socket.sendall(data)
+
+ def read(self, n):
+ return self._socket.recv(n)
+
+ def get_remote_addr(self):
+ return self._socket.getpeername()
+ remote_addr = property(get_remote_addr)
+
+
+class ClientRequest(object):
+ """A wrapper class just to make it able to pass a socket object to
+ functions that expect a mp_request object.
+ """
+
+ def __init__(self, socket):
+ self._logger = util.get_class_logger(self)
+
+ self._socket = socket
+ self.connection = ClientConnection(socket)
+
+
+def _import_ssl():
+ global ssl
+ try:
+ import ssl
+ return True
+ except ImportError:
+ return False
+
+
+def _import_pyopenssl():
+ global OpenSSL
+ try:
+ import OpenSSL.SSL
+ return True
+ except ImportError:
+ return False
+
+
+class EchoClient(object):
+ """WebSocket echo client."""
+
+ def __init__(self, options):
+ self._options = options
+ self._socket = None
+
+ self._logger = util.get_class_logger(self)
+
+ def run(self):
+ """Run the client.
+
+        Shake hands, then repeatedly send a message and receive its echo.
+ """
+
+ self._socket = socket.socket()
+ self._socket.settimeout(self._options.socket_timeout)
+ try:
+ self._socket.connect((self._options.server_host,
+ self._options.server_port))
+ if self._options.use_tls:
+ self._socket = _TLSSocket(
+ self._socket,
+ self._options.tls_module,
+ self._options.tls_version,
+ self._options.disable_tls_compression)
+
+ version = self._options.protocol_version
+
+ if (version == _PROTOCOL_VERSION_HYBI08 or
+ version == _PROTOCOL_VERSION_HYBI13):
+ self._handshake = ClientHandshakeProcessor(
+ self._socket, self._options)
+ elif version == _PROTOCOL_VERSION_HYBI00:
+ self._handshake = ClientHandshakeProcessorHybi00(
+ self._socket, self._options)
+ else:
+ raise ValueError(
+ 'Invalid --protocol-version flag: %r' % version)
+
+ self._handshake.handshake()
+
+ self._logger.info('Connection established')
+
+ request = ClientRequest(self._socket)
+
+ version_map = {
+ _PROTOCOL_VERSION_HYBI08: common.VERSION_HYBI08,
+ _PROTOCOL_VERSION_HYBI13: common.VERSION_HYBI13,
+ _PROTOCOL_VERSION_HYBI00: common.VERSION_HYBI00}
+ request.ws_version = version_map[version]
+
+ if (version == _PROTOCOL_VERSION_HYBI08 or
+ version == _PROTOCOL_VERSION_HYBI13):
+ stream_option = StreamOptions()
+ stream_option.mask_send = True
+ stream_option.unmask_receive = False
+
+ if self._options.deflate_frame is not False:
+ processor = self._options.deflate_frame
+ processor.setup_stream_options(stream_option)
+
+ if self._options.use_permessage_deflate is not False:
+ framer = self._options.use_permessage_deflate
+ framer.setup_stream_options(stream_option)
+
+ self._stream = Stream(request, stream_option)
+ elif version == _PROTOCOL_VERSION_HYBI00:
+ self._stream = StreamHixie75(request, True)
+
+ for line in self._options.message.split(','):
+ self._stream.send_message(line)
+ if self._options.verbose:
+ print 'Send: %s' % line
+ try:
+ received = self._stream.receive_message()
+
+ if self._options.verbose:
+ print 'Recv: %s' % received
+ except Exception, e:
+ if self._options.verbose:
+ print 'Error: %s' % e
+ raise
+
+ self._do_closing_handshake()
+ finally:
+ self._socket.close()
+
+ def _do_closing_handshake(self):
+ """Perform closing handshake using the specified closing frame."""
+
+ if self._options.message.split(',')[-1] == _GOODBYE_MESSAGE:
+ # requested server initiated closing handshake, so
+ # expecting closing handshake message from server.
+ self._logger.info('Wait for server-initiated closing handshake')
+ message = self._stream.receive_message()
+ if message is None:
+ print 'Recv close'
+ print 'Send ack'
+ self._logger.info(
+ 'Received closing handshake and sent ack')
+ return
+ print 'Send close'
+ self._stream.close_connection()
+ self._logger.info('Sent closing handshake')
+ print 'Recv ack'
+ self._logger.info('Received ack')
+
+
+def main():
+ sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
+
+ parser = OptionParser()
+    # We accept --command_line_flag style flags (the style used by Google
+    # gflags) in addition to the common --command-line-flag style flags.
+ parser.add_option('-s', '--server-host', '--server_host',
+ dest='server_host', type='string',
+ default='localhost', help='server host')
+ parser.add_option('-p', '--server-port', '--server_port',
+ dest='server_port', type='int',
+ default=_UNDEFINED_PORT, help='server port')
+ parser.add_option('-o', '--origin', dest='origin', type='string',
+ default=None, help='origin')
+ parser.add_option('-r', '--resource', dest='resource', type='string',
+ default='/echo', help='resource path')
+    parser.add_option('-m', '--message', dest='message', type='string',
+                      help=('comma-separated messages to send. Sending %s as '
+                            'the last message makes the server initiate the '
+                            'closing handshake.' % _GOODBYE_MESSAGE))
+ parser.add_option('-q', '--quiet', dest='verbose', action='store_false',
+ default=True, help='suppress messages')
+    parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
+                      default=False, help='use TLS (wss://). By default, '
+                      'it looks for the ssl and pyOpenSSL modules and uses '
+                      'whichever is found first. Use the --tls-module option '
+                      'to specify which module to use')
+ parser.add_option('--tls-module', '--tls_module', dest='tls_module',
+ type='choice',
+ choices=[_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
+ help='Use ssl module if "%s" is specified. '
+ 'Use pyOpenSSL module if "%s" is specified' %
+ (_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
+ parser.add_option('--tls-version', '--tls_version',
+ dest='tls_version',
+ type='string', default=_TLS_VERSION_SSL23,
+ help='TLS/SSL version to use. One of \'' +
+ _TLS_VERSION_SSL23 + '\' (SSL version 2 or 3), \'' +
+ _TLS_VERSION_SSL3 + '\' (SSL version 3), \'' +
+ _TLS_VERSION_TLS1 + '\' (TLS version 1)')
+ parser.add_option('--disable-tls-compression', '--disable_tls_compression',
+ dest='disable_tls_compression',
+ action='store_true', default=False,
+ help='Disable TLS compression. Available only when '
+ 'pyOpenSSL module is used.')
+ parser.add_option('-k', '--socket-timeout', '--socket_timeout',
+ dest='socket_timeout', type='int', default=_TIMEOUT_SEC,
+ help='Timeout(sec) for sockets')
+ parser.add_option('--draft75', dest='draft75',
+ action='store_true', default=False,
+ help='Obsolete option. Don\'t use this.')
+ parser.add_option('--protocol-version', '--protocol_version',
+ dest='protocol_version',
+ type='string', default=_PROTOCOL_VERSION_HYBI13,
+ help='WebSocket protocol version to use. One of \'' +
+ _PROTOCOL_VERSION_HYBI13 + '\', \'' +
+ _PROTOCOL_VERSION_HYBI08 + '\', \'' +
+ _PROTOCOL_VERSION_HYBI00 + '\'')
+ parser.add_option('--version-header', '--version_header',
+ dest='version_header',
+ type='int', default=-1,
+ help='Specify Sec-WebSocket-Version header value')
+ parser.add_option('--deflate-frame', '--deflate_frame',
+ dest='deflate_frame',
+ action='store_true', default=False,
+ help='Use the deflate-frame extension.')
+ parser.add_option('--use-permessage-deflate', '--use_permessage_deflate',
+ dest='use_permessage_deflate',
+ action='store_true', default=False,
+ help='Use the permessage-deflate extension.')
+ parser.add_option('--log-level', '--log_level', type='choice',
+ dest='log_level', default='warn',
+ choices=['debug', 'info', 'warn', 'error', 'critical'],
+ help='Log level.')
+
+ (options, unused_args) = parser.parse_args()
+
+ logging.basicConfig(level=logging.getLevelName(options.log_level.upper()))
+
+ if options.draft75:
+ logging.critical('--draft75 option is obsolete.')
+ sys.exit(1)
+
+ if options.protocol_version == _PROTOCOL_VERSION_HIXIE75:
+ logging.critical(
+            'Value %s is obsolete for the --protocol_version option' %
+ _PROTOCOL_VERSION_HIXIE75)
+ sys.exit(1)
+
+ if options.use_tls:
+ if options.tls_module is None:
+ if _import_ssl():
+ options.tls_module = _TLS_BY_STANDARD_MODULE
+ logging.debug('Using ssl module')
+ elif _import_pyopenssl():
+ options.tls_module = _TLS_BY_PYOPENSSL
+ logging.debug('Using pyOpenSSL module')
+ else:
+ logging.critical(
+ 'TLS support requires ssl or pyOpenSSL module.')
+ sys.exit(1)
+ elif options.tls_module == _TLS_BY_STANDARD_MODULE:
+ if not _import_ssl():
+ logging.critical('ssl module is not available')
+ sys.exit(1)
+ elif options.tls_module == _TLS_BY_PYOPENSSL:
+ if not _import_pyopenssl():
+ logging.critical('pyOpenSSL module is not available')
+ sys.exit(1)
+ else:
+ logging.critical('Invalid --tls-module option: %r',
+ options.tls_module)
+ sys.exit(1)
+
+ if (options.disable_tls_compression and
+ options.tls_module != _TLS_BY_PYOPENSSL):
+ logging.critical('You can disable TLS compression only when '
+ 'pyOpenSSL module is used.')
+ sys.exit(1)
+ else:
+ if options.tls_module is not None:
+ logging.critical('Use --tls-module option only together with '
+ '--use-tls option.')
+ sys.exit(1)
+
+ if options.disable_tls_compression:
+ logging.critical('Use --disable-tls-compression only together '
+ 'with --use-tls option.')
+ sys.exit(1)
+
+ # Default port number depends on whether TLS is used.
+ if options.server_port == _UNDEFINED_PORT:
+ if options.use_tls:
+ options.server_port = common.DEFAULT_WEB_SOCKET_SECURE_PORT
+ else:
+ options.server_port = common.DEFAULT_WEB_SOCKET_PORT
+
+ # optparse doesn't seem to handle non-ascii default values.
+ # Set default message here.
+ if not options.message:
+ options.message = u'Hello,\u65e5\u672c' # "Japan" in Japanese
+
+ EchoClient(options).run()
+
+
+if __name__ == '__main__':
+ main()
+
+
+# vi:sts=4 sw=4 et
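As a usage sketch for the client above: the existing option parser can be driven directly instead of building an options object by hand. This assumes the file is importable as echo_client, that a pywebsocket echo handler (such as echo_wsh.py below) is listening on the chosen port, and that the client's goodbye constant matches echo_wsh.py's u'Goodbye'; the port and messages are illustrative:

    import sys

    import echo_client  # assumes echo_client.py is on the Python path

    # Equivalent to: python echo_client.py -p 9998 -m Hello,Goodbye
    # Sending 'Goodbye' last asks the server to initiate the closing
    # handshake, which _do_closing_handshake() above then acknowledges.
    sys.argv = ['echo_client.py', '-p', '9998', '-m', 'Hello,Goodbye']
    echo_client.main()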
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/echo_noext_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/echo_noext_wsh.py
new file mode 100644
index 000000000..1df515122
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/echo_noext_wsh.py
@@ -0,0 +1,61 @@
+# Copyright 2013, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+_GOODBYE_MESSAGE = u'Goodbye'
+
+
+def web_socket_do_extra_handshake(request):
+ """Received Sec-WebSocket-Extensions header value is parsed into
+ request.ws_requested_extensions. pywebsocket creates extension
+ processors using it before do_extra_handshake call and never looks at it
+ after the call.
+
+ To reject requested extensions, clear the processor list.
+ """
+
+ request.ws_extension_processors = []
+
+
+def web_socket_transfer_data(request):
+ """Echo. Same as echo_wsh.py."""
+
+ while True:
+ line = request.ws_stream.receive_message()
+ if line is None:
+ return
+ if isinstance(line, unicode):
+ request.ws_stream.send_message(line, binary=False)
+ if line == _GOODBYE_MESSAGE:
+ return
+ else:
+ request.ws_stream.send_message(line, binary=True)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/echo_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/echo_wsh.py
new file mode 100644
index 000000000..38646c32c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/echo_wsh.py
@@ -0,0 +1,54 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+_GOODBYE_MESSAGE = u'Goodbye'
+
+
+def web_socket_do_extra_handshake(request):
+ # This example handler accepts any request. See origin_check_wsh.py for how
+ # to reject access from untrusted scripts based on origin value.
+
+ pass # Always accept.
+
+
+def web_socket_transfer_data(request):
+ while True:
+ line = request.ws_stream.receive_message()
+ if line is None:
+ return
+ if isinstance(line, unicode):
+ request.ws_stream.send_message(line, binary=False)
+ if line == _GOODBYE_MESSAGE:
+ return
+ else:
+ request.ws_stream.send_message(line, binary=True)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/eventsource.cgi b/testing/web-platform/tests/tools/pywebsocket/src/example/eventsource.cgi
new file mode 100755
index 000000000..adddf237c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/eventsource.cgi
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+
+# Copyright 2013, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This CGI script generates text/event-stream type data stream for testing
+the Server-Sent Events.
+
+It will only work correctly with HTTP servers that do not buffer the output of
+CGI scripts.
+"""
+
+
+import sys
+import time
+
+sys.stdout.write('Content-type: text/event-stream\r\n\r\n')
+
+id = 0
+
+while True:
+ sys.stdout.write('data: Hello\r\nid: %d\r\n\r\n' % id)
+ sys.stdout.flush()
+
+ id = id + 1
+
+ time.sleep(1)
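The stream written above is a sequence of events, each consisting of 'data:' and 'id:' fields and terminated by a blank line. A minimal parsing sketch (not a full Server-Sent Events parser; it only handles the two fields this script emits and the CRLF separators it writes):

    def parse_event_stream(text):
        """Split a text/event-stream payload like the one produced above
        into (data, last_event_id) pairs."""
        events = []
        data, event_id = [], None
        for line in text.split('\r\n'):
            if line == '':
                if data:
                    events.append(('\n'.join(data), event_id))
                data = []
            elif line.startswith('data: '):
                data.append(line[len('data: '):])
            elif line.startswith('id: '):
                event_id = line[len('id: '):]
        return events

    # parse_event_stream('data: Hello\r\nid: 0\r\n\r\n') == [('Hello', '0')]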
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/eventsource.html b/testing/web-platform/tests/tools/pywebsocket/src/example/eventsource.html
new file mode 100644
index 000000000..1598a8807
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/eventsource.html
@@ -0,0 +1,74 @@
+<!--
+Copyright 2013, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<!--
+A simple example of Server-Sent Events
+http://dev.w3.org/html5/eventsource/
+
+For comparison with the WebSocket Protocol & API.
+
+Run pywebsocket with the --cgi_path parameter.
+-->
+
+<html>
+<head>
+<title>Server-Sent Events Example</title>
+<script>
+var eventSource = null;
+
+function addToLog(data) {
+ logBox.value += data + '\n';
+ logBox.scrollTop = 1000000;
+}
+
+function init() {
+ logBox = document.getElementById('log');
+
+ eventSource = new EventSource('/eventsource.cgi');
+ eventSource.onopen = function() {
+ addToLog('onopen (readyState = ' + eventSource.readyState + ')');
+ }
+ eventSource.onmessage = function(event) {
+ addToLog(event.data);
+ }
+ eventSource.onerror = function(event) {
+ addToLog('onerror (readyState = ' + eventSource.readyState + ')');
+ }
+}
+</script>
+</head>
+<body onload="init()">
+<textarea id="log" rows="10" cols="40" readonly></textarea>
+<p style="font-size: small">
+  Make sure that pywebsocket is run with the --cgi_path parameter.
+</p>
+</body>
+</html>
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/handler_map.txt b/testing/web-platform/tests/tools/pywebsocket/src/example/handler_map.txt
new file mode 100644
index 000000000..21c4c09aa
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/handler_map.txt
@@ -0,0 +1,11 @@
+# websocket handler map file, used by standalone.py -m option.
+# A line starting with '#' is a comment line.
+# Each line consists of 'alias_resource_path' and 'existing_resource_path'
+# separated by spaces.
+# Aliasing is processed from the top of the file to the bottom, and
+# 'existing_resource_path' must exist before it is aliased.
+# For example,
+# / /echo
+# means that a request to '/' will be handled by handlers for '/echo'.
+/ /echo
+
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/hsts_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/hsts_wsh.py
new file mode 100644
index 000000000..e86194692
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/hsts_wsh.py
@@ -0,0 +1,40 @@
+# Copyright 2013, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+def web_socket_do_extra_handshake(request):
+ request.extra_headers.append(
+ ('Strict-Transport-Security', 'max-age=86400'))
+
+
+def web_socket_transfer_data(request):
+ request.ws_stream.send_message('Hello', binary=False)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/internal_error_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/internal_error_wsh.py
new file mode 100644
index 000000000..fe581b54a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/internal_error_wsh.py
@@ -0,0 +1,42 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import msgutil
+
+
+def web_socket_do_extra_handshake(request):
+ pass
+
+
+def web_socket_transfer_data(request):
+ raise msgutil.BadOperationException('Intentional')
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/origin_check_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/example/origin_check_wsh.py
new file mode 100644
index 000000000..e05767ab9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/origin_check_wsh.py
@@ -0,0 +1,44 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This example is derived from test/testdata/handlers/origin_check_wsh.py.
+
+
+def web_socket_do_extra_handshake(request):
+ if request.ws_origin == 'http://example.com':
+ return
+ raise ValueError('Unacceptable origin: %r' % request.ws_origin)
+
+
+def web_socket_transfer_data(request):
+ request.connection.write('origin_check_wsh.py is called for %s, %s' %
+ (request.ws_resource, request.ws_protocol))
+
+
+# vi:sts=4 sw=4 et
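A slightly more general variant of the same check, assuming only the handler interface used above (pywebsocket fills in request.ws_origin before calling web_socket_do_extra_handshake); the allowed-origin list is illustrative:

    _ALLOWED_ORIGINS = frozenset([
        'http://example.com',
        'https://example.com',
    ])


    def web_socket_do_extra_handshake(request):
        # Raising here rejects the opening handshake, as in the handler above.
        if request.ws_origin not in _ALLOWED_ORIGINS:
            raise ValueError('Unacceptable origin: %r' % request.ws_origin)


    def web_socket_transfer_data(request):
        request.ws_stream.send_message(u'origin accepted', binary=False)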
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/pywebsocket.conf b/testing/web-platform/tests/tools/pywebsocket/src/example/pywebsocket.conf
new file mode 100644
index 000000000..335d130a5
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/pywebsocket.conf
@@ -0,0 +1,42 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# Sample configuration file for apache2
+#
+LogLevel debug
+<IfModule python_module>
+ PythonPath "sys.path+['/mod_pywebsocket']"
+ PythonOption mod_pywebsocket.handler_root /var/www
+ PythonOption mod_pywebsocket.handler_scan /var/www/ws
+ #PythonOption mod_pywebsocket.allow_draft75 On
+ <Location /ws>
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+ </Location>
+</IfModule>
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/special_headers.cgi b/testing/web-platform/tests/tools/pywebsocket/src/example/special_headers.cgi
new file mode 100755
index 000000000..ea5080f1f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/special_headers.cgi
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the COPYING file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""CGI script sample for testing effect of HTTP headers on the origin page.
+
+Note that CGI scripts don't work on the standalone pywebsocket running in TLS
+mode.
+"""
+
+
+print """Content-type: text/html
+Content-Security-Policy: connect-src self
+
+<html>
+<head>
+<title></title>
+</head>
+<body>
+<script>
+var socket = new WebSocket("ws://example.com");
+</script>
+</body>
+</html>"""
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/util.js b/testing/web-platform/tests/tools/pywebsocket/src/example/util.js
new file mode 100644
index 000000000..a1cad4975
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/util.js
@@ -0,0 +1,177 @@
+// Copyright 2013, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Utilities for example applications (for both main and worker thread).
+
+var results = {};
+
+function getTimeStamp() {
+ return Date.now();
+}
+
+function formatResultInKiB(size, timePerMessageInMs, stddevTimePerMessageInMs,
+ speed, printSize) {
+ if (printSize) {
+ return (size / 1024) +
+ '\t' + timePerMessageInMs.toFixed(3) +
+ (stddevTimePerMessageInMs == -1 ?
+ '' :
+ '\t' + stddevTimePerMessageInMs.toFixed(3)) +
+ '\t' + speed.toFixed(3);
+ } else {
+ return speed.toString();
+ }
+}
+
+function clearAverageData() {
+ results = {};
+}
+
+function reportAverageData(config) {
+ config.addToSummary(
+ 'Size[KiB]\tAverage time[ms]\tStddev time[ms]\tSpeed[KB/s]');
+ for (var size in results) {
+ var averageTimePerMessageInMs = results[size].sum_t / results[size].n;
+ var speed = calculateSpeedInKB(size, averageTimePerMessageInMs);
+ // Calculate sample standard deviation
+ var stddevTimePerMessageInMs = Math.sqrt(
+ (results[size].sum_t2 / results[size].n -
+ averageTimePerMessageInMs * averageTimePerMessageInMs) *
+ results[size].n /
+ (results[size].n - 1));
+ config.addToSummary(formatResultInKiB(
+ size, averageTimePerMessageInMs, stddevTimePerMessageInMs, speed,
+ true));
+ }
+}
+
+function calculateSpeedInKB(size, timeSpentInMs) {
+ return Math.round(size / timeSpentInMs * 1000) / 1000;
+}
+
+function calculateAndLogResult(config, size, startTimeInMs, totalSize) {
+ var timeSpentInMs = getTimeStamp() - startTimeInMs;
+ var speed = calculateSpeedInKB(totalSize, timeSpentInMs);
+ var timePerMessageInMs = timeSpentInMs / (totalSize / size);
+ if (!results[size]) {
+ results[size] = {n: 0, sum_t: 0, sum_t2: 0};
+ }
+ config.measureValue(timePerMessageInMs);
+ results[size].n ++;
+ results[size].sum_t += timePerMessageInMs;
+ results[size].sum_t2 += timePerMessageInMs * timePerMessageInMs;
+ config.addToLog(formatResultInKiB(size, timePerMessageInMs, -1, speed,
+ config.printSize));
+}
+
+function fillArrayBuffer(buffer, c) {
+ var i;
+
+ var u32Content = c * 0x01010101;
+
+ var u32Blocks = Math.floor(buffer.byteLength / 4);
+ var u32View = new Uint32Array(buffer, 0, u32Blocks);
+ // length attribute is slow on Chrome. Don't use it for loop condition.
+ for (i = 0; i < u32Blocks; ++i) {
+ u32View[i] = u32Content;
+ }
+
+ // Fraction
+ var u8Blocks = buffer.byteLength - u32Blocks * 4;
+ var u8View = new Uint8Array(buffer, u32Blocks * 4, u8Blocks);
+ for (i = 0; i < u8Blocks; ++i) {
+ u8View[i] = c;
+ }
+}
+
+function verifyArrayBuffer(buffer, expectedChar) {
+ var i;
+
+ var expectedU32Value = expectedChar * 0x01010101;
+
+ var u32Blocks = Math.floor(buffer.byteLength / 4);
+ var u32View = new Uint32Array(buffer, 0, u32Blocks);
+ for (i = 0; i < u32Blocks; ++i) {
+ if (u32View[i] != expectedU32Value) {
+ return false;
+ }
+ }
+
+ var u8Blocks = buffer.byteLength - u32Blocks * 4;
+ var u8View = new Uint8Array(buffer, u32Blocks * 4, u8Blocks);
+ for (i = 0; i < u8Blocks; ++i) {
+ if (u8View[i] != expectedChar) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+function verifyBlob(config, blob, expectedChar, doneCallback) {
+  var reader = new FileReader();
+ reader.onerror = function() {
+ config.addToLog('FileReader Error: ' + reader.error.message);
+ doneCallback(blob.size, false);
+ }
+ reader.onloadend = function() {
+ var result = verifyArrayBuffer(reader.result, expectedChar);
+ doneCallback(blob.size, result);
+ }
+ reader.readAsArrayBuffer(blob);
+}
+
+function verifyAcknowledgement(config, message, size) {
+ if (typeof message != 'string') {
+ config.addToLog('Invalid ack type: ' + typeof message);
+ return false;
+ }
+ var parsedAck = parseInt(message);
+ if (isNaN(parsedAck)) {
+ config.addToLog('Invalid ack value: ' + message);
+ return false;
+ }
+ if (parsedAck != size) {
+ config.addToLog(
+ 'Expected ack for ' + size + 'B but received one for ' + parsedAck +
+ 'B');
+ return false;
+ }
+
+ return true;
+}
+
+function cloneConfig(obj) {
+ var newObj = {};
+  for (var key in obj) {
+ newObj[key] = obj[key];
+ }
+ return newObj;
+}
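The running-sums computation in reportAverageData() is the ordinary sample standard deviation, just expressed in terms of n, the sum of the samples and the sum of their squares: s = sqrt((sum_t2 / n - mean^2) * n / (n - 1)). A small Python sketch showing that the two forms agree (function names are illustrative):

    import math


    def stddev_direct(times):
        """Sample standard deviation computed straight from the definition."""
        n = len(times)
        mean = sum(times) / float(n)
        return math.sqrt(sum((t - mean) ** 2 for t in times) / (n - 1))


    def stddev_running(times):
        """Same statistic from the n, sum_t and sum_t2 accumulators kept in
        results[size] above."""
        n = len(times)
        sum_t = sum(times)
        sum_t2 = sum(t * t for t in times)
        mean = sum_t / float(n)
        return math.sqrt((sum_t2 / float(n) - mean * mean) * n / (n - 1))

    # abs(stddev_direct([1.0, 2.0, 4.0]) - stddev_running([1.0, 2.0, 4.0])) < 1e-9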
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/util_main.js b/testing/web-platform/tests/tools/pywebsocket/src/example/util_main.js
new file mode 100644
index 000000000..b03d1c2bd
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/util_main.js
@@ -0,0 +1,63 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the COPYING file or at
+// https://developers.google.com/open-source/licenses/bsd
+
+// Utilities for example applications (for the main thread only).
+
+var logBox = null;
+var queuedLog = '';
+
+var summaryBox = null;
+
+function queueLog(log) {
+ queuedLog += log + '\n';
+}
+
+function addToLog(log) {
+ logBox.value += queuedLog;
+ queuedLog = '';
+ logBox.value += log + '\n';
+ logBox.scrollTop = 1000000;
+}
+
+function addToSummary(log) {
+ summaryBox.value += log + '\n';
+ summaryBox.scrollTop = 1000000;
+}
+
+// value: execution time in milliseconds.
+// config.measureValue is intended to be used in Performance Tests.
+// Do nothing here in non-PerformanceTest.
+function measureValue(value) {
+}
+
+function getIntFromInput(id) {
+ return parseInt(document.getElementById(id).value);
+}
+
+function getStringFromRadioBox(name) {
+ var list = document.getElementById('benchmark_form')[name];
+ for (var i = 0; i < list.length; ++i)
+ if (list.item(i).checked)
+ return list.item(i).value;
+ return undefined;
+}
+function getBoolFromCheckBox(id) {
+ return document.getElementById(id).checked;
+}
+
+function getIntArrayFromInput(id) {
+ var strArray = document.getElementById(id).value.split(',');
+ return strArray.map(function(str) { return parseInt(str, 10); });
+}
+
+function onMessage(message) {
+ if (message.data.type === 'addToLog')
+ addToLog(message.data.data);
+ else if (message.data.type === 'addToSummary')
+ addToSummary(message.data.data);
+ else if (message.data.type === 'measureValue')
+ measureValue(message.data.data);
+}
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/util_worker.js b/testing/web-platform/tests/tools/pywebsocket/src/example/util_worker.js
new file mode 100644
index 000000000..b64f7829d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/util_worker.js
@@ -0,0 +1,19 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the COPYING file or at
+// https://developers.google.com/open-source/licenses/bsd
+
+// Utilities for example applications (for the worker threads only).
+
+function workerAddToLog(text) {
+ postMessage({type: 'addToLog', data: text});
+}
+
+function workerAddToSummary(text) {
+ postMessage({type: 'addToSummary', data: text});
+}
+
+function workerMeasureValue(value) {
+ postMessage({type: 'measureValue', data: value});
+}
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_benchmark.html b/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_benchmark.html
new file mode 100644
index 000000000..186229775
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_benchmark.html
@@ -0,0 +1,222 @@
+<!--
+Copyright 2013, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<html>
+<head>
+<title>XMLHttpRequest benchmark</title>
+<script src="util_main.js"></script>
+<script src="util.js"></script>
+<script src="xhr_benchmark.js"></script>
+<script>
+var addressBox = null;
+
+function getConfig() {
+ return {
+ prefixUrl: addressBox.value,
+ printSize: getBoolFromCheckBox('printsize'),
+ numXHRs: getIntFromInput('numXHRs'),
+ async: getBoolFromCheckBox('async'),
+    numIterations: getIntFromInput('numiterations'),
+    numWarmUpIterations: getIntFromInput('numwarmupiterations'),
+    // Initial size of messages.
+    startSize: getIntFromInput('startsize'),
+ // Stops benchmark when the size of message exceeds this threshold.
+ stopThreshold: getIntFromInput('stopthreshold'),
+ // If the size of each message is small, send/receive multiple messages
+ // until the sum of sizes reaches this threshold.
+ // minTotal: getIntFromInput('mintotal'),
+ // minTotal is not yet implemented on XHR benchmark
+ multipliers: getIntArrayFromInput('multipliers'),
+ verifyData: getBoolFromCheckBox('verifydata')
+ };
+}
+
+var worker = new Worker('xhr_benchmark.js');
+worker.onmessage = onMessage;
+
+function onSendBenchmark() {
+ var config = getConfig();
+ config.dataType = getStringFromRadioBox('datatyperadio');
+
+ if (getBoolFromCheckBox('worker')) {
+ worker.postMessage({type: 'sendBenchmark', config: config});
+ } else {
+ config.addToLog = addToLog;
+ config.addToSummary = addToSummary;
+ config.measureValue = measureValue;
+ sendBenchmark(config);
+ }
+}
+
+function onReceiveBenchmark() {
+ var config = getConfig();
+ config.dataType = getStringFromRadioBox('datatyperadio');
+
+ if (getBoolFromCheckBox('worker')) {
+ worker.postMessage({type: 'receiveBenchmark', config: config});
+ } else {
+ config.addToLog = addToLog;
+ config.addToSummary = addToSummary;
+ config.measureValue = measureValue;
+ receiveBenchmark(config);
+ }
+}
+
+function onBatchBenchmark() {
+ var config = getConfig();
+
+ if (getBoolFromCheckBox('worker')) {
+ worker.postMessage({type: 'batchBenchmark', config: config});
+ } else {
+ config.addToLog = addToLog;
+ config.addToSummary = addToSummary;
+ config.measureValue = measureValue;
+ batchBenchmark(config);
+ }
+}
+
+function onStop() {
+ var config = getConfig();
+
+ if (getBoolFromCheckBox('worker')) {
+ worker.postMessage({type: 'stop', config: config});
+ } else {
+ config.addToLog = addToLog;
+ config.addToSummary = addToSummary;
+ config.measureValue = measureValue;
+ stop(config);
+ }
+}
+
+function init() {
+ addressBox = document.getElementById('address');
+ logBox = document.getElementById('log');
+
+ summaryBox = document.getElementById('summary');
+
+ // Special address of pywebsocket for XHR benchmark.
+ addressBox.value = '/073be001e10950692ccbf3a2ad21c245';
+
+ addToLog(window.navigator.userAgent.toLowerCase());
+ addToSummary(window.navigator.userAgent.toLowerCase());
+}
+</script>
+</head>
+<body onload="init()">
+
+<form id="benchmark_form">
+ url prefix <input type="text" id="address" size="40">
+ <input type="button" value="send" onclick="onSendBenchmark()">
+ <input type="button" value="receive" onclick="onReceiveBenchmark()">
+ <input type="button" value="batch" onclick="onBatchBenchmark()">
+ <input type="button" value="stop" onclick="onStop()">
+
+ <br/>
+
+ <input type="checkbox" id="printsize" checked>
+ <label for="printsize">Print size and time per message</label>
+ <input type="checkbox" id="verifydata" checked>
+ <label for="verifydata">Verify data</label>
+ <input type="checkbox" id="worker">
+ <label for="worker">Run on worker</label>
+ <input type="checkbox" id="async" checked>
+ <label for="async">Async</label><br>
+ (Receive &amp;&amp; Non-Worker &amp;&amp; Sync is not supported by spec)
+
+ <br/>
+
+ Parameters:
+
+ <br/>
+
+ <table>
+ <tr>
+ <td>Num XHRs</td>
+ <td><input type="text" id="numXHRs" value="1"></td>
+ </tr>
+ <tr>
+ <td>Number of iterations</td>
+ <td><input type="text" id="numiterations" value="1"></td>
+ </tr>
+ <tr>
+ <td>Number of warm-up iterations</td>
+ <td><input type="text" id="numwarmupiterations" value="0"></td>
+ </tr>
+ <tr>
+ <td>Start size</td>
+ <td><input type="text" id="startsize" value="10240"></td>
+ </tr>
+ <tr>
+ <td>Stop threshold</td>
+ <td><input type="text" id="stopthreshold" value="102400000"></td>
+ </tr>
+ <tr>
+ <td>Minimum total</td>
+ <td><input type="text" id="mintotal" value="102400000"></td>
+ </tr>
+ <tr>
+ <td>Multipliers</td>
+ <td><input type="text" id="multipliers" value="5, 2"></td>
+ </tr>
+ </table>
+
+ Set data type
+ <input type="radio"
+ name="datatyperadio"
+ id="datatyperadiotext"
+ value="text"
+ checked><label for="datatyperadiotext">text</label>
+ <input type="radio"
+ name="datatyperadio"
+ id="datatyperadioblob"
+ value="blob"
+ ><label for="datatyperadioblob">blob</label>
+ <input type="radio"
+ name="datatyperadio"
+ id="datatyperadioarraybuffer"
+ value="arraybuffer"
+ ><label for="datatyperadioarraybuffer">arraybuffer</label>
+</form>
+
+<div id="log_div">
+ <textarea
+ id="log" rows="20" style="width: 100%" readonly></textarea>
+</div>
+<div id="summary_div">
+ Summary
+ <textarea
+ id="summary" rows="20" style="width: 100%" readonly></textarea>
+</div>
+
+Note: The effects of RTT and of the time spent creating ArrayBuffers in receive benchmarks are not eliminated.
+
+</body>
+</html>
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_benchmark.js b/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_benchmark.js
new file mode 100644
index 000000000..233c7cb38
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_benchmark.js
@@ -0,0 +1,389 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the COPYING file or at
+// https://developers.google.com/open-source/licenses/bsd
+
+var isWorker = typeof importScripts !== "undefined";
+
+if (isWorker) {
+ // Running on a worker
+ importScripts('util.js', 'util_worker.js');
+}
+
+// Namespace for holding globals.
+var benchmark = {};
+benchmark.startTimeInMs = 0;
+
+var xhrs = [];
+
+var timerID = null;
+
+function destroyAllXHRs() {
+ for (var i = 0; i < xhrs.length; ++i) {
+ xhrs[i].onreadystatechange = null;
+    // Abort XHRs that are not yet in the DONE state.
+    // Calling abort() here (i.e. in the onreadystatechange handler)
+    // causes "NetworkError" messages in DevTools in sync mode,
+    // even after the transition to the DONE state, hence the check.
+ if (xhrs[i].readyState != XMLHttpRequest.DONE)
+ xhrs[i].abort();
+ }
+ xhrs = [];
+ // gc() might be needed for Chrome/Blob
+}
+
+function repeatString(str, count) {
+ var data = '';
+ var expChunk = str;
+ var remain = count;
+ while (true) {
+ if (remain % 2) {
+ data += expChunk;
+ remain = (remain - 1) / 2;
+ } else {
+ remain /= 2;
+ }
+
+ if (remain == 0)
+ break;
+
+ expChunk = expChunk + expChunk;
+ }
+ return data;
+}
+
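The loop in repeatString() above builds the payload with O(log count) string concatenations by walking the binary representation of count and doubling expChunk at each step; the result is the same as repeating str count times. An equivalent sketch in Python:

    def repeat_string(chunk, count):
        """Repeat chunk count times by binary decomposition of count,
        mirroring repeatString() above (equivalent to chunk * count)."""
        data = ''
        while count > 0:
            if count % 2:       # this bit of count is set: emit current chunk
                data += chunk
            count //= 2
            if count:           # more bits remain: double the chunk
                chunk += chunk
        return data

    # repeat_string('a', 1000000) == 'a' * 1000000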
+function sendBenchmarkStep(size, config) {
+ timerID = null;
+
+ benchmark.startTimeInMs = null;
+ var totalSize = 0;
+ var totalReplied = 0;
+
+ var onReadyStateChangeHandler = function () {
+ if (this.readyState != this.DONE) {
+ return;
+ }
+
+ if (this.status != 200) {
+ config.addToLog('Failed (status=' + this.status + ')');
+ destroyAllXHRs();
+ return;
+ }
+
+ if (config.verifyData &&
+ !verifyAcknowledgement(config, this.response, size)) {
+ destroyAllXHRs();
+ return;
+ }
+
+ totalReplied += size;
+
+ if (totalReplied < totalSize) {
+ return;
+ }
+
+ if (benchmark.startTimeInMs == null) {
+ config.addToLog('startTimeInMs not set');
+ destroyAllXHRs();
+ return;
+ }
+
+ calculateAndLogResult(config, size, benchmark.startTimeInMs, totalSize);
+
+ destroyAllXHRs();
+
+ runNextTask(config);
+ };
+
+ for (var i = 0; i < config.numXHRs; ++i) {
+ var xhr = new XMLHttpRequest();
+ xhr.onreadystatechange = onReadyStateChangeHandler;
+ xhrs.push(xhr);
+ }
+
+ var dataArray = [];
+
+ for (var i = 0; i < xhrs.length; ++i) {
+ var data = null;
+ if (config.dataType == 'arraybuffer' ||
+ config.dataType == 'blob') {
+ data = new ArrayBuffer(size);
+
+ fillArrayBuffer(data, 0x61);
+
+ if (config.dataType == 'blob') {
+ data = new Blob([data]);
+ }
+ } else {
+ data = repeatString('a', size);
+ }
+
+ dataArray.push(data);
+ }
+
+
+ benchmark.startTimeInMs = getTimeStamp();
+ totalSize = size * xhrs.length;
+
+ for (var i = 0; i < xhrs.length; ++i) {
+ var data = dataArray[i];
+ var xhr = xhrs[i];
+ xhr.open('POST', config.prefixUrl + '_send', config.async);
+ xhr.send(data);
+ }
+}
+
+function receiveBenchmarkStep(size, config) {
+ timerID = null;
+
+ benchmark.startTimeInMs = null;
+ var totalSize = 0;
+ var totalReplied = 0;
+
+ var checkResultAndContinue = function (bytesReceived, verificationResult) {
+ if (!verificationResult) {
+ config.addToLog('Response verification failed');
+ destroyAllXHRs();
+ return;
+ }
+
+ totalReplied += bytesReceived;
+
+ if (totalReplied < totalSize) {
+ return;
+ }
+
+ if (benchmark.startTimeInMs == null) {
+ config.addToLog('startTimeInMs not set');
+ destroyAllXHRs();
+ return;
+ }
+
+ calculateAndLogResult(config, size, benchmark.startTimeInMs, totalSize);
+
+ destroyAllXHRs();
+
+ runNextTask(config);
+ }
+
+ var onReadyStateChangeHandler = function () {
+ if (this.readyState != this.DONE) {
+ return;
+ }
+
+ if (this.status != 200) {
+ config.addToLog('Failed (status=' + this.status + ')');
+ destroyAllXHRs();
+ return;
+ }
+
+ var bytesReceived = -1;
+ if (this.responseType == 'arraybuffer') {
+ bytesReceived = this.response.byteLength;
+ } else if (this.responseType == 'blob') {
+ bytesReceived = this.response.size;
+ } else {
+ bytesReceived = this.response.length;
+ }
+ if (bytesReceived != size) {
+ config.addToLog('Expected ' + size +
+ 'B but received ' + bytesReceived + 'B');
+ destroyAllXHRs();
+ return;
+ }
+
+ if (this.responseType == 'arraybuffer') {
+ checkResultAndContinue(bytesReceived,
+ !config.verifyData || verifyArrayBuffer(this.response, 0x61));
+ } else if (this.responseType == 'blob') {
+ if (config.verifyData)
+ verifyBlob(config, this.response, 0x61, checkResultAndContinue);
+ else
+ checkResultAndContinue(bytesReceived, true);
+ } else {
+ checkResultAndContinue(
+ bytesReceived,
+ !config.verifyData ||
+ this.response == repeatString('a', this.response.length));
+ }
+ };
+
+ for (var i = 0; i < config.numXHRs; ++i) {
+ var xhr = new XMLHttpRequest();
+ xhr.onreadystatechange = onReadyStateChangeHandler;
+ xhrs.push(xhr);
+ }
+
+ benchmark.startTimeInMs = getTimeStamp();
+ totalSize = size * xhrs.length;
+
+ for (var i = 0; i < xhrs.length; ++i) {
+ var xhr = xhrs[i];
+ xhr.open('POST', config.prefixUrl + '_receive', config.async);
+ xhr.responseType = config.dataType;
+ xhr.send(size + ' none');
+ }
+}
+
+
+function getConfigString(config) {
+ return '(' + config.dataType +
+ ', verifyData=' + config.verifyData +
+ ', ' + (isWorker ? 'Worker' : 'Main') +
+ ', ' + (config.async ? 'Async' : 'Sync') +
+ ', numXHRs=' + config.numXHRs +
+ ', numIterations=' + config.numIterations +
+ ', numWarmUpIterations=' + config.numWarmUpIterations +
+ ')';
+}
+
+function startBenchmark(config) {
+ clearTimeout(timerID);
+ destroyAllXHRs();
+
+ runNextTask(config);
+}
+
+// TODO(hiroshige): The following code is the same as in benchmark.html;
+// some of it should be merged into e.g. util.js.
+
+var tasks = [];
+
+function runNextTask(config) {
+ var task = tasks.shift();
+ if (task == undefined) {
+ config.addToLog('Finished');
+ destroyAllXHRs();
+ return;
+ }
+ timerID = setTimeout(task, 0);
+}
+
+function buildLegendString(config) {
+ var legend = ''
+ if (config.printSize)
+ legend = 'Message size in KiB, Time/message in ms, ';
+ legend += 'Speed in kB/s';
+ return legend;
+}
+
+function addTasks(config, stepFunc) {
+ for (var i = 0;
+ i < config.numWarmUpIterations + config.numIterations; ++i) {
+ // Ignore the first |config.numWarmUpIterations| iterations.
+ if (i == config.numWarmUpIterations)
+ addResultClearingTask(config);
+
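+    // Message sizes grow from config.startSize by cycling through
+    // config.multipliers until config.stopThreshold is exceeded
+    // (e.g. with the default multipliers "5, 2": 10240, 51200, 102400, ...).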
+ var multiplierIndex = 0;
+ for (var size = config.startSize;
+ size <= config.stopThreshold;
+ ++multiplierIndex) {
+ var task = stepFunc.bind(
+ null,
+ size,
+ config);
+ tasks.push(task);
+ size *= config.multipliers[
+ multiplierIndex % config.multipliers.length];
+ }
+ }
+}
+
+function addResultReportingTask(config, title) {
+ tasks.push(function(){
+ timerID = null;
+ config.addToSummary(title);
+ reportAverageData(config);
+ clearAverageData();
+ runNextTask(config);
+ });
+}
+
+function addResultClearingTask(config) {
+ tasks.push(function(){
+ timerID = null;
+ clearAverageData();
+ runNextTask(config);
+ });
+}
+
+// --------------------------------
+
+function sendBenchmark(config) {
+ config.addToLog('Send benchmark');
+ config.addToLog(buildLegendString(config));
+
+ tasks = [];
+ clearAverageData();
+ addTasks(config, sendBenchmarkStep);
+ addResultReportingTask(config, 'Send Benchmark ' + getConfigString(config));
+ startBenchmark(config);
+}
+
+function receiveBenchmark(config) {
+ config.addToLog('Receive benchmark');
+ config.addToLog(buildLegendString(config));
+
+ tasks = [];
+ clearAverageData();
+ addTasks(config, receiveBenchmarkStep);
+ addResultReportingTask(config,
+ 'Receive Benchmark ' + getConfigString(config));
+ startBenchmark(config);
+}
+
+function batchBenchmark(originalConfig) {
+ originalConfig.addToLog('Batch benchmark');
+
+ tasks = [];
+ clearAverageData();
+
+ var dataTypes = ['text', 'blob', 'arraybuffer'];
+ var stepFuncs = [sendBenchmarkStep, receiveBenchmarkStep];
+ var names = ['Send', 'Receive'];
+ var async = [true, false];
+ for (var i = 0; i < stepFuncs.length; ++i) {
+ for (var j = 0; j < dataTypes.length; ++j) {
+ for (var k = 0; k < async.length; ++k) {
+ var config = cloneConfig(originalConfig);
+ config.dataType = dataTypes[j];
+ config.async = async[k];
+
+ // Receive && Non-Worker && Sync is not supported by the spec
+ if (stepFuncs[i] === receiveBenchmarkStep && !isWorker &&
+ !config.async)
+ continue;
+
+ addTasks(config, stepFuncs[i]);
+ addResultReportingTask(config,
+ names[i] + ' benchmark ' + getConfigString(config));
+ }
+ }
+ }
+
+ startBenchmark(config);
+}
+
+
+function stop(config) {
+ destroyAllXHRs();
+ clearTimeout(timerID);
+ timerID = null;
+ config.addToLog('Stopped');
+}
+
+onmessage = function (message) {
+ var config = message.data.config;
+ config.addToLog = workerAddToLog;
+ config.addToSummary = workerAddToSummary;
+ config.measureValue = workerMeasureValue;
+ if (message.data.type === 'sendBenchmark')
+ sendBenchmark(config);
+ else if (message.data.type === 'receiveBenchmark')
+ receiveBenchmark(config);
+ else if (message.data.type === 'batchBenchmark')
+ batchBenchmark(config);
+ else if (message.data.type === 'stop')
+ stop(config);
+};
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_event_logger.html b/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_event_logger.html
new file mode 100644
index 000000000..6983553b8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/example/xhr_event_logger.html
@@ -0,0 +1,110 @@
+<!--
+Copyright 2014 Google Inc. All rights reserved.
+
+Use of this source code is governed by a BSD-style
+license that can be found in the COPYING file or at
+https://developers.google.com/open-source/licenses/bsd
+-->
+
+<html>
+<head>
+<title>XHR event logger</title>
+<script src="util_main.js"></script>
+<script>
+var events = [];
+
+function run() {
+ events = [];
+
+ function pushToLog(type) {
+ if (events.length != 0 && type === events[events.length - 1].type) {
+ events[events.length - 1].count += 1;
+ } else {
+ events.push({type: type, count: 1});
+ }
+ }
+
+ var xhr = new XMLHttpRequest();
+
+ function getProgressEventDump(e) {
+ return '(' + e.lengthComputable + ', ' + e.loaded + ', ' + e.total + ')';
+ }
+
+ var dumpProgressEvent = getBoolFromCheckBox('dumpprogressevent');
+
+ function log(e) {
+ var type = e.type;
+ if (type === 'readystatechange') {
+ type += e.target.readyState;
+ }
+ if (dumpProgressEvent && (e instanceof ProgressEvent)) {
+ type += getProgressEventDump(e);
+ }
+ pushToLog(type);
+ };
+
+ function logUpload(e) {
+ var type = e.type;
+ if (dumpProgressEvent && (e instanceof ProgressEvent)) {
+ type += getProgressEventDump(e);
+ }
+ pushToLog('upload' + type);
+ }
+
+ if (getBoolFromCheckBox('upload')) {
+ var upload = xhr.upload;
+ upload.onloadstart = logUpload;
+ upload.onprogress = logUpload;
+ upload.onabort = logUpload;
+ upload.onerror = logUpload;
+ upload.onload = logUpload;
+ upload.ontimeout = logUpload;
+ upload.onloadend = logUpload;
+ }
+
+ xhr.onreadystatechange = log;
+ xhr.onloadstart = log;
+ xhr.onprogress = log;
+ xhr.onabort = log;
+ xhr.onerror = log;
+ xhr.onload = log;
+ xhr.ontimeout = log;
+ xhr.onloadend = log;
+
+ xhr.open('POST', '/073be001e10950692ccbf3a2ad21c245_receive',
+ getBoolFromCheckBox('async'));
+ var size = getIntFromInput('size');
+ var chunkedMode = 'none';
+ if (getBoolFromCheckBox('chunkedresponse')) {
+ chunkedMode = 'chunked';
+ }
+ xhr.send(size + ' ' + chunkedMode);
+}
+
+function print() {
+ var result = '';
+ for (var i = 0; i < events.length; ++i) {
+ var event = events[i];
+ result += event.type + ' * ' + event.count + '\n';
+ }
+ document.getElementById('log').value = result;
+}
+</script>
+
+<body>
+ <textarea id="log" rows="10" cols="40" readonly></textarea>
+ <br/>
+ Size: <input type="text" id="size" value="65536"><br/>
+ <input type="checkbox" id="chunkedresponse">
+  <label for="chunkedresponse">Use chunked Transfer-Encoding for response</label><br/>
+ <input type="checkbox" id="upload">
+ <label for="upload">Upload progress</label><br/>
+ <input type="checkbox" id="dumpprogressevent">
+ <label for="dumpprogressevent">
+ Dump lengthComputable/loaded/total</label><br/>
+ <input type="checkbox" id="async" checked>
+ <label for="async">Async</label><br/>
+ <input type="button" onclick="run()" value="Run XHR">
+ <input type="button" onclick="print()" value="Print log">
+</body>
+</html>
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/__init__.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/__init__.py
new file mode 100644
index 000000000..70933a220
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/__init__.py
@@ -0,0 +1,224 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket extension for Apache HTTP Server.
+
+mod_pywebsocket is a WebSocket extension for Apache HTTP Server
+intended for testing or experimental purposes. mod_python is required.
+
+
+Installation
+============
+
+0. Prepare an Apache HTTP Server for which mod_python is enabled.
+
+1. Specify the following Apache HTTP Server directives to suit your
+ configuration.
+
+ If mod_pywebsocket is not in the Python path, specify the following.
+ <websock_lib> is the directory where mod_pywebsocket is installed.
+
+ PythonPath "sys.path+['<websock_lib>']"
+
+ Always specify the following. <websock_handlers> is the directory where
+ user-written WebSocket handlers are placed.
+
+ PythonOption mod_pywebsocket.handler_root <websock_handlers>
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+
+ To limit the search for WebSocket handlers to a directory <scan_dir>
+ under <websock_handlers>, configure as follows:
+
+ PythonOption mod_pywebsocket.handler_scan <scan_dir>
+
+ <scan_dir> is useful in saving scan time when <websock_handlers>
+ contains many non-WebSocket handler files.
+
+   If you want to allow handlers whose canonical path is not under the root
+   directory (i.e. a symbolic link is in the root directory but its target is
+   not), configure as follows:
+
+ PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On
+
+ Example snippet of httpd.conf:
+ (mod_pywebsocket is in /websock_lib, WebSocket handlers are in
+ /websock_handlers, port is 80 for ws, 443 for wss.)
+
+ <IfModule python_module>
+ PythonPath "sys.path+['/websock_lib']"
+ PythonOption mod_pywebsocket.handler_root /websock_handlers
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+ </IfModule>
+
+2. Tune Apache parameters for serving WebSocket. Note that at least the
+   TimeOut directive from the core features and the RequestReadTimeout
+   directive from mod_reqtimeout should be adjusted so that they do not kill
+   connections after only a few seconds of idle time.
+
+3. Verify installation. You can use example/console.html to poke the server.
+
+
+Writing WebSocket handlers
+==========================
+
+When a WebSocket request comes in, the resource name
+specified in the handshake is treated as a file path under
+<websock_handlers>, and the handler defined in
+<websock_handlers>/<resource_name>_wsh.py is invoked.
+
+For example, if the resource name is /example/chat, the handler defined in
+<websock_handlers>/example/chat_wsh.py is invoked.
+
+A WebSocket handler is composed of the following three functions:
+
+ web_socket_do_extra_handshake(request)
+ web_socket_transfer_data(request)
+ web_socket_passive_closing_handshake(request)
+
+where:
+ request: mod_python request.
+
+web_socket_do_extra_handshake is called during the handshake after the
+headers are successfully parsed and WebSocket properties (ws_location,
+ws_origin, and ws_resource) are added to request. A handler
+can reject the request by raising an exception.
+
+A request object has the following properties that you can use during the
+extra handshake (web_socket_do_extra_handshake):
+- ws_resource
+- ws_origin
+- ws_version
+- ws_location (HyBi 00 only)
+- ws_extensions (HyBi 06 and later)
+- ws_deflate (HyBi 06 and later)
+- ws_protocol
+- ws_requested_protocols (HyBi 06 and later)
+
+The last two are a bit tricky. See the next subsection.
+
+
+Subprotocol Negotiation
+-----------------------
+
+For HyBi 06 and later, ws_protocol is always set to None when
+web_socket_do_extra_handshake is called. If ws_requested_protocols is not
+None, you must choose one subprotocol from this list and set it to
+ws_protocol.
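+
+For illustration only (the 'chat' subprotocol name is just an example), a
+handler that accepts 'chat' whenever it is offered could do the following in
+web_socket_do_extra_handshake:
+
+  if (request.ws_requested_protocols and
+          'chat' in request.ws_requested_protocols):
+      request.ws_protocol = 'chat'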
+
+For HyBi 00, when web_socket_do_extra_handshake is called,
+ws_protocol is set to the value given by the client in the
+Sec-WebSocket-Protocol header, or to None if no
+such header was found in the opening handshake request. Finish the extra
+handshake with ws_protocol untouched to accept the requested subprotocol.
+The Sec-WebSocket-Protocol header will then be sent to
+the client in the response with the same value as requested. Raise an
+exception in web_socket_do_extra_handshake to reject the requested
+subprotocol.
+
+
+Data Transfer
+-------------
+
+web_socket_transfer_data is called after the handshake completed
+successfully. A handler can receive/send messages from/to the client
+using request. mod_pywebsocket.msgutil module provides utilities
+for data transfer.
+
+You can receive a message by the following statement.
+
+ message = request.ws_stream.receive_message()
+
+This call blocks until any complete text frame arrives, and the payload data
+of the incoming frame is stored in message. When you're using the IETF
+HyBi 00 or later protocol, receive_message() returns None on receiving a
+client-initiated closing handshake. When any error occurs, receive_message()
+raises an exception.
+
+You can send a message by the following statement.
+
+ request.ws_stream.send_message(message)
+
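+Putting the pieces above together, a minimal handler that echoes text
+messages (for example <websock_handlers>/echo_wsh.py; this is only a sketch,
+not a file shipped with mod_pywebsocket) could look like:
+
+  def web_socket_do_extra_handshake(request):
+      pass  # Accept the handshake as-is.
+
+  def web_socket_transfer_data(request):
+      while True:
+          message = request.ws_stream.receive_message()
+          if message is None:
+              return  # The client initiated the closing handshake.
+          request.ws_stream.send_message(message)
+
+  def web_socket_passive_closing_handshake(request):
+      return 1000, ''  # 1000 is the normal closure status code.
+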
+
+Closing Connection
+------------------
+
+Executing the following statement, or simply returning from
+web_socket_transfer_data, causes the connection to close.
+
+ request.ws_stream.close_connection()
+
+close_connection waits
+for a closing handshake acknowledgement from the client. If it cannot
+receive a valid acknowledgement, it raises an exception.
+
+web_socket_passive_closing_handshake is called immediately after the server
+receives an incoming closing frame from the client peer. You can specify the
+close code and reason via its return values. They are sent as an outgoing
+closing frame from the server. A request object has the following properties
+that you can use in web_socket_passive_closing_handshake:
+- ws_close_code
+- ws_close_reason
+
+
+Threading
+---------
+
+A WebSocket handler must be thread-safe if the server (Apache or
+standalone.py) is configured to use threads.
+
+
+Configuring WebSocket Extension Processors
+------------------------------------------
+
+See extensions.py for supported WebSocket extensions. Note that they are
+unstable and their APIs are subject to change substantially.
+
+A request object has the following attributes related to extension processing:
+
+- ws_requested_extensions:
+
+ A list of common.ExtensionParameter instances representing extension
+ parameters received from the client in the client's opening handshake.
+ You shouldn't modify it manually.
+
+- ws_extensions:
+
+ A list of common.ExtensionParameter instances representing extension
+ parameters to send back to the client in the server's opening handshake.
+ You shouldn't touch it directly. Instead, call methods on extension
+ processors.
+
+- ws_extension_processors:
+
+ A list of loaded extension processors. Find the processor for the
+ extension you want to configure from it, and call its methods.
+"""
+
+
+# vi:sts=4 sw=4 et tw=72
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_base.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_base.py
new file mode 100644
index 000000000..8235666bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_base.py
@@ -0,0 +1,181 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Base stream class.
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# mod_python document says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+import socket
+
+from mod_pywebsocket import util
+
+
+# Exceptions
+
+
+class ConnectionTerminatedException(Exception):
+ """This exception will be raised when a connection is terminated
+ unexpectedly.
+ """
+
+ pass
+
+
+class InvalidFrameException(ConnectionTerminatedException):
+ """This exception will be raised when we received an invalid frame we
+ cannot parse.
+ """
+
+ pass
+
+
+class BadOperationException(Exception):
+ """This exception will be raised when send_message() is called on
+ server-terminated connection or receive_message() is called on
+ client-terminated connection.
+ """
+
+ pass
+
+
+class UnsupportedFrameException(Exception):
+    """This exception will be raised when we receive a frame with a flag or
+    opcode we cannot handle. Handlers can just catch and ignore this exception
+    and call receive_message() again to continue processing the next frame.
+ """
+
+ pass
+
+
+class InvalidUTF8Exception(Exception):
+ """This exception will be raised when we receive a text frame which
+ contains invalid UTF-8 strings.
+ """
+
+ pass
+
+
+class StreamBase(object):
+ """Base stream class."""
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+
+ def _read(self, length):
+        """Reads length bytes from the connection. In case we catch any
+        exception, prepends the remote address to the exception message and
+        raises it again.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ try:
+ read_bytes = self._request.connection.read(length)
+ if not read_bytes:
+ raise ConnectionTerminatedException(
+                    'Receiving %d bytes failed. Peer (%r) closed connection' %
+ (length, (self._request.connection.remote_addr,)))
+ return read_bytes
+ except socket.error, e:
+ # Catch a socket.error. Because it's not a child class of the
+ # IOError prior to Python 2.6, we cannot omit this except clause.
+ # Use %s rather than %r for the exception to use human friendly
+ # format.
+ raise ConnectionTerminatedException(
+                'Receiving %d bytes failed. socket.error (%s) occurred' %
+ (length, e))
+ except IOError, e:
+ # Also catch an IOError because mod_python throws it.
+ raise ConnectionTerminatedException(
+                'Receiving %d bytes failed. IOError (%s) occurred' %
+ (length, e))
+
+ def _write(self, bytes_to_write):
+        """Writes the given bytes to the connection. In case we catch any
+        exception, prepends the remote address to the exception message and
+        raises it again.
+ """
+
+ try:
+ self._request.connection.write(bytes_to_write)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._request.connection.remote_addr,),
+ e)
+ raise
+
+ def receive_bytes(self, length):
+        """Receives multiple bytes. Retries reading until the specified
+        number of bytes has been received.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ read_bytes = []
+ while length > 0:
+ new_read_bytes = self._read(length)
+ read_bytes.append(new_read_bytes)
+ length -= len(new_read_bytes)
+ return ''.join(read_bytes)
+
+ def _read_until(self, delim_char):
+ """Reads bytes until we encounter delim_char. The result will not
+ contain delim_char.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ read_bytes = []
+ while True:
+ ch = self._read(1)
+ if ch == delim_char:
+ break
+ read_bytes.append(ch)
+ return ''.join(read_bytes)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hixie75.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hixie75.py
new file mode 100644
index 000000000..94cf5b31b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hixie75.py
@@ -0,0 +1,229 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides a class for parsing/building frames of the WebSocket
+protocol version HyBi 00 and Hixie 75.
+
+Specification:
+- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket import util
+
+
+class StreamHixie75(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol version
+ HyBi 00 and Hixie 75.
+ """
+
+ def __init__(self, request, enable_closing_handshake=False):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ enable_closing_handshake: to let StreamHixie75 perform closing
+ handshake as specified in HyBi 00, set
+ this option to True.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._enable_closing_handshake = enable_closing_handshake
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ message: unicode string to send.
+ binary: not used in hixie75.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection.
+ """
+
+ if not end:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with end=False')
+
+ if binary:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with binary=True')
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
+
+ def _read_payload_length_hixie75(self):
+        """Reads the length header of a Hixie 75 frame that carries a
+        length field.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
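+        # The length is encoded in base 128, most significant group first:
+        # each byte contributes its lower 7 bits, and a set high bit means
+        # that more length bytes follow.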
+ length = 0
+ while True:
+ b_str = self._read(1)
+ b = ord(b_str)
+ length = length * 128 + (b & 0x7f)
+ if (b & 0x80) == 0:
+ break
+ return length
+
+ def receive_message(self):
+        """Receive a WebSocket frame and return its payload as a unicode
+        string.
+
+ Returns:
+ payload unicode string in a WebSocket frame.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ BadOperationException: when called on a client-terminated
+ connection.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # Read 1 byte.
+ # mp_conn.read will block if no bytes are available.
+ # Timeout is controlled by TimeOut directive of Apache.
+ frame_type_str = self.receive_bytes(1)
+ frame_type = ord(frame_type_str)
+ if (frame_type & 0x80) == 0x80:
+ # The payload length is specified in the frame.
+ # Read and discard.
+ length = self._read_payload_length_hixie75()
+ if length > 0:
+ _ = self.receive_bytes(length)
+ # 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
+ # /client terminated/ flag and abort these steps.
+ if not self._enable_closing_handshake:
+ continue
+
+ if frame_type == 0xFF and length == 0:
+ self._request.client_terminated = True
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing '
+ 'handshake')
+ return None
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ self._send_closing_handshake()
+ self._logger.debug(
+ 'Sent ack for client-initiated closing handshake')
+ return None
+ else:
+ # The payload is delimited with \xff.
+ bytes = self._read_until('\xff')
+ # The WebSocket protocol section 4.4 specifies that invalid
+ # characters must be replaced with U+fffd REPLACEMENT
+ # CHARACTER.
+ message = bytes.decode('utf-8', 'replace')
+ if frame_type == 0x00:
+ return message
+ # Discard data of other types.
+
+ def _send_closing_handshake(self):
+ if not self._enable_closing_handshake:
+ raise BadOperationException(
+ 'Closing handshake is not supported in Hixie 75 protocol')
+
+ self._request.server_terminated = True
+
+ # 5.3 the server may decide to terminate the WebSocket connection by
+ # running through the following steps:
+ # 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
+ # start of the closing handshake.
+ self._write('\xff\x00')
+
+ def close_connection(self, unused_code='', unused_reason=''):
+ """Closes a WebSocket connection.
+
+ Raises:
+            ConnectionTerminatedException: when the closing handshake was
+                not successful.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ if not self._enable_closing_handshake:
+ self._request.server_terminated = True
+ self._logger.debug('Connection closed')
+ return
+
+ self._send_closing_handshake()
+ self._logger.debug('Sent server-initiated closing handshake')
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake, and if we couldn't receive non-handshake
+ # frame, we take it as ConnectionTerminatedException.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
+ def send_ping(self, body):
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_ping')
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hybi.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hybi.py
new file mode 100644
index 000000000..a8a49e3c3
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hybi.py
@@ -0,0 +1,887 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for parsing/building frames
+of the WebSocket protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+from collections import deque
+import logging
+import os
+import struct
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+_NOOP_MASKER = util.NoopMasker()
+
+
+class Frame(object):
+
+ def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
+ opcode=None, payload=''):
+ self.fin = fin
+ self.rsv1 = rsv1
+ self.rsv2 = rsv2
+ self.rsv3 = rsv3
+ self.opcode = opcode
+ self.payload = payload
+
+
+# Helper functions made public to be used for writing unittests for WebSocket
+# clients.
+
+
+def create_length_header(length, mask):
+ """Creates a length header.
+
+ Args:
+ length: Frame length. Must be less than 2^63.
+ mask: Mask bit. Must be boolean.
+
+ Raises:
+ ValueError: when bad data is given.
+ """
+
+ if mask:
+ mask_bit = 1 << 7
+ else:
+ mask_bit = 0
+
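+    # Use the shortest encoding defined by RFC 6455 section 5.2: a 7-bit
+    # length, the marker 126 followed by a 16-bit length, or the marker 127
+    # followed by a 64-bit length.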
+ if length < 0:
+ raise ValueError('length must be non negative integer')
+ elif length <= 125:
+ return chr(mask_bit | length)
+ elif length < (1 << 16):
+ return chr(mask_bit | 126) + struct.pack('!H', length)
+ elif length < (1 << 63):
+ return chr(mask_bit | 127) + struct.pack('!Q', length)
+ else:
+ raise ValueError('Payload is too big for one frame')
+
+
+def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
+ """Creates a frame header.
+
+ Raises:
+ Exception: when bad data is given.
+ """
+
+ if opcode < 0 or 0xf < opcode:
+ raise ValueError('Opcode out of range')
+
+ if payload_length < 0 or (1 << 63) <= payload_length:
+ raise ValueError('payload_length out of range')
+
+ if (fin | rsv1 | rsv2 | rsv3) & ~1:
+ raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')
+
+ header = ''
+
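+    # First byte layout (RFC 6455 section 5.2): FIN, RSV1, RSV2, RSV3 bits
+    # followed by the 4-bit opcode.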
+ first_byte = ((fin << 7)
+ | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
+ | opcode)
+ header += chr(first_byte)
+ header += create_length_header(payload_length, mask)
+
+ return header
+
+
+def _build_frame(header, body, mask):
+ if not mask:
+ return header + body
+
+ masking_nonce = os.urandom(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ return header + masking_nonce + masker.mask(body)
+
+
+def _filter_and_format_frame_object(frame, mask, frame_filters):
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_binary_frame(
+ message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple binary frame with no extension, reserved bit."""
+
+ frame = Frame(fin=fin, opcode=opcode, payload=message)
+ return _filter_and_format_frame_object(frame, mask, frame_filters)
+
+
+def create_text_frame(
+ message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple text frame with no extension, reserved bit."""
+
+ encoded_message = message.encode('utf-8')
+ return create_binary_frame(encoded_message, opcode, fin, mask,
+ frame_filters)
+
+
+def parse_frame(receive_bytes, logger=None,
+ ws_version=common.VERSION_HYBI_LATEST,
+ unmask_receive=True):
+ """Parses a frame. Returns a tuple containing each header field and
+ payload.
+
+ Args:
+ receive_bytes: a function that reads frame data from a stream or
+ something similar. The function takes length of the bytes to be
+ read. The function must raise ConnectionTerminatedException if
+ there is not enough data to be read.
+ logger: a logging object.
+ ws_version: the version of WebSocket protocol.
+        unmask_receive: unmask received frames. When an unmasked frame
+            is received, raises InvalidFrameException.
+
+ Raises:
+ ConnectionTerminatedException: when receive_bytes raises it.
+ InvalidFrameException: when the frame contains invalid data.
+ """
+
+ if not logger:
+ logger = logging.getLogger()
+
+ logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')
+
+ received = receive_bytes(2)
+
+ first_byte = ord(received[0])
+ fin = (first_byte >> 7) & 1
+ rsv1 = (first_byte >> 6) & 1
+ rsv2 = (first_byte >> 5) & 1
+ rsv3 = (first_byte >> 4) & 1
+ opcode = first_byte & 0xf
+
+ second_byte = ord(received[1])
+ mask = (second_byte >> 7) & 1
+ payload_length = second_byte & 0x7f
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
+ 'Mask=%s, Payload_length=%s',
+ fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)
+
+ if (mask == 1) != unmask_receive:
+ raise InvalidFrameException(
+            'Mask bit on the received frame didn\'t match masking '
+            'configuration for received frames')
+
+    # The HyBi and later specs disallow putting a value in 0x0-0xFFFF
+    # into the 8-octet extended payload length field (or 0x0-0x7D in the
+    # 2-octet field).
+ valid_length_encoding = True
+ length_encoding_bytes = 1
+ if payload_length == 127:
+ logger.log(common.LOGLEVEL_FINE,
+ 'Receive 8-octet extended payload length')
+
+ extended_payload_length = receive_bytes(8)
+ payload_length = struct.unpack(
+ '!Q', extended_payload_length)[0]
+ if payload_length > 0x7FFFFFFFFFFFFFFF:
+ raise InvalidFrameException(
+ 'Extended payload length >= 2^63')
+ if ws_version >= 13 and payload_length < 0x10000:
+ valid_length_encoding = False
+ length_encoding_bytes = 8
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+ elif payload_length == 126:
+ logger.log(common.LOGLEVEL_FINE,
+ 'Receive 2-octet extended payload length')
+
+ extended_payload_length = receive_bytes(2)
+ payload_length = struct.unpack(
+ '!H', extended_payload_length)[0]
+ if ws_version >= 13 and payload_length < 126:
+ valid_length_encoding = False
+ length_encoding_bytes = 2
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+
+ if not valid_length_encoding:
+ logger.warning(
+ 'Payload length is not encoded using the minimal number of '
+ 'bytes (%d is encoded using %d bytes)',
+ payload_length,
+ length_encoding_bytes)
+
+ if mask == 1:
+ logger.log(common.LOGLEVEL_FINE, 'Receive mask')
+
+ masking_nonce = receive_bytes(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
+ else:
+ masker = _NOOP_MASKER
+
+ logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ receive_start = time.time()
+
+ raw_payload_bytes = receive_bytes(payload_length)
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done receiving payload data at %s MB/s',
+ payload_length / (time.time() - receive_start) / 1000 / 1000)
+ logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ unmask_start = time.time()
+
+ unmasked_bytes = masker.mask(raw_payload_bytes)
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done unmasking payload data at %s MB/s',
+ payload_length / (time.time() - unmask_start) / 1000 / 1000)
+
+ return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3
+
+
+class FragmentedFrameBuilder(object):
+ """A stateful class to send a message as fragments."""
+
+ def __init__(self, mask, frame_filters=[], encode_utf8=True):
+ """Constructs an instance."""
+
+ self._mask = mask
+ self._frame_filters = frame_filters
+ # This is for skipping UTF-8 encoding when building text type frames
+ # from compressed data.
+ self._encode_utf8 = encode_utf8
+
+ self._started = False
+
+ # Hold opcode of the first frame in messages to verify types of other
+ # frames in the message are all the same.
+ self._opcode = common.OPCODE_TEXT
+
+ def build(self, payload_data, end, binary):
+ if binary:
+ frame_type = common.OPCODE_BINARY
+ else:
+ frame_type = common.OPCODE_TEXT
+ if self._started:
+ if self._opcode != frame_type:
+ raise ValueError('Message types are different in frames for '
+ 'the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ opcode = frame_type
+ self._opcode = frame_type
+
+ if end:
+ self._started = False
+ fin = 1
+ else:
+ self._started = True
+ fin = 0
+
+ if binary or not self._encode_utf8:
+ return create_binary_frame(
+ payload_data, opcode, fin, self._mask, self._frame_filters)
+ else:
+ return create_text_frame(
+ payload_data, opcode, fin, self._mask, self._frame_filters)
+
+
+def _create_control_frame(opcode, body, mask, frame_filters):
+ frame = Frame(opcode=opcode, payload=body)
+
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ if len(frame.payload) > 125:
+ raise BadOperationException(
+ 'Payload data size of control frames must be 125 bytes or less')
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_ping_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
+
+
+def create_pong_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
+
+
+def create_close_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(
+ common.OPCODE_CLOSE, body, mask, frame_filters)
+
+
+def create_closing_handshake_body(code, reason):
+ body = ''
+ if code is not None:
+ if (code > common.STATUS_USER_PRIVATE_MAX or
+ code < common.STATUS_NORMAL_CLOSURE):
+ raise BadOperationException('Status code is out of range')
+ if (code == common.STATUS_NO_STATUS_RECEIVED or
+ code == common.STATUS_ABNORMAL_CLOSURE or
+ code == common.STATUS_TLS_HANDSHAKE):
+ raise BadOperationException('Status code is reserved pseudo '
+ 'code')
+ encoded_reason = reason.encode('utf-8')
+ body = struct.pack('!H', code) + encoded_reason
+ return body
+
+
+class StreamOptions(object):
+ """Holds option values to configure Stream objects."""
+
+ def __init__(self):
+ """Constructs StreamOptions."""
+
+ # Filters applied to frames.
+ self.outgoing_frame_filters = []
+ self.incoming_frame_filters = []
+
+ # Filters applied to messages. Control frames are not affected by them.
+ self.outgoing_message_filters = []
+ self.incoming_message_filters = []
+
+ self.encode_text_message_to_utf8 = True
+ self.mask_send = False
+ self.unmask_receive = True
+
+
+class Stream(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol
+ (RFC 6455).
+ """
+
+ def __init__(self, request, options):
+ """Constructs an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ # Holds body of received fragments.
+ self._received_fragments = []
+ # Holds the opcode of the first fragment.
+ self._original_opcode = None
+
+ self._writer = FragmentedFrameBuilder(
+ self._options.mask_send, self._options.outgoing_frame_filters,
+ self._options.encode_text_message_to_utf8)
+
+ self._ping_queue = deque()
+
+ def _receive_frame(self):
+        """Receives a frame and returns the data in the frame as a tuple
+        containing each header field and the payload separately.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ InvalidFrameException: when the frame contains invalid data.
+ """
+
+ def _receive_bytes(length):
+ return self.receive_bytes(length)
+
+ return parse_frame(receive_bytes=_receive_bytes,
+ logger=self._logger,
+ ws_version=self._request.ws_version,
+ unmask_receive=self._options.unmask_receive)
+
+ def _receive_frame_as_frame_object(self):
+ opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
+
+ return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
+ opcode=opcode, payload=unmasked_bytes)
+
+ def receive_filtered_frame(self):
+ """Receives a frame and applies frame filters and message filters.
+ The frame to be received must satisfy following conditions:
+ - The frame is not fragmented.
+ - The opcode of the frame is TEXT or BINARY.
+
+        DO NOT USE this method except for testing purposes.
+ """
+
+ frame = self._receive_frame_as_frame_object()
+ if not frame.fin:
+ raise InvalidFrameException(
+ 'Segmented frames must not be received via '
+ 'receive_filtered_frame()')
+ if (frame.opcode != common.OPCODE_TEXT and
+ frame.opcode != common.OPCODE_BINARY):
+ raise InvalidFrameException(
+ 'Control frames must not be received via '
+ 'receive_filtered_frame()')
+
+ for frame_filter in self._options.incoming_frame_filters:
+ frame_filter.filter(frame)
+ for message_filter in self._options.incoming_message_filters:
+ frame.payload = message_filter.filter(frame.payload)
+ return frame
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ message: text in unicode or binary in str to send.
+ binary: send message as binary frame.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection or called with inconsistent message type or
+ binary parameter.
+ """
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ if binary and isinstance(message, unicode):
+ raise BadOperationException(
+ 'Message for binary frame must be instance of str')
+
+ for message_filter in self._options.outgoing_message_filters:
+ message = message_filter.filter(message, end, binary)
+
+ try:
+ # Set this to any positive integer to limit maximum size of data in
+ # payload data of each frame.
+ MAX_PAYLOAD_DATA_SIZE = -1
+
+ if MAX_PAYLOAD_DATA_SIZE <= 0:
+ self._write(self._writer.build(message, end, binary))
+ return
+
+ bytes_written = 0
+ while True:
+ end_for_this_frame = end
+ bytes_to_write = len(message) - bytes_written
+ if (MAX_PAYLOAD_DATA_SIZE > 0 and
+ bytes_to_write > MAX_PAYLOAD_DATA_SIZE):
+ end_for_this_frame = False
+ bytes_to_write = MAX_PAYLOAD_DATA_SIZE
+
+ frame = self._writer.build(
+ message[bytes_written:bytes_written + bytes_to_write],
+ end_for_this_frame,
+ binary)
+ self._write(frame)
+
+ bytes_written += bytes_to_write
+
+ # This if must be placed here (the end of while block) so that
+ # at least one frame is sent.
+ if len(message) <= bytes_written:
+ break
+ except ValueError, e:
+ raise BadOperationException(e)
+
+ def _get_message_from_frame(self, frame):
+ """Gets a message from frame. If the message is composed of fragmented
+ frames and the frame is not the last fragmented frame, this method
+ returns None. The whole message will be returned when the last
+ fragmented frame is passed to this method.
+
+ Raises:
+ InvalidFrameException: when the frame doesn't match defragmentation
+ context, or the frame contains invalid data.
+ """
+
+ if frame.opcode == common.OPCODE_CONTINUATION:
+ if not self._received_fragments:
+ if frame.fin:
+ raise InvalidFrameException(
+ 'Received a termination frame but fragmentation '
+ 'not started')
+ else:
+ raise InvalidFrameException(
+ 'Received an intermediate frame but '
+ 'fragmentation not started')
+
+ if frame.fin:
+ # End of fragmentation frame
+ self._received_fragments.append(frame.payload)
+ message = ''.join(self._received_fragments)
+ self._received_fragments = []
+ return message
+ else:
+ # Intermediate frame
+ self._received_fragments.append(frame.payload)
+ return None
+ else:
+ if self._received_fragments:
+ if frame.fin:
+ raise InvalidFrameException(
+ 'Received an unfragmented frame without '
+ 'terminating existing fragmentation')
+ else:
+ raise InvalidFrameException(
+ 'New fragmentation started without terminating '
+ 'existing fragmentation')
+
+ if frame.fin:
+ # Unfragmented frame
+
+ self._original_opcode = frame.opcode
+ return frame.payload
+ else:
+ # Start of fragmentation frame
+
+ if common.is_control_opcode(frame.opcode):
+ raise InvalidFrameException(
+ 'Control frames must not be fragmented')
+
+ self._original_opcode = frame.opcode
+ self._received_fragments.append(frame.payload)
+ return None
+
+ def _process_close_message(self, message):
+ """Processes close message.
+
+ Args:
+ message: close message.
+
+ Raises:
+ InvalidFrameException: when the message is invalid.
+ """
+
+ self._request.client_terminated = True
+
+ # Status code is optional. We can have status reason only if we
+ # have status code. Status reason can be empty string. So,
+ # allowed cases are
+ # - no application data: no code no reason
+ # - 2 octet of application data: has code but no reason
+ # - 3 or more octet of application data: both code and reason
+ if len(message) == 0:
+ self._logger.debug('Received close frame (empty body)')
+ self._request.ws_close_code = (
+ common.STATUS_NO_STATUS_RECEIVED)
+ elif len(message) == 1:
+ raise InvalidFrameException(
+ 'If a close frame has status code, the length of '
+ 'status code must be 2 octet')
+ elif len(message) >= 2:
+ self._request.ws_close_code = struct.unpack(
+ '!H', message[0:2])[0]
+ self._request.ws_close_reason = message[2:].decode(
+ 'utf-8', 'replace')
+ self._logger.debug(
+ 'Received close frame (code=%d, reason=%r)',
+ self._request.ws_close_code,
+ self._request.ws_close_reason)
+
+ # As we've received a close frame, no more data is coming over the
+ # socket. We can now safely close the socket without worrying about
+ # RST sending.
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing handshake')
+ return
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ code = common.STATUS_NORMAL_CLOSURE
+ reason = ''
+ if hasattr(self._request, '_dispatcher'):
+ dispatcher = self._request._dispatcher
+ code, reason = dispatcher.passive_closing_handshake(
+ self._request)
+ if code is None and reason is not None and len(reason) > 0:
+ self._logger.warning(
+ 'Handler specified reason despite code being None')
+ reason = ''
+ if reason is None:
+ reason = ''
+ self._send_closing_handshake(code, reason)
+ self._logger.debug(
+ 'Acknowledged closing handshake initiated by the peer '
+ '(code=%r, reason=%r)', code, reason)
+
+ def _process_ping_message(self, message):
+ """Processes ping message.
+
+ Args:
+ message: ping message.
+ """
+
+ try:
+ handler = self._request.on_ping_handler
+ if handler:
+ handler(self._request, message)
+ return
+ except AttributeError, e:
+ pass
+ self._send_pong(message)
+
+ def _process_pong_message(self, message):
+ """Processes pong message.
+
+ Args:
+ message: pong message.
+ """
+
+ # TODO(tyoshino): Add ping timeout handling.
+
+ inflight_pings = deque()
+
+ while True:
+ try:
+ expected_body = self._ping_queue.popleft()
+ if expected_body == message:
+ # inflight_pings contains pings ignored by the
+ # other peer. Just forget them.
+ self._logger.debug(
+ 'Ping %r is acked (%d pings were ignored)',
+ expected_body, len(inflight_pings))
+ break
+ else:
+ inflight_pings.append(expected_body)
+ except IndexError, e:
+ # The received pong was unsolicited pong. Keep the
+ # ping queue as is.
+ self._ping_queue = inflight_pings
+                self._logger.debug('Received an unsolicited pong')
+ break
+
+ try:
+ handler = self._request.on_pong_handler
+ if handler:
+ handler(self._request, message)
+ except AttributeError, e:
+ pass
+
+ def receive_message(self):
+ """Receive a WebSocket frame and return its payload as a text in
+ unicode or a binary in str.
+
+ Returns:
+ payload data of the frame
+ - as unicode instance if received text frame
+ - as str instance if received binary frame
+ or None iff received closing handshake.
+ Raises:
+ BadOperationException: when called on a client-terminated
+ connection.
+ ConnectionTerminatedException: when read returns empty
+ string.
+ InvalidFrameException: when the frame contains invalid
+ data.
+ UnsupportedFrameException: when the received frame has
+ flags, opcode we cannot handle. You can ignore this
+ exception and continue receiving the next frame.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # mp_conn.read will block if no bytes are available.
+ # Timeout is controlled by TimeOut directive of Apache.
+
+ frame = self._receive_frame_as_frame_object()
+
+ # Check the constraint on the payload size for control frames
+ # before extension processes the frame.
+ # See also http://tools.ietf.org/html/rfc6455#section-5.5
+ if (common.is_control_opcode(frame.opcode) and
+ len(frame.payload) > 125):
+ raise InvalidFrameException(
+ 'Payload data size of control frames must be 125 bytes or '
+ 'less')
+
+ for frame_filter in self._options.incoming_frame_filters:
+ frame_filter.filter(frame)
+
+ if frame.rsv1 or frame.rsv2 or frame.rsv3:
+ raise UnsupportedFrameException(
+ 'Unsupported flag is set (rsv = %d%d%d)' %
+ (frame.rsv1, frame.rsv2, frame.rsv3))
+
+ message = self._get_message_from_frame(frame)
+ if message is None:
+ continue
+
+ for message_filter in self._options.incoming_message_filters:
+ message = message_filter.filter(message)
+
+ if self._original_opcode == common.OPCODE_TEXT:
+ # The WebSocket protocol section 4.4 specifies that invalid
+ # characters must be replaced with U+fffd REPLACEMENT
+ # CHARACTER.
+ try:
+ return message.decode('utf-8')
+ except UnicodeDecodeError, e:
+ raise InvalidUTF8Exception(e)
+ elif self._original_opcode == common.OPCODE_BINARY:
+ return message
+ elif self._original_opcode == common.OPCODE_CLOSE:
+ self._process_close_message(message)
+ return None
+ elif self._original_opcode == common.OPCODE_PING:
+ self._process_ping_message(message)
+ elif self._original_opcode == common.OPCODE_PONG:
+ self._process_pong_message(message)
+ else:
+ raise UnsupportedFrameException(
+ 'Opcode %d is not supported' % self._original_opcode)
+
+ def _send_closing_handshake(self, code, reason):
+ body = create_closing_handshake_body(code, reason)
+ frame = create_close_frame(
+ body, mask=self._options.mask_send,
+ frame_filters=self._options.outgoing_frame_filters)
+
+ self._request.server_terminated = True
+
+ self._write(frame)
+
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='',
+ wait_response=True):
+ """Closes a WebSocket connection.
+
+ Args:
+ code: Status code for close frame. If code is None, a close
+ frame with empty body will be sent.
+ reason: string representing close reason.
+            wait_response: True when the caller wants to wait for the
+                response.
+ Raises:
+            BadOperationException: when reason is specified with code None,
+                or reason is an instance of neither str nor unicode.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ if code is None:
+ if reason is not None and len(reason) > 0:
+ raise BadOperationException(
+ 'close reason must not be specified if code is None')
+ reason = ''
+ else:
+ if not isinstance(reason, str) and not isinstance(reason, unicode):
+ raise BadOperationException(
+ 'close reason must be an instance of str or unicode')
+
+ self._send_closing_handshake(code, reason)
+ self._logger.debug(
+ 'Initiated closing handshake (code=%r, reason=%r)',
+ code, reason)
+
+ if (code == common.STATUS_GOING_AWAY or
+ code == common.STATUS_PROTOCOL_ERROR) or not wait_response:
+            # It doesn't make sense to wait for a close frame if the reason
+            # is a protocol error or the server is going away. For some other
+            # reasons it might also not make sense to wait for a close frame,
+            # but that's not clear yet.
+ return
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
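+    # Illustrative sketch (not part of the original module): initiating the
+    # closing handshake from server-side code with the default status code
+    # and a short reason, waiting for the client's close frame.
+    #
+    #   request.ws_stream.close_connection(
+    #       code=common.STATUS_NORMAL_CLOSURE, reason='done')
+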
+ def send_ping(self, body=''):
+ frame = create_ping_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+ self._write(frame)
+
+ self._ping_queue.append(body)
+
+ def _send_pong(self, body):
+ frame = create_pong_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+ self._write(frame)
+
+ def get_last_received_opcode(self):
+ """Returns the opcode of the WebSocket message which the last received
+ frame belongs to. The return value is valid iff immediately after
+ receive_message call.
+ """
+
+ return self._original_opcode
+
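+# Illustrative sketch (not part of the original module): right after
+# receive_message() returns, get_last_received_opcode() reports the opcode of
+# the message that was just received.
+#
+#   message = request.ws_stream.receive_message()
+#   if (request.ws_stream.get_last_received_opcode() ==
+#           common.OPCODE_BINARY):
+#       pass  # message is a str holding the binary payload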
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/common.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/common.py
new file mode 100644
index 000000000..2fc2ead64
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/common.py
@@ -0,0 +1,303 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file must not depend on any module specific to the WebSocket protocol.
+"""
+
+
+from mod_pywebsocket import http_header_util
+
+
+# Additional log level definitions.
+LOGLEVEL_FINE = 9
+
+# Constants indicating WebSocket protocol version.
+VERSION_HIXIE75 = -1
+VERSION_HYBI00 = 0
+VERSION_HYBI01 = 1
+VERSION_HYBI02 = 2
+VERSION_HYBI03 = 2
+VERSION_HYBI04 = 4
+VERSION_HYBI05 = 5
+VERSION_HYBI06 = 6
+VERSION_HYBI07 = 7
+VERSION_HYBI08 = 8
+VERSION_HYBI09 = 8
+VERSION_HYBI10 = 8
+VERSION_HYBI11 = 8
+VERSION_HYBI12 = 8
+VERSION_HYBI13 = 13
+VERSION_HYBI14 = 13
+VERSION_HYBI15 = 13
+VERSION_HYBI16 = 13
+VERSION_HYBI17 = 13
+
+# Constants indicating WebSocket protocol latest version.
+VERSION_HYBI_LATEST = VERSION_HYBI13
+
+# Port numbers
+DEFAULT_WEB_SOCKET_PORT = 80
+DEFAULT_WEB_SOCKET_SECURE_PORT = 443
+
+# Schemes
+WEB_SOCKET_SCHEME = 'ws'
+WEB_SOCKET_SECURE_SCHEME = 'wss'
+
+# Frame opcodes defined in the spec.
+OPCODE_CONTINUATION = 0x0
+OPCODE_TEXT = 0x1
+OPCODE_BINARY = 0x2
+OPCODE_CLOSE = 0x8
+OPCODE_PING = 0x9
+OPCODE_PONG = 0xa
+
+# UUIDs used by HyBi 04 and later opening handshake and frame masking.
+WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+
+# Opening handshake header names and expected values.
+UPGRADE_HEADER = 'Upgrade'
+WEBSOCKET_UPGRADE_TYPE = 'websocket'
+WEBSOCKET_UPGRADE_TYPE_HIXIE75 = 'WebSocket'
+CONNECTION_HEADER = 'Connection'
+UPGRADE_CONNECTION_TYPE = 'Upgrade'
+HOST_HEADER = 'Host'
+ORIGIN_HEADER = 'Origin'
+SEC_WEBSOCKET_ORIGIN_HEADER = 'Sec-WebSocket-Origin'
+SEC_WEBSOCKET_KEY_HEADER = 'Sec-WebSocket-Key'
+SEC_WEBSOCKET_ACCEPT_HEADER = 'Sec-WebSocket-Accept'
+SEC_WEBSOCKET_VERSION_HEADER = 'Sec-WebSocket-Version'
+SEC_WEBSOCKET_PROTOCOL_HEADER = 'Sec-WebSocket-Protocol'
+SEC_WEBSOCKET_EXTENSIONS_HEADER = 'Sec-WebSocket-Extensions'
+SEC_WEBSOCKET_DRAFT_HEADER = 'Sec-WebSocket-Draft'
+SEC_WEBSOCKET_KEY1_HEADER = 'Sec-WebSocket-Key1'
+SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2'
+SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location'
+
+# Extensions
+DEFLATE_FRAME_EXTENSION = 'deflate-frame'
+PERMESSAGE_COMPRESSION_EXTENSION = 'permessage-compress'
+PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate'
+X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame'
+X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION = 'x-webkit-permessage-compress'
+MUX_EXTENSION = 'mux_DO_NOT_USE'
+
+# Status codes
+# The codes STATUS_NO_STATUS_RECEIVED, STATUS_ABNORMAL_CLOSURE, and
+# STATUS_TLS_HANDSHAKE are pseudo codes indicating specific error cases and
+# must not be used as codes in actual closing frames.
+# Application-level errors must use codes in the range
+# STATUS_USER_REGISTERED_BASE to STATUS_USER_PRIVATE_MAX. The codes in the
+# range STATUS_USER_REGISTERED_BASE to STATUS_USER_REGISTERED_MAX are managed
+# by IANA. Usually, applications must define user protocol-level errors in
+# the range STATUS_USER_PRIVATE_BASE to STATUS_USER_PRIVATE_MAX.
+STATUS_NORMAL_CLOSURE = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA = 1003
+STATUS_NO_STATUS_RECEIVED = 1005
+STATUS_ABNORMAL_CLOSURE = 1006
+STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_MANDATORY_EXTENSION = 1010
+STATUS_INTERNAL_ENDPOINT_ERROR = 1011
+STATUS_TLS_HANDSHAKE = 1015
+STATUS_USER_REGISTERED_BASE = 3000
+STATUS_USER_REGISTERED_MAX = 3999
+STATUS_USER_PRIVATE_BASE = 4000
+STATUS_USER_PRIVATE_MAX = 4999
+# The following definitions are aliases kept for compatibility. Applications
+# must not use these obsolete definitions anymore.
+STATUS_NORMAL = STATUS_NORMAL_CLOSURE
+STATUS_UNSUPPORTED = STATUS_UNSUPPORTED_DATA
+STATUS_CODE_NOT_AVAILABLE = STATUS_NO_STATUS_RECEIVED
+STATUS_ABNORMAL_CLOSE = STATUS_ABNORMAL_CLOSURE
+STATUS_INVALID_FRAME_PAYLOAD = STATUS_INVALID_FRAME_PAYLOAD_DATA
+STATUS_MANDATORY_EXT = STATUS_MANDATORY_EXTENSION
+
+# HTTP status codes
+HTTP_STATUS_BAD_REQUEST = 400
+HTTP_STATUS_FORBIDDEN = 403
+HTTP_STATUS_NOT_FOUND = 404
+
+
+def is_control_opcode(opcode):
+ return (opcode >> 3) == 1
+
+
+class ExtensionParameter(object):
+ """Holds information about an extension which is exchanged on extension
+ negotiation in opening handshake.
+ """
+
+ def __init__(self, name):
+ self._name = name
+ # TODO(tyoshino): Change the data structure to more efficient one such
+ # as dict when the spec changes to say like
+ # - Parameter names must be unique
+ # - The order of parameters is not significant
+ self._parameters = []
+
+ def name(self):
+ return self._name
+
+ def add_parameter(self, name, value):
+ self._parameters.append((name, value))
+
+ def get_parameters(self):
+ return self._parameters
+
+ def get_parameter_names(self):
+ return [name for name, unused_value in self._parameters]
+
+ def has_parameter(self, name):
+ for param_name, param_value in self._parameters:
+ if param_name == name:
+ return True
+ return False
+
+ def get_parameter_value(self, name):
+ for param_name, param_value in self._parameters:
+ if param_name == name:
+ return param_value
+
+
+class ExtensionParsingException(Exception):
+ def __init__(self, name):
+ super(ExtensionParsingException, self).__init__(name)
+
+
+def _parse_extension_param(state, definition):
+ param_name = http_header_util.consume_token(state)
+
+ if param_name is None:
+ raise ExtensionParsingException('No valid parameter name found')
+
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, '='):
+ definition.add_parameter(param_name, None)
+ return
+
+ http_header_util.consume_lwses(state)
+
+ # TODO(tyoshino): Add code to validate that parsed param_value is token
+ param_value = http_header_util.consume_token_or_quoted_string(state)
+ if param_value is None:
+ raise ExtensionParsingException(
+ 'No valid parameter value found on the right-hand side of '
+ 'parameter %r' % param_name)
+
+ definition.add_parameter(param_name, param_value)
+
+
+def _parse_extension(state):
+ extension_token = http_header_util.consume_token(state)
+ if extension_token is None:
+ return None
+
+ extension = ExtensionParameter(extension_token)
+
+ while True:
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, ';'):
+ break
+
+ http_header_util.consume_lwses(state)
+
+ try:
+ _parse_extension_param(state, extension)
+ except ExtensionParsingException, e:
+ raise ExtensionParsingException(
+ 'Failed to parse parameter for %r (%r)' %
+ (extension_token, e))
+
+ return extension
+
+
+def parse_extensions(data):
+ """Parses Sec-WebSocket-Extensions header value returns a list of
+ ExtensionParameter objects.
+
+ Leading LWSes must be trimmed.
+ """
+
+ state = http_header_util.ParsingState(data)
+
+ extension_list = []
+ while True:
+ extension = _parse_extension(state)
+ if extension is not None:
+ extension_list.append(extension)
+
+ http_header_util.consume_lwses(state)
+
+ if http_header_util.peek(state) is None:
+ break
+
+ if not http_header_util.consume_string(state, ','):
+ raise ExtensionParsingException(
+ 'Failed to parse Sec-WebSocket-Extensions header: '
+ 'Expected a comma but found %r' %
+ http_header_util.peek(state))
+
+ http_header_util.consume_lwses(state)
+
+ if len(extension_list) == 0:
+ raise ExtensionParsingException(
+ 'No valid extension entry found')
+
+ return extension_list
+
+
+def format_extension(extension):
+ """Formats an ExtensionParameter object."""
+
+ formatted_params = [extension.name()]
+ for param_name, param_value in extension.get_parameters():
+ if param_value is None:
+ formatted_params.append(param_name)
+ else:
+ quoted_value = http_header_util.quote_if_necessary(param_value)
+ formatted_params.append('%s=%s' % (param_name, quoted_value))
+ return '; '.join(formatted_params)
+
+
+def format_extensions(extension_list):
+ """Formats a list of ExtensionParameter objects."""
+
+ formatted_extension_list = []
+ for extension in extension_list:
+ formatted_extension_list.append(format_extension(extension))
+ return ', '.join(formatted_extension_list)
+
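+# Illustrative sketch (not part of the original module): round-tripping a
+# Sec-WebSocket-Extensions value with the helpers above.
+#
+#   value = ('permessage-deflate; client_max_window_bits, '
+#            'mux_DO_NOT_USE; quota=10')
+#   extensions = parse_extensions(value)
+#   [e.name() for e in extensions]
+#   # -> ['permessage-deflate', 'mux_DO_NOT_USE']
+#   extensions[1].get_parameter_value('quota')
+#   # -> '10' (parameter values stay strings)
+#   format_extensions(extensions) == value
+#   # -> True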
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/dispatch.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/dispatch.py
new file mode 100644
index 000000000..96c91e0c9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/dispatch.py
@@ -0,0 +1,393 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Dispatch WebSocket request.
+"""
+
+
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import msgutil
+from mod_pywebsocket import mux
+from mod_pywebsocket import stream
+from mod_pywebsocket import util
+
+
+_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
+_SOURCE_SUFFIX = '_wsh.py'
+_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
+_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
+_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
+ 'web_socket_passive_closing_handshake')
+
+
+class DispatchException(Exception):
+ """Exception in dispatching WebSocket request."""
+
+ def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
+ super(DispatchException, self).__init__(name)
+ self.status = status
+
+
+def _default_passive_closing_handshake_handler(request):
+ """Default web_socket_passive_closing_handshake handler."""
+
+ return common.STATUS_NORMAL_CLOSURE, ''
+
+
+def _normalize_path(path):
+ """Normalize path.
+
+ Args:
+ path: the path to normalize.
+
+ Path is converted to the absolute path.
+ The input path can use either '\\' or '/' as the separator.
+ The normalized path always uses '/' regardless of the platform.
+ """
+
+ path = path.replace('\\', os.path.sep)
+ path = os.path.realpath(path)
+ path = path.replace('\\', '/')
+ return path
+
+
+def _create_path_to_resource_converter(base_dir):
+ """Returns a function that converts the path of a WebSocket handler source
+ file to a resource string by removing the path to the base directory from
+ its head, removing _SOURCE_SUFFIX from its tail, and replacing path
+ separators in it with '/'.
+
+ Args:
+ base_dir: the path to the base directory.
+ """
+
+ base_dir = _normalize_path(base_dir)
+
+ base_len = len(base_dir)
+ suffix_len = len(_SOURCE_SUFFIX)
+
+ def converter(path):
+ if not path.endswith(_SOURCE_SUFFIX):
+ return None
+        # _normalize_path must not be used here because resolving symlinks
+        # breaks the following path check.
+ path = path.replace('\\', '/')
+ if not path.startswith(base_dir):
+ return None
+ return path[base_len:-suffix_len]
+
+ return converter
+
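+# Illustrative sketch (not part of the original module): with a hypothetical
+# base directory '/srv/handlers', the returned converter maps handler file
+# paths to WebSocket resource names.
+#
+#   convert = _create_path_to_resource_converter('/srv/handlers')
+#   convert('/srv/handlers/echo_wsh.py')       # -> '/echo'
+#   convert('/srv/handlers/chat/room_wsh.py')  # -> '/chat/room'
+#   convert('/srv/handlers/readme.txt')        # -> None (no _wsh.py suffix)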
+
+def _enumerate_handler_file_paths(directory):
+ """Returns a generator that enumerates WebSocket Handler source file names
+ in the given directory.
+ """
+
+ for root, unused_dirs, files in os.walk(directory):
+ for base in files:
+ path = os.path.join(root, base)
+ if _SOURCE_PATH_PATTERN.search(path):
+ yield path
+
+
+class _HandlerSuite(object):
+ """A handler suite holder class."""
+
+ def __init__(self, do_extra_handshake, transfer_data,
+ passive_closing_handshake):
+ self.do_extra_handshake = do_extra_handshake
+ self.transfer_data = transfer_data
+ self.passive_closing_handshake = passive_closing_handshake
+
+
+def _source_handler_file(handler_definition):
+ """Source a handler definition string.
+
+ Args:
+ handler_definition: a string containing Python statements that define
+ handler functions.
+ """
+
+ global_dic = {}
+ try:
+ exec handler_definition in global_dic
+ except Exception:
+ raise DispatchException('Error in sourcing handler:' +
+ util.get_stack_trace())
+ passive_closing_handshake_handler = None
+ try:
+ passive_closing_handshake_handler = _extract_handler(
+ global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
+ except Exception:
+ passive_closing_handshake_handler = (
+ _default_passive_closing_handshake_handler)
+ return _HandlerSuite(
+ _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
+ _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
+ passive_closing_handshake_handler)
+
+
+def _extract_handler(dic, name):
+ """Extracts a callable with the specified name from the given dictionary
+ dic.
+ """
+
+ if name not in dic:
+ raise DispatchException('%s is not defined.' % name)
+ handler = dic[name]
+ if not callable(handler):
+ raise DispatchException('%s is not callable.' % name)
+ return handler
+
+
+class Dispatcher(object):
+ """Dispatches WebSocket requests.
+
+ This class maintains a map from resource name to handlers.
+ """
+
+ def __init__(
+ self, root_dir, scan_dir=None,
+ allow_handlers_outside_root_dir=True):
+ """Construct an instance.
+
+ Args:
+ root_dir: The directory where handler definition files are
+ placed.
+ scan_dir: The directory where handler definition files are
+ searched. scan_dir must be a directory under root_dir,
+ including root_dir itself. If scan_dir is None,
+ root_dir is used as scan_dir. scan_dir can be useful
+ in saving scan time when root_dir contains many
+ subdirectories.
+ allow_handlers_outside_root_dir: Scans handler files even if their
+ canonical path is not under root_dir.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._handler_suite_map = {}
+ self._source_warnings = []
+ if scan_dir is None:
+ scan_dir = root_dir
+ if not os.path.realpath(scan_dir).startswith(
+ os.path.realpath(root_dir)):
+ raise DispatchException('scan_dir:%s must be a directory under '
+ 'root_dir:%s.' % (scan_dir, root_dir))
+ self._source_handler_files_in_dir(
+ root_dir, scan_dir, allow_handlers_outside_root_dir)
+
+ def add_resource_path_alias(self,
+ alias_resource_path, existing_resource_path):
+ """Add resource path alias.
+
+        Once added, requests to alias_resource_path will be handled by the
+        handler registered for existing_resource_path.
+
+ Args:
+ alias_resource_path: alias resource path
+ existing_resource_path: existing resource path
+ """
+ try:
+ handler_suite = self._handler_suite_map[existing_resource_path]
+ self._handler_suite_map[alias_resource_path] = handler_suite
+ except KeyError:
+ raise DispatchException('No handler for: %r' %
+ existing_resource_path)
+
+ def source_warnings(self):
+ """Return warnings in sourcing handlers."""
+
+ return self._source_warnings
+
+ def do_extra_handshake(self, request):
+ """Do extra checking in WebSocket handshake.
+
+ Select a handler based on request.uri and call its
+ web_socket_do_extra_handshake function.
+
+ Args:
+ request: mod_python request.
+
+ Raises:
+            DispatchException: when a handler is not found
+            AbortedByUserException: when a user handler aborts the connection
+            HandshakeException: when the opening handshake fails
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ raise DispatchException('No handler for: %r' % request.ws_resource)
+ do_extra_handshake_ = handler_suite.do_extra_handshake
+ try:
+ do_extra_handshake_(request)
+ except handshake.AbortedByUserException, e:
+ # Re-raise to tell the caller of this function to finish this
+ # connection without sending any error.
+ self._logger.debug('%s', util.get_stack_trace())
+ raise
+ except Exception, e:
+ util.prepend_message_to_exception(
+ '%s raised exception for %s: ' % (
+ _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
+ request.ws_resource),
+ e)
+ raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
+
+ def transfer_data(self, request):
+ """Let a handler transfer_data with a WebSocket client.
+
+ Select a handler based on request.ws_resource and call its
+ web_socket_transfer_data function.
+
+ Args:
+ request: mod_python request.
+
+ Raises:
+            DispatchException: when a handler is not found
+            AbortedByUserException: when a user handler aborts the connection
+ """
+
+ # TODO(tyoshino): Terminate underlying TCP connection if possible.
+ try:
+ if mux.use_mux(request):
+ mux.start(request, self)
+ else:
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ raise DispatchException('No handler for: %r' %
+ request.ws_resource)
+ transfer_data_ = handler_suite.transfer_data
+ transfer_data_(request)
+
+ if not request.server_terminated:
+ request.ws_stream.close_connection()
+ # Catch non-critical exceptions the handler didn't handle.
+ except handshake.AbortedByUserException, e:
+ self._logger.debug('%s', util.get_stack_trace())
+ raise
+ except msgutil.BadOperationException, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(
+ common.STATUS_INTERNAL_ENDPOINT_ERROR)
+ except msgutil.InvalidFrameException, e:
+            # InvalidFrameException must be caught before
+            # ConnectionTerminatedException; a handler for the latter would
+            # also catch InvalidFrameException.
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
+ except msgutil.UnsupportedFrameException, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
+ except stream.InvalidUTF8Exception, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(
+ common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
+ except msgutil.ConnectionTerminatedException, e:
+ self._logger.debug('%s', e)
+ except Exception, e:
+ # Any other exceptions are forwarded to the caller of this
+ # function.
+ util.prepend_message_to_exception(
+ '%s raised exception for %s: ' % (
+ _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
+ e)
+ raise
+
+ def passive_closing_handshake(self, request):
+ """Prepare code and reason for responding client initiated closing
+ handshake.
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ return _default_passive_closing_handshake_handler(request)
+ return handler_suite.passive_closing_handshake(request)
+
+ def get_handler_suite(self, resource):
+ """Retrieves two handlers (one for extra handshake processing, and one
+ for data transfer) for the given request as a HandlerSuite object.
+ """
+
+ fragment = None
+ if '#' in resource:
+ resource, fragment = resource.split('#', 1)
+ if '?' in resource:
+ resource = resource.split('?', 1)[0]
+ handler_suite = self._handler_suite_map.get(resource)
+ if handler_suite and fragment:
+ raise DispatchException('Fragment identifiers MUST NOT be used on '
+ 'WebSocket URIs',
+ common.HTTP_STATUS_BAD_REQUEST)
+ return handler_suite
+
+ def _source_handler_files_in_dir(
+ self, root_dir, scan_dir, allow_handlers_outside_root_dir):
+ """Source all the handler source files in the scan_dir directory.
+
+ The resource path is determined relative to root_dir.
+ """
+
+ # We build a map from resource to handler code assuming that there's
+ # only one path from root_dir to scan_dir and it can be obtained by
+ # comparing realpath of them.
+
+ # Here we cannot use abspath. See
+ # https://bugs.webkit.org/show_bug.cgi?id=31603
+
+ convert = _create_path_to_resource_converter(root_dir)
+ scan_realpath = os.path.realpath(scan_dir)
+ root_realpath = os.path.realpath(root_dir)
+ for path in _enumerate_handler_file_paths(scan_realpath):
+ if (not allow_handlers_outside_root_dir and
+ (not os.path.realpath(path).startswith(root_realpath))):
+ self._logger.debug(
+ 'Canonical path of %s is not under root directory' %
+ path)
+ continue
+ try:
+ handler_suite = _source_handler_file(open(path).read())
+ except DispatchException, e:
+ self._source_warnings.append('%s: %s' % (path, e))
+ continue
+ resource = convert(path)
+ if resource is None:
+ self._logger.debug(
+ 'Path to resource conversion on %s failed' % path)
+ else:
+ self._handler_suite_map[convert(path)] = handler_suite
+
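+# Illustrative sketch (not part of the original module): constructing a
+# Dispatcher over a hypothetical handler directory, surfacing sourcing
+# warnings, and aliasing a resource to an existing handler.
+#
+#   dispatcher = Dispatcher('/srv/handlers')
+#   for warning in dispatcher.source_warnings():
+#       logging.warning('mod_pywebsocket: %s', warning)
+#   dispatcher.add_resource_path_alias('/echo-alias', '/echo')
+#   # Per connection, the server then calls do_extra_handshake(request)
+#   # followed by transfer_data(request) on this dispatcher.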
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/extensions.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/extensions.py
new file mode 100644
index 000000000..49a9fdcf9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/extensions.py
@@ -0,0 +1,885 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket.http_header_util import quote_if_necessary
+
+
+# The list of available server side extension processor classes.
+_available_processors = {}
+_compression_extension_names = []
+
+
+class ExtensionProcessorInterface(object):
+
+ def __init__(self, request):
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._active = True
+
+ def request(self):
+ return self._request
+
+ def name(self):
+ return None
+
+ def check_consistency_with_other_processors(self, processors):
+ pass
+
+ def set_active(self, active):
+ self._active = active
+
+ def is_active(self):
+ return self._active
+
+ def _get_extension_response_internal(self):
+ return None
+
+ def get_extension_response(self):
+ if not self._active:
+ self._logger.debug('Extension %s is deactivated', self.name())
+ return None
+
+ response = self._get_extension_response_internal()
+ if response is None:
+ self._active = False
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+ pass
+
+ def setup_stream_options(self, stream_options):
+ if self._active:
+ self._setup_stream_options_internal(stream_options)
+
+
+def _log_outgoing_compression_ratio(
+ logger, original_bytes, filtered_bytes, average_ratio):
+ # Print inf when ratio is not available.
+ ratio = float('inf')
+ if original_bytes != 0:
+ ratio = float(filtered_bytes) / original_bytes
+
+ logger.debug('Outgoing compression ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+
+def _log_incoming_compression_ratio(
+ logger, received_bytes, filtered_bytes, average_ratio):
+ # Print inf when ratio is not available.
+ ratio = float('inf')
+ if filtered_bytes != 0:
+ ratio = float(received_bytes) / filtered_bytes
+
+ logger.debug('Incoming compression ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+
+def _parse_window_bits(bits):
+ """Return parsed integer value iff the given string conforms to the
+ grammar of the window bits extension parameters.
+ """
+
+ if bits is None:
+ raise ValueError('Value is required')
+
+ # For non integer values such as "10.0", ValueError will be raised.
+ int_bits = int(bits)
+
+ # First condition is to drop leading zero case e.g. "08".
+ if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
+ raise ValueError('Invalid value: %r' % bits)
+
+ return int_bits
+
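+# Illustrative sketch (not part of the original module): accepted and rejected
+# inputs for the window-bits parser above.
+#
+#   _parse_window_bits('8')    # -> 8
+#   _parse_window_bits('15')   # -> 15
+#   _parse_window_bits('08')   # raises ValueError (leading zero)
+#   _parse_window_bits('16')   # raises ValueError (out of range)
+#   _parse_window_bits(None)   # raises ValueError (value required)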
+
+class _AverageRatioCalculator(object):
+ """Stores total bytes of original and result data, and calculates average
+ result / original ratio.
+ """
+
+ def __init__(self):
+ self._total_original_bytes = 0
+ self._total_result_bytes = 0
+
+ def add_original_bytes(self, value):
+ self._total_original_bytes += value
+
+ def add_result_bytes(self, value):
+ self._total_result_bytes += value
+
+ def get_average_ratio(self):
+ if self._total_original_bytes != 0:
+ return (float(self._total_result_bytes) /
+ self._total_original_bytes)
+ else:
+ return float('inf')
+
+
+class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
+ """deflate-frame extension processor.
+
+ Specification:
+ http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
+ """
+
+ _WINDOW_BITS_PARAM = 'max_window_bits'
+ _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
+
+ def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
+ self._logger = util.get_class_logger(self)
+
+ self._response_window_bits = None
+ self._response_no_context_takeover = False
+ self._bfinal = False
+
+ # Calculates
+ # (Total outgoing bytes supplied to this filter) /
+ # (Total bytes sent to the network after applying this filter)
+ self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
+
+ # Calculates
+ # (Total bytes received from the network) /
+ # (Total incoming bytes obtained after applying this filter)
+ self._incoming_average_ratio_calculator = _AverageRatioCalculator()
+
+ def name(self):
+ return common.DEFLATE_FRAME_EXTENSION
+
+ def _get_extension_response_internal(self):
+ # Any unknown parameter will be just ignored.
+
+ window_bits = None
+ if self._request.has_parameter(self._WINDOW_BITS_PARAM):
+ window_bits = self._request.get_parameter_value(
+ self._WINDOW_BITS_PARAM)
+ try:
+ window_bits = _parse_window_bits(window_bits)
+ except ValueError, e:
+ return None
+
+ no_context_takeover = self._request.has_parameter(
+ self._NO_CONTEXT_TAKEOVER_PARAM)
+ if (no_context_takeover and
+ self._request.get_parameter_value(
+ self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
+ return None
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ window_bits, no_context_takeover)
+
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._compress_outgoing = True
+
+ response = common.ExtensionParameter(self._request.name())
+
+ if self._response_window_bits is not None:
+ response.add_parameter(
+ self._WINDOW_BITS_PARAM, str(self._response_window_bits))
+ if self._response_no_context_takeover:
+ response.add_parameter(
+ self._NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ self._logger.debug(
+ 'Enable %s extension ('
+ 'request: window_bits=%s; no_context_takeover=%r, '
+ 'response: window_wbits=%s; no_context_takeover=%r)' %
+ (self._request.name(),
+ window_bits,
+ no_context_takeover,
+ self._response_window_bits,
+ self._response_no_context_takeover))
+
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+
+ class _OutgoingFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._outgoing_filter(frame)
+
+ class _IncomingFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._incoming_filter(frame)
+
+ stream_options.outgoing_frame_filters.append(
+ _OutgoingFilter(self))
+ stream_options.incoming_frame_filters.insert(
+ 0, _IncomingFilter(self))
+
+ def set_response_window_bits(self, value):
+ self._response_window_bits = value
+
+ def set_response_no_context_takeover(self, value):
+ self._response_no_context_takeover = value
+
+ def set_bfinal(self, value):
+ self._bfinal = value
+
+ def enable_outgoing_compression(self):
+ self._compress_outgoing = True
+
+ def disable_outgoing_compression(self):
+ self._compress_outgoing = False
+
+ def _outgoing_filter(self, frame):
+ """Transform outgoing frames. This method is called only by
+ an _OutgoingFilter instance.
+ """
+
+ original_payload_size = len(frame.payload)
+ self._outgoing_average_ratio_calculator.add_original_bytes(
+ original_payload_size)
+
+ if (not self._compress_outgoing or
+ common.is_control_opcode(frame.opcode)):
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ original_payload_size)
+ return
+
+ frame.payload = self._rfc1979_deflater.filter(
+ frame.payload, bfinal=self._bfinal)
+ frame.rsv1 = 1
+
+ filtered_payload_size = len(frame.payload)
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ filtered_payload_size)
+
+ _log_outgoing_compression_ratio(
+ self._logger,
+ original_payload_size,
+ filtered_payload_size,
+ self._outgoing_average_ratio_calculator.get_average_ratio())
+
+ def _incoming_filter(self, frame):
+ """Transform incoming frames. This method is called only by
+ an _IncomingFilter instance.
+ """
+
+ received_payload_size = len(frame.payload)
+ self._incoming_average_ratio_calculator.add_result_bytes(
+ received_payload_size)
+
+ if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ received_payload_size)
+ return
+
+ frame.payload = self._rfc1979_inflater.filter(frame.payload)
+ frame.rsv1 = 0
+
+ filtered_payload_size = len(frame.payload)
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ filtered_payload_size)
+
+ _log_incoming_compression_ratio(
+ self._logger,
+ received_payload_size,
+ filtered_payload_size,
+ self._incoming_average_ratio_calculator.get_average_ratio())
+
+
+_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
+ DeflateFrameExtensionProcessor)
+_compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION)
+
+_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
+ DeflateFrameExtensionProcessor)
+_compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION)
+
+
+def _parse_compression_method(data):
+ """Parses the value of "method" extension parameter."""
+
+ return common.parse_extensions(data)
+
+
+def _create_accepted_method_desc(method_name, method_params):
+ """Creates accepted-method-desc from given method name and parameters"""
+
+ extension = common.ExtensionParameter(method_name)
+ for name, value in method_params:
+ extension.add_parameter(name, value)
+ return common.format_extension(extension)
+
+
+class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
+ """Base class for perframe-compress and permessage-compress extension."""
+
+ _METHOD_PARAM = 'method'
+
+ def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
+ self._logger = util.get_class_logger(self)
+ self._compression_method_name = None
+ self._compression_processor = None
+ self._compression_processor_hook = None
+
+ def name(self):
+ return ''
+
+ def _lookup_compression_processor(self, method_desc):
+ return None
+
+ def _get_compression_processor_response(self):
+ """Looks up the compression processor based on the self._request and
+ returns the compression processor's response.
+ """
+
+ method_list = self._request.get_parameter_value(self._METHOD_PARAM)
+ if method_list is None:
+ return None
+ methods = _parse_compression_method(method_list)
+ if methods is None:
+ return None
+        compression_processor = None
+        # The current implementation tries only the first method that matches
+        # a supported algorithm. The following methods aren't tried even if
+        # the first one is rejected.
+ # TODO(bashi): Need to clarify this behavior.
+ for method_desc in methods:
+ compression_processor = self._lookup_compression_processor(
+ method_desc)
+ if compression_processor is not None:
+ self._compression_method_name = method_desc.name()
+ break
+ if compression_processor is None:
+ return None
+
+ if self._compression_processor_hook:
+ self._compression_processor_hook(compression_processor)
+
+ processor_response = compression_processor.get_extension_response()
+ if processor_response is None:
+ return None
+ self._compression_processor = compression_processor
+ return processor_response
+
+ def _get_extension_response_internal(self):
+ processor_response = self._get_compression_processor_response()
+ if processor_response is None:
+ return None
+
+ response = common.ExtensionParameter(self._request.name())
+ accepted_method_desc = _create_accepted_method_desc(
+ self._compression_method_name,
+ processor_response.get_parameters())
+ response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
+ self._logger.debug(
+ 'Enable %s extension (method: %s)' %
+ (self._request.name(), self._compression_method_name))
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+ if self._compression_processor is None:
+ return
+ self._compression_processor.setup_stream_options(stream_options)
+
+ def set_compression_processor_hook(self, hook):
+ self._compression_processor_hook = hook
+
+ def get_compression_processor(self):
+ return self._compression_processor
+
+
+class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface):
+ """permessage-deflate extension processor. It's also used for
+ permessage-compress extension when the deflate method is chosen.
+
+ Specification:
+ http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08
+ """
+
+ _SERVER_MAX_WINDOW_BITS_PARAM = 'server_max_window_bits'
+ _SERVER_NO_CONTEXT_TAKEOVER_PARAM = 'server_no_context_takeover'
+ _CLIENT_MAX_WINDOW_BITS_PARAM = 'client_max_window_bits'
+ _CLIENT_NO_CONTEXT_TAKEOVER_PARAM = 'client_no_context_takeover'
+
+ def __init__(self, request, draft08=True):
+ """Construct PerMessageDeflateExtensionProcessor
+
+ Args:
+ draft08: Follow the constraints on the parameters that were not
+ specified for permessage-compress but are specified for
+ permessage-deflate as on
+ draft-ietf-hybi-permessage-compression-08.
+ """
+
+ ExtensionProcessorInterface.__init__(self, request)
+ self._logger = util.get_class_logger(self)
+
+ self._preferred_client_max_window_bits = None
+ self._client_no_context_takeover = False
+
+ self._draft08 = draft08
+
+ def name(self):
+ return 'deflate'
+
+ def _get_extension_response_internal(self):
+ if self._draft08:
+ for name in self._request.get_parameter_names():
+ if name not in [self._SERVER_MAX_WINDOW_BITS_PARAM,
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
+ self._CLIENT_MAX_WINDOW_BITS_PARAM]:
+ self._logger.debug('Unknown parameter: %r', name)
+ return None
+ else:
+ # Any unknown parameter will be just ignored.
+ pass
+
+ server_max_window_bits = None
+ if self._request.has_parameter(self._SERVER_MAX_WINDOW_BITS_PARAM):
+ server_max_window_bits = self._request.get_parameter_value(
+ self._SERVER_MAX_WINDOW_BITS_PARAM)
+ try:
+ server_max_window_bits = _parse_window_bits(
+ server_max_window_bits)
+ except ValueError, e:
+ self._logger.debug('Bad %s parameter: %r',
+ self._SERVER_MAX_WINDOW_BITS_PARAM,
+ e)
+ return None
+
+ server_no_context_takeover = self._request.has_parameter(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM)
+ if (server_no_context_takeover and
+ self._request.get_parameter_value(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM) is not None):
+ self._logger.debug('%s parameter must not have a value: %r',
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
+ server_no_context_takeover)
+ return None
+
+ # client_max_window_bits from a client indicates whether the client can
+ # accept client_max_window_bits from a server or not.
+ client_client_max_window_bits = self._request.has_parameter(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM)
+ if (self._draft08 and
+ client_client_max_window_bits and
+ self._request.get_parameter_value(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM) is not None):
+ self._logger.debug('%s parameter must not have a value in a '
+ 'client\'s opening handshake: %r',
+ self._CLIENT_MAX_WINDOW_BITS_PARAM,
+ client_client_max_window_bits)
+ return None
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ server_max_window_bits, server_no_context_takeover)
+
+ # Note that we prepare for incoming messages compressed with window
+        # bits up to 15 regardless of the client_max_window_bits value to be
+ # sent to the client.
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._framer = _PerMessageDeflateFramer(
+ server_max_window_bits, server_no_context_takeover)
+ self._framer.set_bfinal(False)
+ self._framer.set_compress_outgoing_enabled(True)
+
+ response = common.ExtensionParameter(self._request.name())
+
+ if server_max_window_bits is not None:
+ response.add_parameter(
+ self._SERVER_MAX_WINDOW_BITS_PARAM,
+ str(server_max_window_bits))
+
+ if server_no_context_takeover:
+ response.add_parameter(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ if self._preferred_client_max_window_bits is not None:
+ if self._draft08 and not client_client_max_window_bits:
+ self._logger.debug('Processor is configured to use %s but '
+ 'the client cannot accept it',
+ self._CLIENT_MAX_WINDOW_BITS_PARAM)
+ return None
+ response.add_parameter(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM,
+ str(self._preferred_client_max_window_bits))
+
+ if self._client_no_context_takeover:
+ response.add_parameter(
+ self._CLIENT_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ self._logger.debug(
+ 'Enable %s extension ('
+ 'request: server_max_window_bits=%s; '
+ 'server_no_context_takeover=%r, '
+ 'response: client_max_window_bits=%s; '
+ 'client_no_context_takeover=%r)' %
+ (self._request.name(),
+ server_max_window_bits,
+ server_no_context_takeover,
+ self._preferred_client_max_window_bits,
+ self._client_no_context_takeover))
+
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+ self._framer.setup_stream_options(stream_options)
+
+ def set_client_max_window_bits(self, value):
+ """If this option is specified, this class adds the
+ client_max_window_bits extension parameter to the handshake response,
+ but doesn't reduce the LZ77 sliding window size of its inflater.
+        I.e., you can use this for testing a client implementation but cannot
+        reduce the memory usage of this class.
+
+ If this method has been called with True and an offer without the
+ client_max_window_bits extension parameter is received,
+ - (When processing the permessage-deflate extension) this processor
+ declines the request.
+ - (When processing the permessage-compress extension) this processor
+ accepts the request.
+ """
+
+ self._preferred_client_max_window_bits = value
+
+ def set_client_no_context_takeover(self, value):
+ """If this option is specified, this class adds the
+ client_no_context_takeover extension parameter to the handshake
+        response, but doesn't reset the inflater for each message. I.e., you
+        can use this for testing a client implementation but cannot reduce
+        the memory usage of this class.
+ """
+
+ self._client_no_context_takeover = value
+
+ def set_bfinal(self, value):
+ self._framer.set_bfinal(value)
+
+ def enable_outgoing_compression(self):
+ self._framer.set_compress_outgoing_enabled(True)
+
+ def disable_outgoing_compression(self):
+ self._framer.set_compress_outgoing_enabled(False)
+
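+# Illustrative sketch (not part of the original module): configuring a
+# permessage-deflate processor before generating the handshake response.
+# Here `offer` stands for an ExtensionParameter parsed from the client's
+# offer and `stream_options` for the StreamOptions being prepared for the
+# connection.
+#
+#   processor = PerMessageDeflateExtensionProcessor(offer)
+#   processor.set_client_max_window_bits(10)
+#   processor.set_client_no_context_takeover(True)
+#   response = processor.get_extension_response()  # None means declined
+#   if response is not None:
+#       processor.setup_stream_options(stream_options)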
+
+class _PerMessageDeflateFramer(object):
+ """A framer for extensions with per-message DEFLATE feature."""
+
+ def __init__(self, deflate_max_window_bits, deflate_no_context_takeover):
+ self._logger = util.get_class_logger(self)
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ deflate_max_window_bits, deflate_no_context_takeover)
+
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._bfinal = False
+
+ self._compress_outgoing_enabled = False
+
+ # True if a message is fragmented and compression is ongoing.
+ self._compress_ongoing = False
+
+ # Calculates
+ # (Total outgoing bytes supplied to this filter) /
+ # (Total bytes sent to the network after applying this filter)
+ self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
+
+ # Calculates
+ # (Total bytes received from the network) /
+ # (Total incoming bytes obtained after applying this filter)
+ self._incoming_average_ratio_calculator = _AverageRatioCalculator()
+
+ def set_bfinal(self, value):
+ self._bfinal = value
+
+ def set_compress_outgoing_enabled(self, value):
+ self._compress_outgoing_enabled = value
+
+ def _process_incoming_message(self, message, decompress):
+ if not decompress:
+ return message
+
+ received_payload_size = len(message)
+ self._incoming_average_ratio_calculator.add_result_bytes(
+ received_payload_size)
+
+ message = self._rfc1979_inflater.filter(message)
+
+ filtered_payload_size = len(message)
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ filtered_payload_size)
+
+ _log_incoming_compression_ratio(
+ self._logger,
+ received_payload_size,
+ filtered_payload_size,
+ self._incoming_average_ratio_calculator.get_average_ratio())
+
+ return message
+
+ def _process_outgoing_message(self, message, end, binary):
+ if not binary:
+ message = message.encode('utf-8')
+
+ if not self._compress_outgoing_enabled:
+ return message
+
+ original_payload_size = len(message)
+ self._outgoing_average_ratio_calculator.add_original_bytes(
+ original_payload_size)
+
+ message = self._rfc1979_deflater.filter(
+ message, end=end, bfinal=self._bfinal)
+
+ filtered_payload_size = len(message)
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ filtered_payload_size)
+
+ _log_outgoing_compression_ratio(
+ self._logger,
+ original_payload_size,
+ filtered_payload_size,
+ self._outgoing_average_ratio_calculator.get_average_ratio())
+
+ if not self._compress_ongoing:
+ self._outgoing_frame_filter.set_compression_bit()
+ self._compress_ongoing = not end
+ return message
+
+ def _process_incoming_frame(self, frame):
+ if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
+ self._incoming_message_filter.decompress_next_message()
+ frame.rsv1 = 0
+
+ def _process_outgoing_frame(self, frame, compression_bit):
+ if (not compression_bit or
+ common.is_control_opcode(frame.opcode)):
+ return
+
+ frame.rsv1 = 1
+
+ def setup_stream_options(self, stream_options):
+ """Creates filters and sets them to the StreamOptions."""
+
+ class _OutgoingMessageFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, message, end=True, binary=False):
+ return self._parent._process_outgoing_message(
+ message, end, binary)
+
+ class _IncomingMessageFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+ self._decompress_next_message = False
+
+ def decompress_next_message(self):
+ self._decompress_next_message = True
+
+ def filter(self, message):
+ message = self._parent._process_incoming_message(
+ message, self._decompress_next_message)
+ self._decompress_next_message = False
+ return message
+
+ self._outgoing_message_filter = _OutgoingMessageFilter(self)
+ self._incoming_message_filter = _IncomingMessageFilter(self)
+ stream_options.outgoing_message_filters.append(
+ self._outgoing_message_filter)
+ stream_options.incoming_message_filters.append(
+ self._incoming_message_filter)
+
+ class _OutgoingFrameFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+ self._set_compression_bit = False
+
+ def set_compression_bit(self):
+ self._set_compression_bit = True
+
+ def filter(self, frame):
+ self._parent._process_outgoing_frame(
+ frame, self._set_compression_bit)
+ self._set_compression_bit = False
+
+ class _IncomingFrameFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._process_incoming_frame(frame)
+
+ self._outgoing_frame_filter = _OutgoingFrameFilter(self)
+ self._incoming_frame_filter = _IncomingFrameFilter(self)
+ stream_options.outgoing_frame_filters.append(
+ self._outgoing_frame_filter)
+ stream_options.incoming_frame_filters.append(
+ self._incoming_frame_filter)
+
+ stream_options.encode_text_message_to_utf8 = False
+
+
+_available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = (
+ PerMessageDeflateExtensionProcessor)
+# TODO(tyoshino): Reorganize class names.
+_compression_extension_names.append('deflate')
+
+
+class PerMessageCompressExtensionProcessor(
+ CompressionExtensionProcessorBase):
+ """permessage-compress extension processor.
+
+ Specification:
+ http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
+ """
+
+ _DEFLATE_METHOD = 'deflate'
+
+ def __init__(self, request):
+ CompressionExtensionProcessorBase.__init__(self, request)
+
+ def name(self):
+ return common.PERMESSAGE_COMPRESSION_EXTENSION
+
+ def _lookup_compression_processor(self, method_desc):
+ if method_desc.name() == self._DEFLATE_METHOD:
+ return PerMessageDeflateExtensionProcessor(method_desc, False)
+ return None
+
+
+_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
+ PerMessageCompressExtensionProcessor)
+_compression_extension_names.append(common.PERMESSAGE_COMPRESSION_EXTENSION)
+
+
+class MuxExtensionProcessor(ExtensionProcessorInterface):
+ """WebSocket multiplexing extension processor."""
+
+ _QUOTA_PARAM = 'quota'
+
+ def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
+ self._quota = 0
+ self._extensions = []
+
+ def name(self):
+ return common.MUX_EXTENSION
+
+ def check_consistency_with_other_processors(self, processors):
+ before_mux = True
+ for processor in processors:
+ name = processor.name()
+ if name == self.name():
+ before_mux = False
+ continue
+ if not processor.is_active():
+ continue
+ if before_mux:
+ # Mux extension cannot be used after extensions
+ # that depend on frame boundary, extension data field, or any
+ # reserved bits which are attributed to each frame.
+ if (name == common.DEFLATE_FRAME_EXTENSION or
+ name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
+ self.set_active(False)
+ return
+ else:
+ # Mux extension should not be applied before any history-based
+ # compression extension.
+ if (name == common.DEFLATE_FRAME_EXTENSION or
+ name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION or
+ name == common.PERMESSAGE_COMPRESSION_EXTENSION or
+ name == common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION):
+ self.set_active(False)
+ return
+
+ def _get_extension_response_internal(self):
+ self._active = False
+ quota = self._request.get_parameter_value(self._QUOTA_PARAM)
+ if quota is not None:
+ try:
+ quota = int(quota)
+ except ValueError, e:
+ return None
+ if quota < 0 or quota >= 2 ** 32:
+ return None
+ self._quota = quota
+
+ self._active = True
+ return common.ExtensionParameter(common.MUX_EXTENSION)
+
+ def _setup_stream_options_internal(self, stream_options):
+ pass
+
+ def set_quota(self, quota):
+ self._quota = quota
+
+ def quota(self):
+ return self._quota
+
+ def set_extensions(self, extensions):
+ self._extensions = extensions
+
+ def extensions(self):
+ return self._extensions
+
+
+_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
+
+
+def get_extension_processor(extension_request):
+ """Given an ExtensionParameter representing an extension offer received
+ from a client, configures and returns an instance of the corresponding
+ extension processor class.
+ """
+
+ processor_class = _available_processors.get(extension_request.name())
+ if processor_class is None:
+ return None
+ return processor_class(extension_request)
+
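+# Illustrative sketch (not part of the original module): looking up a
+# processor for an offered extension during the opening handshake. Here
+# `offer` is a hypothetical ExtensionParameter taken from the parsed
+# Sec-WebSocket-Extensions header.
+#
+#   processor = get_extension_processor(offer)
+#   if processor is not None:
+#       response = processor.get_extension_response()
+#       if response is not None:
+#           accepted = common.format_extension(response)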
+
+def is_compression_extension(extension_name):
+ return extension_name in _compression_extension_names
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/fast_masking.i b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/fast_masking.i
new file mode 100644
index 000000000..ddaad27f5
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/fast_masking.i
@@ -0,0 +1,98 @@
+// Copyright 2013, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+%module fast_masking
+
+%include "cstring.i"
+
+%{
+#include <cstring>
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+%}
+
+%apply (char *STRING, int LENGTH) {
+ (const char* payload, int payload_length),
+ (const char* masking_key, int masking_key_length) };
+%cstring_output_allocate_size(
+ char** result, int* result_length, delete [] *$1);
+
+%inline %{
+
+void mask(
+ const char* payload, int payload_length,
+ const char* masking_key, int masking_key_length,
+ int masking_key_index,
+ char** result, int* result_length) {
+ *result = new char[payload_length];
+ *result_length = payload_length;
+ memcpy(*result, payload, payload_length);
+
+ char* cursor = *result;
+ char* cursor_end = *result + *result_length;
+
+#ifdef __SSE2__
+ while ((cursor < cursor_end) &&
+ (reinterpret_cast<size_t>(cursor) & 0xf)) {
+ *cursor ^= masking_key[masking_key_index];
+ ++cursor;
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+ if (cursor == cursor_end) {
+ return;
+ }
+
+ const int kBlockSize = 16;
+ __m128i masking_key_block;
+ for (int i = 0; i < kBlockSize; ++i) {
+ *(reinterpret_cast<char*>(&masking_key_block) + i) =
+ masking_key[masking_key_index];
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+
+ while (cursor + kBlockSize <= cursor_end) {
+ __m128i payload_block =
+ _mm_load_si128(reinterpret_cast<__m128i*>(cursor));
+ _mm_stream_si128(reinterpret_cast<__m128i*>(cursor),
+ _mm_xor_si128(payload_block, masking_key_block));
+ cursor += kBlockSize;
+ }
+#endif
+
+ while (cursor < cursor_end) {
+ *cursor ^= masking_key[masking_key_index];
+ ++cursor;
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+}
+
+%}
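+
+// Illustrative sketch (not part of the original file): once this interface is
+// run through SWIG and compiled, the typemaps above collapse the pointer and
+// length pairs, so Python code calls the wrapper roughly as
+//
+//   import fast_masking
+//   masked = fast_masking.mask(payload, masking_key, masking_key_index)
+//
+// where payload and masking_key are byte strings and the result is the
+// XOR-masked copy of payload.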
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/__init__.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/__init__.py
new file mode 100644
index 000000000..194f6b395
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/__init__.py
@@ -0,0 +1,110 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket opening handshake processor. This class try to apply available
+opening handshake processors for each protocol version until a connection is
+successfully established.
+"""
+
+
+import logging
+
+from mod_pywebsocket import common
+from mod_pywebsocket.handshake import hybi00
+from mod_pywebsocket.handshake import hybi
+# Export AbortedByUserException, HandshakeException, and VersionException
+# symbols from this module.
+from mod_pywebsocket.handshake._base import AbortedByUserException
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import VersionException
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
+ """Performs WebSocket handshake.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+ allowDraft75: obsolete argument. ignored.
+ strict: obsolete argument. ignored.
+
+    Handshaker will add attributes such as ws_resource while performing the
+    handshake.
+ """
+
+ _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
+    # To print mimetools.Message as an escaped one-line string, we convert
+    # headers_in to a dict object. Without the conversion, %r just prints
+    # the type and address, and %s prints the original header string as
+    # multiple lines.
+    #
+    # Both mimetools.Message and MpTable_Type of mod_python can be
+    # converted to dict.
+    #
+    # mimetools.Message.__str__ returns the original header string.
+    # dict(mimetools.Message object) returns the map from header names to
+    # header values, while MpTable_Type doesn't have such a __str__ but only
+    # a __repr__ which formats itself like a dictionary object.
+ _LOGGER.debug(
+ 'Client\'s opening handshake headers: %r', dict(request.headers_in))
+
+ handshakers = []
+ handshakers.append(
+ ('RFC 6455', hybi.Handshaker(request, dispatcher)))
+ handshakers.append(
+ ('HyBi 00', hybi00.Handshaker(request, dispatcher)))
+
+ for name, handshaker in handshakers:
+ _LOGGER.debug('Trying protocol version %s', name)
+ try:
+ handshaker.do_handshake()
+ _LOGGER.info('Established (%s protocol)', name)
+ return
+ except HandshakeException, e:
+ _LOGGER.debug(
+ 'Failed to complete opening handshake as %s protocol: %r',
+ name, e)
+ if e.status:
+ raise e
+ except AbortedByUserException, e:
+ raise
+ except VersionException, e:
+ raise
+
+ # TODO(toyoshim): Add a test to cover the case all handshakers fail.
+ raise HandshakeException(
+ 'Failed to complete opening handshake for all available protocols',
+ status=common.HTTP_STATUS_BAD_REQUEST)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/_base.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/_base.py
new file mode 100644
index 000000000..c993a584b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/_base.py
@@ -0,0 +1,182 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Common functions and exceptions used by WebSocket opening handshake
+processors.
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import http_header_util
+
+
+class AbortedByUserException(Exception):
+ """Exception for aborting a connection intentionally.
+
+ If this exception is raised in do_extra_handshake handler, the connection
+ will be abandoned. No other WebSocket or HTTP(S) handler will be invoked.
+
+ If this exception is raised in transfer_data_handler, the connection will
+ be closed without closing handshake. No other WebSocket or HTTP(S) handler
+ will be invoked.
+ """
+
+ pass
+
+
+class HandshakeException(Exception):
+    """This exception will be raised when an error occurs while processing
+    the WebSocket initial handshake.
+    """
+
+ def __init__(self, name, status=None):
+ super(HandshakeException, self).__init__(name)
+ self.status = status
+
+
+class VersionException(Exception):
+    """This exception will be raised when the protocol version of a client
+    request does not match any version the server supports.
+    """
+
+ def __init__(self, name, supported_versions=''):
+ """Construct an instance.
+
+ Args:
+            supported_versions: a str object listing supported hybi versions
+            (e.g. '8, 13').
+ """
+ super(VersionException, self).__init__(name)
+ self.supported_versions = supported_versions
+
+
+def get_default_port(is_secure):
+ if is_secure:
+ return common.DEFAULT_WEB_SOCKET_SECURE_PORT
+ else:
+ return common.DEFAULT_WEB_SOCKET_PORT
+
+
+def validate_subprotocol(subprotocol):
+ """Validate a value in the Sec-WebSocket-Protocol field.
+
+    See Sections 4.1, 4.2.2, and 4.3 of RFC 6455.
+ """
+
+ if not subprotocol:
+ raise HandshakeException('Invalid subprotocol name: empty')
+
+ # Parameter should be encoded HTTP token.
+ state = http_header_util.ParsingState(subprotocol)
+ token = http_header_util.consume_token(state)
+ rest = http_header_util.peek(state)
+    # If |rest| is not None, |subprotocol| is not a single token or is
+    # invalid. If |rest| is None, |token| must not be None because
+    # |subprotocol| is the concatenation of |token| and |rest| and is not
+    # None.
+ if rest is not None:
+ raise HandshakeException('Invalid non-token string in subprotocol '
+ 'name: %r' % rest)
+
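+# Illustrative examples (not part of upstream pywebsocket): 'chat' and
+# 'v1.chat.example.com' pass validate_subprotocol, while '' and 'bad protocol'
+# (the space is an HTTP separator, so |rest| is not None) raise
+# HandshakeException.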
+
+def parse_host_header(request):
+ fields = request.headers_in[common.HOST_HEADER].split(':', 1)
+ if len(fields) == 1:
+ return fields[0], get_default_port(request.is_https())
+ try:
+ return fields[0], int(fields[1])
+ except ValueError, e:
+ raise HandshakeException('Invalid port number format: %r' % e)
+
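+# Illustrative examples (not part of upstream pywebsocket): for a request
+# carrying 'Host: example.com:8880', parse_host_header returns
+# ('example.com', 8880); for 'Host: example.com' it falls back to the default
+# port returned by get_default_port (80 for ws, 443 for wss).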
+
+def format_header(name, value):
+ return '%s: %s\r\n' % (name, value)
+
+
+def get_mandatory_header(request, key):
+ value = request.headers_in.get(key)
+ if value is None:
+ raise HandshakeException('Header %s is not defined' % key)
+ return value
+
+
+def validate_mandatory_header(request, key, expected_value, fail_status=None):
+ value = get_mandatory_header(request, key)
+
+ if value.lower() != expected_value.lower():
+ raise HandshakeException(
+ 'Expected %r for header %s but found %r (case-insensitive)' %
+ (expected_value, key, value), status=fail_status)
+
+
+def check_request_line(request):
+ # 5.1 1. The three character UTF-8 string "GET".
+ # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
+ if request.method != 'GET':
+ raise HandshakeException('Method is not GET: %r' % request.method)
+
+ if request.protocol != 'HTTP/1.1':
+ raise HandshakeException('Version is not HTTP/1.1: %r' %
+ request.protocol)
+
+
+def parse_token_list(data):
+ """Parses a header value which follows 1#token and returns parsed elements
+ as a list of strings.
+
+ Leading LWSes must be trimmed.
+ """
+
+ state = http_header_util.ParsingState(data)
+
+ token_list = []
+
+ while True:
+ token = http_header_util.consume_token(state)
+ if token is not None:
+ token_list.append(token)
+
+ http_header_util.consume_lwses(state)
+
+ if http_header_util.peek(state) is None:
+ break
+
+ if not http_header_util.consume_string(state, ','):
+ raise HandshakeException(
+ 'Expected a comma but found %r' % http_header_util.peek(state))
+
+ http_header_util.consume_lwses(state)
+
+ if len(token_list) == 0:
+ raise HandshakeException('No valid token found')
+
+ return token_list
+
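+# Illustrative examples (not part of upstream pywebsocket):
+#
+#     parse_token_list('chat, superchat')   # -> ['chat', 'superchat']
+#     parse_token_list('')                  # raises HandshakeException
+#     parse_token_list('chat;')             # raises HandshakeException
+#                                           # (';' is not a comma)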
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi.py
new file mode 100644
index 000000000..1ad10ea37
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi.py
@@ -0,0 +1,420 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+# Note: request.connection.write is used in this module, even though mod_python
+# documentation says that it should be used only in connection handlers.
+# Unfortunately, we have no other options. For example, request.write is not
+# suitable because it doesn't allow direct raw bytes writing.
+
+
+import base64
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import get_extension_processor
+from mod_pywebsocket.extensions import is_compression_extension
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import parse_token_list
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+from mod_pywebsocket.handshake._base import VersionException
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+
+
+# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
+# disallows non-zero padding, so the character right before == must be one of
+# A, Q, g, or w.
+_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
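+# Illustrative examples (not part of upstream pywebsocket): the key
+# 'dGhlIHNhbXBsZSBub25jZQ==' (24 base64 characters decoding to 16 bytes)
+# matches, while 'dGhlIHNhbXBsZSBub25jZR==' does not because 'R' implies
+# non-zero padding bits.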
+
+# Defining aliases for values used frequently.
+_VERSION_LATEST = common.VERSION_HYBI_LATEST
+_VERSION_LATEST_STRING = str(_VERSION_LATEST)
+_SUPPORTED_VERSIONS = [
+ _VERSION_LATEST,
+]
+
+
+def compute_accept(key):
+    """Computes the value for the Sec-WebSocket-Accept header from the value
+    of the Sec-WebSocket-Key header.
+ """
+
+ accept_binary = util.sha1_hash(
+ key + common.WEBSOCKET_ACCEPT_UUID).digest()
+ accept = base64.b64encode(accept_binary)
+
+ return (accept, accept_binary)
+
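+# Illustrative example (not part of upstream pywebsocket): for the sample key
+# from RFC 6455 section 1.3, the function above should yield the accept value
+# given in the RFC:
+#
+#     compute_accept('dGhlIHNhbXBsZSBub25jZQ==')[0]
+#     # -> 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='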
+
+class Handshaker(object):
+ """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
+
+ def __init__(self, request, dispatcher):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+
+ Handshaker will add attributes such as ws_resource during handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+
+ def _validate_connection_header(self):
+ connection = get_mandatory_header(
+ self._request, common.CONNECTION_HEADER)
+
+ try:
+ connection_tokens = parse_token_list(connection)
+ except HandshakeException, e:
+ raise HandshakeException(
+ 'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))
+
+ connection_is_valid = False
+ for token in connection_tokens:
+ if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
+ connection_is_valid = True
+ break
+ if not connection_is_valid:
+ raise HandshakeException(
+ '%s header doesn\'t contain "%s"' %
+ (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+
+ def do_handshake(self):
+ self._request.ws_close_code = None
+ self._request.ws_close_reason = None
+
+ # Parsing.
+
+ check_request_line(self._request)
+
+ validate_mandatory_header(
+ self._request,
+ common.UPGRADE_HEADER,
+ common.WEBSOCKET_UPGRADE_TYPE)
+
+ self._validate_connection_header()
+
+ self._request.ws_resource = self._request.uri
+
+ unused_host = get_mandatory_header(self._request, common.HOST_HEADER)
+
+ self._request.ws_version = self._check_version()
+
+ try:
+ self._get_origin()
+ self._set_protocol()
+ self._parse_extensions()
+
+ # Key validation, response generation.
+
+ key = self._get_key()
+ (accept, accept_binary) = compute_accept(key)
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_ACCEPT_HEADER,
+ accept,
+ util.hexify(accept_binary))
+
+ self._logger.debug('Protocol version is RFC 6455')
+
+ # Setup extension processors.
+
+ processors = []
+ if self._request.ws_requested_extensions is not None:
+ for extension_request in self._request.ws_requested_extensions:
+ processor = get_extension_processor(extension_request)
+ # Unknown extension requests are just ignored.
+ if processor is not None:
+ processors.append(processor)
+ self._request.ws_extension_processors = processors
+
+ # List of extra headers. The extra handshake handler may add header
+ # data as name/value pairs to this list and pywebsocket appends
+ # them to the WebSocket handshake.
+ self._request.extra_headers = []
+
+ # Extra handshake handler may modify/remove processors.
+ self._dispatcher.do_extra_handshake(self._request)
+ processors = filter(lambda processor: processor is not None,
+ self._request.ws_extension_processors)
+
+            # Ask each processor if there are extensions on the request which
+            # cannot co-exist. When a processor decides that other processors
+            # cannot co-exist with it, the processor marks them (or itself) as
+            # "inactive". The first extension processor has the right to
+            # make the final call.
+ for processor in reversed(processors):
+ if processor.is_active():
+ processor.check_consistency_with_other_processors(
+ processors)
+ processors = filter(lambda processor: processor.is_active(),
+ processors)
+
+ accepted_extensions = []
+
+            # We need to take the mux extension into account here.
+            # If the mux extension exists:
+            # - Remove processors of extensions for the logical channel,
+            #   which are the processors located before the mux processor
+            # - Pass extension requests for the logical channel to the mux
+            #   processor
+            # - Attach the mux processor to the request. It will be consulted
+            #   by the dispatcher to decide whether to use the mux handler
+            #   or not.
+ mux_index = -1
+ for i, processor in enumerate(processors):
+ if processor.name() == common.MUX_EXTENSION:
+ mux_index = i
+ break
+ if mux_index >= 0:
+ logical_channel_extensions = []
+ for processor in processors[:mux_index]:
+ logical_channel_extensions.append(processor.request())
+ processor.set_active(False)
+ self._request.mux_processor = processors[mux_index]
+ self._request.mux_processor.set_extensions(
+ logical_channel_extensions)
+ processors = filter(lambda processor: processor.is_active(),
+ processors)
+
+ stream_options = StreamOptions()
+
+ for index, processor in enumerate(processors):
+ if not processor.is_active():
+ continue
+
+ extension_response = processor.get_extension_response()
+ if extension_response is None:
+ # Rejected.
+ continue
+
+ accepted_extensions.append(extension_response)
+
+ processor.setup_stream_options(stream_options)
+
+ if not is_compression_extension(processor.name()):
+ continue
+
+ # Inactivate all of the following compression extensions.
+ for j in xrange(index + 1, len(processors)):
+ if is_compression_extension(processors[j].name()):
+ processors[j].set_active(False)
+
+ if len(accepted_extensions) > 0:
+ self._request.ws_extensions = accepted_extensions
+ self._logger.debug(
+ 'Extensions accepted: %r',
+ map(common.ExtensionParameter.name, accepted_extensions))
+ else:
+ self._request.ws_extensions = None
+
+ self._request.ws_stream = self._create_stream(stream_options)
+
+ if self._request.ws_requested_protocols is not None:
+ if self._request.ws_protocol is None:
+ raise HandshakeException(
+ 'do_extra_handshake must choose one subprotocol from '
+ 'ws_requested_protocols and set it to ws_protocol')
+ validate_subprotocol(self._request.ws_protocol)
+
+ self._logger.debug(
+ 'Subprotocol accepted: %r',
+ self._request.ws_protocol)
+ else:
+ if self._request.ws_protocol is not None:
+ raise HandshakeException(
+ 'ws_protocol must be None when the client didn\'t '
+ 'request any subprotocol')
+
+ self._send_handshake(accept)
+ except HandshakeException, e:
+ if not e.status:
+ # Fallback to 400 bad request by default.
+ e.status = common.HTTP_STATUS_BAD_REQUEST
+ raise e
+
+ def _get_origin(self):
+ origin_header = common.ORIGIN_HEADER
+ origin = self._request.headers_in.get(origin_header)
+ if origin is None:
+ self._logger.debug('Client request does not have origin header')
+ self._request.ws_origin = origin
+
+ def _check_version(self):
+ version = get_mandatory_header(self._request,
+ common.SEC_WEBSOCKET_VERSION_HEADER)
+ if version == _VERSION_LATEST_STRING:
+ return _VERSION_LATEST
+
+ if version.find(',') >= 0:
+ raise HandshakeException(
+ 'Multiple versions (%r) are not allowed for header %s' %
+ (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+ status=common.HTTP_STATUS_BAD_REQUEST)
+ raise VersionException(
+ 'Unsupported version %r for header %s' %
+ (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+ supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))
+
+ def _set_protocol(self):
+ self._request.ws_protocol = None
+
+ protocol_header = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+
+ if protocol_header is None:
+ self._request.ws_requested_protocols = None
+ return
+
+ self._request.ws_requested_protocols = parse_token_list(
+ protocol_header)
+ self._logger.debug('Subprotocols requested: %r',
+ self._request.ws_requested_protocols)
+
+ def _parse_extensions(self):
+ extensions_header = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
+ if not extensions_header:
+ self._request.ws_requested_extensions = None
+ return
+
+ try:
+ self._request.ws_requested_extensions = common.parse_extensions(
+ extensions_header)
+ except common.ExtensionParsingException, e:
+ raise HandshakeException(
+ 'Failed to parse Sec-WebSocket-Extensions header: %r' % e)
+
+ self._logger.debug(
+ 'Extensions requested: %r',
+ map(common.ExtensionParameter.name,
+ self._request.ws_requested_extensions))
+
+ def _validate_key(self, key):
+ if key.find(',') >= 0:
+ raise HandshakeException('Request has multiple %s header lines or '
+ 'contains illegal character \',\': %r' %
+ (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+ # Validate
+ key_is_valid = False
+ try:
+            # Validate the key by a quick regex match before parsing it with
+            # the base64 module. Because the base64 module skips invalid
+            # characters, we have to do this in advance so that the server
+            # strictly rejects illegal keys.
+ if _SEC_WEBSOCKET_KEY_REGEX.match(key):
+ decoded_key = base64.b64decode(key)
+ if len(decoded_key) == 16:
+ key_is_valid = True
+ except TypeError, e:
+ pass
+
+ if not key_is_valid:
+ raise HandshakeException(
+ 'Illegal value for header %s: %r' %
+ (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+ return decoded_key
+
+ def _get_key(self):
+ key = get_mandatory_header(
+ self._request, common.SEC_WEBSOCKET_KEY_HEADER)
+
+ decoded_key = self._validate_key(key)
+
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_KEY_HEADER,
+ key,
+ util.hexify(decoded_key))
+
+ return key
+
+ def _create_stream(self, stream_options):
+ return Stream(self._request, stream_options)
+
+ def _create_handshake_response(self, accept):
+ response = []
+
+ response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+ # WebSocket headers
+ response.append(format_header(
+ common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
+ response.append(format_header(
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
+ if self._request.ws_protocol is not None:
+ response.append(format_header(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ if (self._request.ws_extensions is not None and
+ len(self._request.ws_extensions) != 0):
+ response.append(format_header(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+ common.format_extensions(self._request.ws_extensions)))
+
+ # Headers not specific for WebSocket
+ for name, value in self._request.extra_headers:
+ response.append(format_header(name, value))
+
+ response.append('\r\n')
+
+ return ''.join(response)
+
+ def _send_handshake(self, accept):
+ raw_response = self._create_handshake_response(accept)
+ self._request.connection.write(raw_response)
+ self._logger.debug('Sent server\'s opening handshake: %r',
+ raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi00.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi00.py
new file mode 100644
index 000000000..8757717a6
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi00.py
@@ -0,0 +1,293 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol version HyBi 00.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# mod_python document says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+import logging
+import re
+import struct
+
+from mod_pywebsocket import common
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket import util
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_default_port
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import parse_host_header
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+
+
+_MANDATORY_HEADERS = [
+ # key, expected value or None
+ [common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
+ [common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
+]
+
+
+def _validate_subprotocol(subprotocol):
+    """Checks if all characters in subprotocol are in the range from U+0020
+    to U+007E. A value in the Sec-WebSocket-Protocol field needs to satisfy
+    this requirement.
+
+    See Section 4.1. Opening handshake of the spec.
+ """
+
+ if not subprotocol:
+ raise HandshakeException('Invalid subprotocol name: empty')
+
+ # Parameter should be in the range U+0020 to U+007E.
+ for c in subprotocol:
+ if not 0x20 <= ord(c) <= 0x7e:
+ raise HandshakeException(
+ 'Illegal character in subprotocol name: %r' % c)
+
+
+def _check_header_lines(request, mandatory_headers):
+ check_request_line(request)
+
+ # The expected field names, and the meaning of their corresponding
+ # values, are as follows.
+ # |Upgrade| and |Connection|
+ for key, expected_value in mandatory_headers:
+ validate_mandatory_header(request, key, expected_value)
+
+
+def _build_location(request):
+ """Build WebSocket location for request."""
+
+ location_parts = []
+ if request.is_https():
+ location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
+ else:
+ location_parts.append(common.WEB_SOCKET_SCHEME)
+ location_parts.append('://')
+ host, port = parse_host_header(request)
+ connection_port = request.connection.local_addr[1]
+ if port != connection_port:
+ raise HandshakeException('Header/connection port mismatch: %d/%d' %
+ (port, connection_port))
+ location_parts.append(host)
+ if (port != get_default_port(request.is_https())):
+ location_parts.append(':')
+ location_parts.append(str(port))
+ location_parts.append(request.unparsed_uri)
+ return ''.join(location_parts)
+
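+# Illustrative example (not part of upstream pywebsocket): for a non-secure
+# request with 'Host: example.com:8880' and unparsed_uri '/echo', the function
+# above returns 'ws://example.com:8880/echo'; the port part is omitted when it
+# equals the scheme's default (80 for ws, 443 for wss).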
+
+class Handshaker(object):
+ """Opening handshake processor for the WebSocket protocol version HyBi 00.
+ """
+
+ def __init__(self, request, dispatcher):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource while performing
+        the handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+
+ def do_handshake(self):
+ """Perform WebSocket Handshake.
+
+ On _request, we set
+ ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
+ ws_challenge_md5: WebSocket handshake information.
+ ws_stream: Frame generation/parsing class.
+ ws_version: Protocol version.
+
+ Raises:
+ HandshakeException: when any error happened in parsing the opening
+ handshake request.
+ """
+
+ # 5.1 Reading the client's opening handshake.
+ # dispatcher sets it in self._request.
+ _check_header_lines(self._request, _MANDATORY_HEADERS)
+ self._set_resource()
+ self._set_subprotocol()
+ self._set_location()
+ self._set_origin()
+ self._set_challenge_response()
+ self._set_protocol_version()
+
+ self._dispatcher.do_extra_handshake(self._request)
+
+ self._send_handshake()
+
+ def _set_resource(self):
+ self._request.ws_resource = self._request.uri
+
+ def _set_subprotocol(self):
+ # |Sec-WebSocket-Protocol|
+ subprotocol = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+ if subprotocol is not None:
+ _validate_subprotocol(subprotocol)
+ self._request.ws_protocol = subprotocol
+
+ def _set_location(self):
+ # |Host|
+ host = self._request.headers_in.get(common.HOST_HEADER)
+ if host is not None:
+ self._request.ws_location = _build_location(self._request)
+ # TODO(ukai): check host is this host.
+
+ def _set_origin(self):
+ # |Origin|
+ origin = self._request.headers_in.get(common.ORIGIN_HEADER)
+ if origin is not None:
+ self._request.ws_origin = origin
+
+ def _set_protocol_version(self):
+ # |Sec-WebSocket-Draft|
+ draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
+ if draft is not None and draft != '0':
+ raise HandshakeException('Illegal value for %s: %s' %
+ (common.SEC_WEBSOCKET_DRAFT_HEADER,
+ draft))
+
+ self._logger.debug('Protocol version is HyBi 00')
+ self._request.ws_version = common.VERSION_HYBI00
+ self._request.ws_stream = StreamHixie75(self._request, True)
+
+ def _set_challenge_response(self):
+ # 5.2 4-8.
+ self._request.ws_challenge = self._get_challenge()
+        # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
+ self._request.ws_challenge_md5 = util.md5_hash(
+ self._request.ws_challenge).digest()
+ self._logger.debug(
+ 'Challenge: %r (%s)',
+ self._request.ws_challenge,
+ util.hexify(self._request.ws_challenge))
+ self._logger.debug(
+ 'Challenge response: %r (%s)',
+ self._request.ws_challenge_md5,
+ util.hexify(self._request.ws_challenge_md5))
+
+ def _get_key_value(self, key_field):
+ key_value = get_mandatory_header(self._request, key_field)
+
+ self._logger.debug('%s: %r', key_field, key_value)
+
+ # 5.2 4. let /key-number_n/ be the digits (characters in the range
+ # U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
+ # interpreted as a base ten integer, ignoring all other characters
+ # in /key_n/.
+ try:
+ key_number = int(re.sub("\\D", "", key_value))
+ except:
+ raise HandshakeException('%s field contains no digit' % key_field)
+ # 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
+ # in /key_n/.
+ spaces = re.subn(" ", "", key_value)[1]
+ if spaces == 0:
+ raise HandshakeException('%s field contains no space' % key_field)
+
+ self._logger.debug(
+ '%s: Key-number is %d and number of spaces is %d',
+ key_field, key_number, spaces)
+
+ # 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
+ # then abort the WebSocket connection.
+ if key_number % spaces != 0:
+ raise HandshakeException(
+ '%s: Key-number (%d) is not an integral multiple of spaces '
+ '(%d)' % (key_field, key_number, spaces))
+ # 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
+ part = key_number / spaces
+ self._logger.debug('%s: Part is %d', key_field, part)
+ return part
+
+ def _get_challenge(self):
+ # 5.2 4-7.
+ key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
+ key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
+        # 5.2 8. let /challenge/ be the concatenation of /part_1/, /part_2/,
+        # and /key_3/ (the eight bytes read from the connection below).
+ challenge = ''
+ challenge += struct.pack('!I', key1) # network byteorder int
+ challenge += struct.pack('!I', key2) # network byteorder int
+ challenge += self._request.connection.read(8)
+ return challenge
+
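+    # Illustrative example (not part of upstream pywebsocket), using a
+    # hypothetical header value '1 000' for both key fields: the key-number is
+    # 1000 and there is one space, so each part is 1000 and the challenge is
+    # struct.pack('!I', 1000) * 2 followed by the 8 raw bytes read from the
+    # connection; its MD5 digest becomes the handshake response body.
+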
+ def _send_handshake(self):
+ response = []
+
+ # 5.2 10. send the following line.
+ response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')
+
+ # 5.2 11. send the following fields to the client.
+ response.append(format_header(
+ common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
+ response.append(format_header(
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
+ if self._request.ws_protocol:
+ response.append(format_header(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ # 5.2 12. send two bytes 0x0D 0x0A.
+ response.append('\r\n')
+ # 5.2 13. send /response/
+ response.append(self._request.ws_challenge_md5)
+
+ raw_response = ''.join(response)
+ self._request.connection.write(raw_response)
+ self._logger.debug('Sent server\'s opening handshake: %r',
+ raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/headerparserhandler.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/headerparserhandler.py
new file mode 100644
index 000000000..c244421cf
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/headerparserhandler.py
@@ -0,0 +1,254 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""PythonHeaderParserHandler for mod_pywebsocket.
+
+Apache HTTP Server and mod_python must be configured such that this
+function is called to handle WebSocket requests.
+"""
+
+
+import logging
+
+from mod_python import apache
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+
+
+# PythonOption to specify the handler root directory.
+_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
+
+# PythonOption to specify the handler scan directory.
+# This must be a directory under the root directory.
+# The default is the root directory.
+_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
+
+# PythonOption to allow handlers whose canonical path is
+# not under the root directory. It's disallowed by default.
+# Set this option to 'yes' to allow them.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
+ 'mod_pywebsocket.allow_handlers_outside_root_dir')
+# Map from values to their meanings. 'Yes' and 'No' are allowed just for
+# compatibility.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
+ 'off': False, 'no': False, 'on': True, 'yes': True}
+
+# (Obsolete option. Ignored.)
+# PythonOption to allow the handshake defined in the Hixie 75 version of the
+# protocol. The default is None (off).
+_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
+# Map from values to their meanings.
+_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
+
+
+class ApacheLogHandler(logging.Handler):
+ """Wrapper logging.Handler to emit log message to apache's error.log."""
+
+ _LEVELS = {
+ logging.DEBUG: apache.APLOG_DEBUG,
+ logging.INFO: apache.APLOG_INFO,
+ logging.WARNING: apache.APLOG_WARNING,
+ logging.ERROR: apache.APLOG_ERR,
+ logging.CRITICAL: apache.APLOG_CRIT,
+ }
+
+ def __init__(self, request=None):
+ logging.Handler.__init__(self)
+ self._log_error = apache.log_error
+ if request is not None:
+ self._log_error = request.log_error
+
+ # Time and level will be printed by Apache.
+ self._formatter = logging.Formatter('%(name)s: %(message)s')
+
+ def emit(self, record):
+ apache_level = apache.APLOG_DEBUG
+ if record.levelno in ApacheLogHandler._LEVELS:
+ apache_level = ApacheLogHandler._LEVELS[record.levelno]
+
+ msg = self._formatter.format(record)
+
+        # The "server" parameter must be passed for the "level" parameter to
+        # take effect. If only the "level" parameter is passed, nothing shows
+        # up in Apache's log. However, at this point, we cannot get the server
+        # object of the virtual host which will process WebSocket requests.
+        # The only server object we can get here is apache.main_server. But
+        # wherever (server configuration context or virtual host context) we
+        # put the PythonHeaderParserHandler directive, apache.main_server
+        # points to the main server instance (not to any virtual server
+        # instance). Apache then follows the LogLevel directive in the server
+        # configuration context to filter logs, so we need to specify LogLevel
+        # in the server configuration context. Even if we specify
+        # "LogLevel debug" in the virtual host context which actually handles
+        # WebSocket connections, DEBUG level logs never show up unless
+        # "LogLevel debug" is specified in the server configuration context.
+ #
+ # TODO(tyoshino): Provide logging methods on request object. When
+ # request is mp_request object (when used together with Apache), the
+ # methods call request.log_error indirectly. When request is
+ # _StandaloneRequest, the methods call Python's logging facility which
+ # we create in standalone.py.
+ self._log_error(msg, apache_level, apache.main_server)
+
+
+def _configure_logging():
+ logger = logging.getLogger()
+ # Logs are filtered by Apache based on LogLevel directive in Apache
+ # configuration file. We must just pass logs for all levels to
+ # ApacheLogHandler.
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(ApacheLogHandler())
+
+
+_configure_logging()
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _parse_option(name, value, definition):
+ if value is None:
+ return False
+
+ meaning = definition.get(value.lower())
+ if meaning is None:
+ raise Exception('Invalid value for PythonOption %s: %r' %
+ (name, value))
+ return meaning
+
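+# Illustrative examples (not part of upstream pywebsocket):
+#
+#     _parse_option('opt', 'Yes', _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
+#     # -> True ('yes'/'no' are accepted for compatibility)
+#     _parse_option('opt', None, _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
+#     # -> False (an unset option means False)
+#     _parse_option('opt', 'maybe', _PYOPT_ALLOW_DRAFT75_DEFINITION)
+#     # raises Exception: invalid value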
+
+def _create_dispatcher():
+ _LOGGER.info('Initializing Dispatcher')
+
+ options = apache.main_server.get_options()
+
+ handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
+ if not handler_root:
+ raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
+ apache.APLOG_ERR)
+
+ handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
+
+ allow_handlers_outside_root = _parse_option(
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
+ options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
+
+ dispatcher = dispatch.Dispatcher(
+ handler_root, handler_scan, allow_handlers_outside_root)
+
+ for warning in dispatcher.source_warnings():
+ apache.log_error(
+ 'mod_pywebsocket: Warning in source loading: %s' % warning,
+ apache.APLOG_WARNING)
+
+ return dispatcher
+
+
+# Initialize
+_dispatcher = _create_dispatcher()
+
+
+def headerparserhandler(request):
+ """Handle request.
+
+ Args:
+ request: mod_python request.
+
+ This function is named headerparserhandler because it is the default
+ name for a PythonHeaderParserHandler.
+ """
+
+ handshake_is_done = False
+ try:
+ # Fallback to default http handler for request paths for which
+ # we don't have request handlers.
+ if not _dispatcher.get_handler_suite(request.uri):
+ request.log_error(
+ 'mod_pywebsocket: No handler for resource: %r' % request.uri,
+ apache.APLOG_INFO)
+ request.log_error(
+ 'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
+ return apache.DECLINED
+ except dispatch.DispatchException, e:
+ request.log_error(
+ 'mod_pywebsocket: Dispatch failed for error: %s' % e,
+ apache.APLOG_INFO)
+ if not handshake_is_done:
+ return e.status
+
+ try:
+ allow_draft75 = _parse_option(
+ _PYOPT_ALLOW_DRAFT75,
+ apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
+ _PYOPT_ALLOW_DRAFT75_DEFINITION)
+
+ try:
+ handshake.do_handshake(
+ request, _dispatcher, allowDraft75=allow_draft75)
+ except handshake.VersionException, e:
+ request.log_error(
+ 'mod_pywebsocket: Handshake failed for version error: %s' % e,
+ apache.APLOG_INFO)
+ request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
+ e.supported_versions)
+ return apache.HTTP_BAD_REQUEST
+ except handshake.HandshakeException, e:
+ # Handshake for ws/wss failed.
+ # Send http response with error status.
+ request.log_error(
+ 'mod_pywebsocket: Handshake failed for error: %s' % e,
+ apache.APLOG_INFO)
+ return e.status
+
+ handshake_is_done = True
+ request._dispatcher = _dispatcher
+ _dispatcher.transfer_data(request)
+ except handshake.AbortedByUserException, e:
+ request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
+ except Exception, e:
+        # DispatchException can also be thrown if something is wrong in
+        # pywebsocket code. In that case it is caught here.
+
+ request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
+ (e, util.get_stack_trace()),
+ apache.APLOG_ERR)
+        # Unknown exceptions before the handshake mean that Apache must handle
+        # the request with another handler.
+ if not handshake_is_done:
+ return apache.DECLINED
+ # Set assbackwards to suppress response header generation by Apache.
+ request.assbackwards = 1
+ return apache.DONE # Return DONE such that no other handlers are invoked.
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/http_header_util.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/http_header_util.py
new file mode 100644
index 000000000..b77465393
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/http_header_util.py
@@ -0,0 +1,263 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Utilities for parsing and formatting headers that follow the grammar defined
+in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
+"""
+
+
+import urlparse
+
+
+_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
+
+
+def _is_char(c):
+ """Returns true iff c is in CHAR as specified in HTTP RFC."""
+
+ return ord(c) <= 127
+
+
+def _is_ctl(c):
+ """Returns true iff c is in CTL as specified in HTTP RFC."""
+
+ return ord(c) <= 31 or ord(c) == 127
+
+
+class ParsingState(object):
+
+ def __init__(self, data):
+ self.data = data
+ self.head = 0
+
+
+def peek(state, pos=0):
+ """Peeks the character at pos from the head of data."""
+
+ if state.head + pos >= len(state.data):
+ return None
+
+ return state.data[state.head + pos]
+
+
+def consume(state, amount=1):
+    """Consumes the specified number of bytes from the head and returns the
+    consumed bytes. If there aren't enough bytes to consume, returns None.
+ """
+
+ if state.head + amount > len(state.data):
+ return None
+
+ result = state.data[state.head:state.head + amount]
+ state.head = state.head + amount
+ return result
+
+
+def consume_string(state, expected):
+    """Given a parsing state and an expected string, consumes the string from
+ the head. Returns True if consumed successfully. Otherwise, returns
+ False.
+ """
+
+ pos = 0
+
+ for c in expected:
+ if c != peek(state, pos):
+ return False
+ pos += 1
+
+ consume(state, pos)
+ return True
+
+
+def consume_lws(state):
+ """Consumes a LWS from the head. Returns True if any LWS is consumed.
+ Otherwise, returns False.
+
+ LWS = [CRLF] 1*( SP | HT )
+ """
+
+ original_head = state.head
+
+ consume_string(state, '\r\n')
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c == ' ' or c == '\t':
+ pos += 1
+ else:
+ if pos == 0:
+ state.head = original_head
+ return False
+ else:
+ consume(state, pos)
+ return True
+
+
+def consume_lwses(state):
+ """Consumes *LWS from the head."""
+
+ while consume_lws(state):
+ pass
+
+
+def consume_token(state):
+ """Consumes a token from the head. Returns the token or None if no token
+ was found.
+ """
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ if pos == 0:
+ return None
+
+ return consume(state, pos)
+ else:
+ pos += 1
+
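+# Illustrative example (not part of upstream pywebsocket):
+#
+#     state = ParsingState('permessage-deflate; client_max_window_bits')
+#     consume_token(state)   # -> 'permessage-deflate'
+#     peek(state)            # -> ';'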
+
+def consume_token_or_quoted_string(state):
+ """Consumes a token or a quoted-string, and returns the token or unquoted
+ string. If no token or quoted-string was found, returns None.
+ """
+
+ original_head = state.head
+
+ if not consume_string(state, '"'):
+ return consume_token(state)
+
+ result = []
+
+ expect_quoted_pair = False
+
+ while True:
+ if not expect_quoted_pair and consume_lws(state):
+ result.append(' ')
+ continue
+
+ c = consume(state)
+ if c is None:
+ # quoted-string is not enclosed with double quotation
+ state.head = original_head
+ return None
+ elif expect_quoted_pair:
+ expect_quoted_pair = False
+ if _is_char(c):
+ result.append(c)
+ else:
+ # Non CHAR character found in quoted-pair
+ state.head = original_head
+ return None
+ elif c == '\\':
+ expect_quoted_pair = True
+ elif c == '"':
+ return ''.join(result)
+ elif _is_ctl(c):
+ # Invalid character %r found in qdtext
+ state.head = original_head
+ return None
+ else:
+ result.append(c)
+
+
+def quote_if_necessary(s):
+ """Quotes arbitrary string into quoted-string."""
+
+ quote = False
+ if s == '':
+ return '""'
+
+ result = []
+ for c in s:
+ if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ quote = True
+
+ if c == '"' or _is_ctl(c):
+ result.append('\\' + c)
+ else:
+ result.append(c)
+
+ if quote:
+ return '"' + ''.join(result) + '"'
+ else:
+ return ''.join(result)
+
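+# Illustrative examples (not part of upstream pywebsocket):
+#
+#     quote_if_necessary('token')       # -> 'token'
+#     quote_if_necessary('two words')   # -> '"two words"' (space is a separator)
+#     quote_if_necessary('')            # -> '""'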
+
+def parse_uri(uri):
+ """Parse absolute URI then return host, port and resource."""
+
+ parsed = urlparse.urlsplit(uri)
+ if parsed.scheme != 'wss' and parsed.scheme != 'ws':
+ # |uri| must be a relative URI.
+ # TODO(toyoshim): Should validate |uri|.
+ return None, None, uri
+
+ if parsed.hostname is None:
+ return None, None, None
+
+ port = None
+ try:
+ port = parsed.port
+ except ValueError, e:
+        # The port property causes a ValueError on an invalid null port
+        # description like 'ws://host:/path'.
+ return None, None, None
+
+ if port is None:
+ if parsed.scheme == 'ws':
+ port = 80
+ else:
+ port = 443
+
+ path = parsed.path
+ if not path:
+ path += '/'
+ if parsed.query:
+ path += '?' + parsed.query
+ if parsed.fragment:
+ path += '#' + parsed.fragment
+
+ return parsed.hostname, port, path
+
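+# Illustrative examples (not part of upstream pywebsocket):
+#
+#     parse_uri('ws://example.com/chat')        # -> ('example.com', 80, '/chat')
+#     parse_uri('wss://example.com:9000/chat')  # -> ('example.com', 9000, '/chat')
+#     parse_uri('/echo')                        # -> (None, None, '/echo')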
+
+try:
+ urlparse.uses_netloc.index('ws')
+except ValueError, e:
+ # urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries.
+ urlparse.uses_netloc.append('ws')
+ urlparse.uses_netloc.append('wss')
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/memorizingfile.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/memorizingfile.py
new file mode 100644
index 000000000..4d4cd9585
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/memorizingfile.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Memorizing file.
+
+A memorizing file wraps a file and memorizes lines read by readline.
+"""
+
+
+import sys
+
+
+class MemorizingFile(object):
+ """MemorizingFile wraps a file and memorizes lines read by readline.
+
+    Note that data read by other methods is not memorized. This behavior
+    is good enough for memorizing the lines SimpleHTTPServer reads before
+    control reaches WebSocketRequestHandler.
+ """
+
+ def __init__(self, file_, max_memorized_lines=sys.maxint):
+ """Construct an instance.
+
+ Args:
+ file_: the file object to wrap.
+ max_memorized_lines: the maximum number of lines to memorize.
+ Only the first max_memorized_lines are memorized.
+ Default: sys.maxint.
+ """
+
+ self._file = file_
+ self._memorized_lines = []
+ self._max_memorized_lines = max_memorized_lines
+ self._buffered = False
+ self._buffered_line = None
+
+ def __getattribute__(self, name):
+ if name in ('_file', '_memorized_lines', '_max_memorized_lines',
+ '_buffered', '_buffered_line', 'readline',
+ 'get_memorized_lines'):
+ return object.__getattribute__(self, name)
+ return self._file.__getattribute__(name)
+
+ def readline(self, size=-1):
+ """Override file.readline and memorize the line read.
+
+        Note that even if size is specified and is smaller than the actual
+        line length, the whole line will be read from the underlying file
+        object and the remainder returned by subsequent readline calls.
+ """
+
+ if self._buffered:
+ line = self._buffered_line
+ self._buffered = False
+ else:
+ line = self._file.readline()
+ if line and len(self._memorized_lines) < self._max_memorized_lines:
+ self._memorized_lines.append(line)
+ if size >= 0 and size < len(line):
+ self._buffered = True
+ self._buffered_line = line[size:]
+ return line[:size]
+ return line
+
+ def get_memorized_lines(self):
+ """Get lines memorized so far."""
+ return self._memorized_lines
+
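+# Illustrative example (not part of upstream pywebsocket):
+#
+#     import StringIO
+#     f = MemorizingFile(StringIO.StringIO('GET / HTTP/1.1\r\nHost: x\r\n\r\n'))
+#     f.readline()               # -> 'GET / HTTP/1.1\r\n'
+#     f.get_memorized_lines()    # -> ['GET / HTTP/1.1\r\n']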
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py
new file mode 100644
index 000000000..4c1a0114b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py
@@ -0,0 +1,219 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Message related utilities.
+
+Note: request.connection.write/read are used in this module, even though
+the mod_python documentation says that they should be used only in connection
+handlers. Unfortunately, we have no other options. For example,
+request.write/read are not suitable because they don't allow direct raw
+bytes writing/reading.
+"""
+
+
+import Queue
+import threading
+
+
+# Export Exception symbols from msgutil for backward compatibility
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+# An API for handler to send/receive WebSocket messages.
+def close_connection(request):
+ """Close connection.
+
+ Args:
+ request: mod_python request.
+ """
+ request.ws_stream.close_connection()
+
+
+def send_message(request, payload_data, end=True, binary=False):
+ """Send a message (or part of a message).
+
+ Args:
+ request: mod_python request.
+        payload_data: unicode text or binary str to send.
+        end: True to terminate the message.
+             False to send payload_data as part of a message that will be
+             terminated by a later send_message call with end=True.
+        binary: send payload_data as binary frame(s).
+    Raises:
+        BadOperationException: when the server has already terminated the
+                               connection.
+ """
+ request.ws_stream.send_message(payload_data, end, binary)
+
+
+def receive_message(request):
+    """Receive a WebSocket frame and return its payload as text (unicode)
+    or binary data (str).
+
+    Args:
+        request: mod_python request.
+    Raises:
+        InvalidFrameException: when the client sends an invalid frame.
+        UnsupportedFrameException: when the client sends an unsupported frame,
+                                   e.g. a reserved bit is set but no extension
+                                   can recognize it.
+        InvalidUTF8Exception: when the client sends a text frame containing an
+                              invalid UTF-8 string.
+        ConnectionTerminatedException: when the connection is closed
+                                       unexpectedly.
+        BadOperationException: when the client has already terminated the
+                               connection.
+ """
+ return request.ws_stream.receive_message()
+
+
+def send_ping(request, body=''):
+ request.ws_stream.send_ping(body)
+
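+# Illustrative usage sketch (not part of the original module): a handler's
+# transfer-data entry point can combine the functions above for a simple echo
+# loop. The name web_socket_transfer_data follows the usual pywebsocket
+# handler convention and is an assumption here.
+#
+#     def web_socket_transfer_data(request):
+#         while True:
+#             message = receive_message(request)  # blocks; text is unicode
+#             send_message(request, u'echo: ' + message)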
+
+class MessageReceiver(threading.Thread):
+ """This class receives messages from the client.
+
+ This class provides three ways to receive messages: blocking,
+ non-blocking, and via callback. Callback has the highest precedence.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request, onmessage=None):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ onmessage: a function to be called when a message is received.
+ May be None. If not None, the function is called on
+ another thread. In that case, MessageReceiver.receive
+ and MessageReceiver.receive_nowait are useless
+ because they will never return any messages.
+ """
+
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self._onmessage = onmessage
+ self._stop_requested = False
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ try:
+ while not self._stop_requested:
+ message = receive_message(self._request)
+ if self._onmessage:
+ self._onmessage(message)
+ else:
+ self._queue.put(message)
+ finally:
+ close_connection(self._request)
+
+ def receive(self):
+ """ Receive a message from the channel, blocking.
+
+ Returns:
+ message as a unicode string.
+ """
+ return self._queue.get()
+
+ def receive_nowait(self):
+ """ Receive a message from the channel, non-blocking.
+
+ Returns:
+ message as a unicode string if available. None otherwise.
+ """
+ try:
+ message = self._queue.get_nowait()
+ except Queue.Empty:
+ message = None
+ return message
+
+ def stop(self):
+ """Request to stop this instance.
+
+ The instance will be stopped after receiving the next message.
+ This method may not be very useful, but there is no clean way
+ in Python to forcefully stop a running thread.
+ """
+ self._stop_requested = True
+
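+# Illustrative usage sketch (hypothetical handler code, not part of the
+# original module): MessageReceiver can be polled or driven by a callback.
+#
+#     receiver = MessageReceiver(request)       # blocking / non-blocking mode
+#     text = receiver.receive()                 # blocks for the next message
+#     maybe = receiver.receive_nowait()         # None if the queue is empty
+#
+#     def on_message(message):                  # callback mode; runs on the
+#         pass                                  # receiver thread
+#     receiver = MessageReceiver(request, on_message)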
+
+class MessageSender(threading.Thread):
+ """This class sends messages to the client.
+
+ This class provides both synchronous and asynchronous ways to send
+ messages.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ while True:
+ message, condition = self._queue.get()
+ condition.acquire()
+ send_message(self._request, message)
+ condition.notify()
+ condition.release()
+
+ def send(self, message):
+ """Send a message, blocking."""
+
+ condition = threading.Condition()
+ condition.acquire()
+ self._queue.put((message, condition))
+ condition.wait()
+
+ def send_nowait(self, message):
+ """Send a message, non-blocking."""
+
+ self._queue.put((message, threading.Condition()))
+
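+# Illustrative usage sketch (hypothetical handler code, not part of the
+# original module):
+#
+#     sender = MessageSender(request)
+#     sender.send(u'hello')      # blocks until the message has been written
+#     sender.send_nowait(u'hi')  # queues the message and returns immediately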
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/mux.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/mux.py
new file mode 100644
index 000000000..76334685b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/mux.py
@@ -0,0 +1,1889 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for multiplexing extension.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
+"""
+
+
+import collections
+import copy
+import email
+import email.parser
+import logging
+import math
+import struct
+import threading
+import traceback
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import parse_frame
+from mod_pywebsocket.handshake import hybi
+
+
+_CONTROL_CHANNEL_ID = 0
+_DEFAULT_CHANNEL_ID = 1
+
+_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
+_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
+_MUX_OPCODE_FLOW_CONTROL = 2
+_MUX_OPCODE_DROP_CHANNEL = 3
+_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
+
+_MAX_CHANNEL_ID = 2 ** 29 - 1
+
+_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
+_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
+
+_HANDSHAKE_ENCODING_IDENTITY = 0
+_HANDSHAKE_ENCODING_DELTA = 1
+
+# We need only this status code for now.
+_HTTP_BAD_RESPONSE_MESSAGES = {
+ common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
+}
+
+# DropChannel reason code
+# TODO(bashi): Define all reason codes defined in the -05 draft.
+_DROP_CODE_NORMAL_CLOSURE = 1000
+
+_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
+_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
+_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
+_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
+_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
+_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
+_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
+_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
+
+_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
+_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
+_DROP_CODE_ACKNOWLEDGED = 3008
+_DROP_CODE_BAD_FRAGMENTATION = 3009
+
+
+class MuxUnexpectedException(Exception):
+ """Exception in handling multiplexing extension."""
+ pass
+
+
+# Temporary
+class MuxNotImplementedException(Exception):
+    """Raised when a flow enters an unimplemented code path."""
+ pass
+
+
+class LogicalConnectionClosedException(Exception):
+ """Raised when logical connection is gracefully closed."""
+ pass
+
+
+class PhysicalConnectionError(Exception):
+ """Raised when there is a physical connection error."""
+ def __init__(self, drop_code, message=''):
+ super(PhysicalConnectionError, self).__init__(
+ 'code=%d, message=%r' % (drop_code, message))
+ self.drop_code = drop_code
+ self.message = message
+
+
+class LogicalChannelError(Exception):
+ """Raised when there is a logical channel error."""
+ def __init__(self, channel_id, drop_code, message=''):
+ super(LogicalChannelError, self).__init__(
+ 'channel_id=%d, code=%d, message=%r' % (
+ channel_id, drop_code, message))
+ self.channel_id = channel_id
+ self.drop_code = drop_code
+ self.message = message
+
+
+def _encode_channel_id(channel_id):
+ if channel_id < 0:
+ raise ValueError('Channel id %d must not be negative' % channel_id)
+
+ if channel_id < 2 ** 7:
+ return chr(channel_id)
+ if channel_id < 2 ** 14:
+ return struct.pack('!H', 0x8000 + channel_id)
+ if channel_id < 2 ** 21:
+ first = chr(0xc0 + (channel_id >> 16))
+ return first + struct.pack('!H', channel_id & 0xffff)
+ if channel_id < 2 ** 29:
+ return struct.pack('!L', 0xe0000000 + channel_id)
+
+ raise ValueError('Channel id %d is too large' % channel_id)
+
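+# Illustrative encodings derived from the ranges handled above:
+#     _encode_channel_id(5)       -> '\x05'              (1 byte,  id < 2**7)
+#     _encode_channel_id(300)     -> '\x81\x2c'          (2 bytes, id < 2**14)
+#     _encode_channel_id(2 ** 20) -> '\xd0\x00\x00'      (3 bytes, id < 2**21)
+#     _encode_channel_id(2 ** 28) -> '\xf0\x00\x00\x00'  (4 bytes, id < 2**29)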
+
+def _encode_number(number):
+ return create_length_header(number, False)
+
+
+def _create_add_channel_response(channel_id, encoded_handshake,
+ encoding=0, rejected=False):
+ if encoding != 0 and encoding != 1:
+ raise ValueError('Invalid encoding %d' % encoding)
+
+ first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
+ (rejected << 4) | encoding)
+ block = (chr(first_byte) +
+ _encode_channel_id(channel_id) +
+ _encode_number(len(encoded_handshake)) +
+ encoded_handshake)
+ return block
+
+
+def _create_drop_channel(channel_id, code=None, message=''):
+ if len(message) > 0 and code is None:
+ raise ValueError('Code must be specified if message is specified')
+
+ first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
+ block = chr(first_byte) + _encode_channel_id(channel_id)
+ if code is None:
+ block += _encode_number(0) # Reason size
+ else:
+ reason = struct.pack('!H', code) + message
+ reason_size = _encode_number(len(reason))
+ block += reason_size + reason
+
+ return block
+
+
+def _create_flow_control(channel_id, replenished_quota):
+ first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
+ block = (chr(first_byte) +
+ _encode_channel_id(channel_id) +
+ _encode_number(replenished_quota))
+ return block
+
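+# Illustrative sketch, assuming create_length_header() (imported above from
+# _stream_hybi) produces the standard hybi length encoding with the mask bit
+# clear, i.e. the encoding _MuxFramePayloadParser._read_number() expects:
+#     _create_flow_control(2, 100) -> '\x40\x02\x64'
+#     (opcode byte 0x40 = FlowControl, channel id 2, replenished quota 100)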
+
+def _create_new_channel_slot(slots, send_quota):
+ if slots < 0 or send_quota < 0:
+ raise ValueError('slots and send_quota must be non-negative.')
+ first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
+ block = (chr(first_byte) +
+ _encode_number(slots) +
+ _encode_number(send_quota))
+ return block
+
+
+def _create_fallback_new_channel_slot():
+ first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
+ block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
+ return block
+
+
+def _parse_request_text(request_text):
+ request_line, header_lines = request_text.split('\r\n', 1)
+
+ words = request_line.split(' ')
+ if len(words) != 3:
+ raise ValueError('Bad Request-Line syntax %r' % request_line)
+ [command, path, version] = words
+ if version != 'HTTP/1.1':
+ raise ValueError('Bad request version %r' % version)
+
+ # email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
+    # RFC 6455 refers to RFC 2616 for handshake parsing, and RFC 2616 refers
+    # to RFC 822.
+ headers = email.parser.Parser().parsestr(header_lines)
+ return command, path, version, headers
+
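+# Illustrative sketch (hypothetical request text):
+#     command, path, version, headers = _parse_request_text(
+#         'GET /echo HTTP/1.1\r\n'
+#         'Host: example.com\r\n'
+#         'Sec-WebSocket-Version: 13\r\n'
+#         '\r\n')
+# yields command='GET', path='/echo', version='HTTP/1.1' and an email Message
+# object exposing the Host and Sec-WebSocket-Version headers.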
+
+class _ControlBlock(object):
+    """A structure that holds the parsing result of a multiplexing control
+    block. Control-block-specific attributes are added by
+    _MuxFramePayloadParser (e.g. encoded_handshake is added for
+    AddChannelRequest and AddChannelResponse).
+ """
+
+ def __init__(self, opcode):
+ self.opcode = opcode
+
+
+class _MuxFramePayloadParser(object):
+ """A class that parses multiplexed frame payload."""
+
+ def __init__(self, payload):
+ self._data = payload
+ self._read_position = 0
+ self._logger = util.get_class_logger(self)
+
+ def read_channel_id(self):
+ """Reads channel id.
+
+ Raises:
+ ValueError: when the payload doesn't contain
+ valid channel id.
+ """
+
+ remaining_length = len(self._data) - self._read_position
+ pos = self._read_position
+ if remaining_length == 0:
+ raise ValueError('Invalid channel id format')
+
+ channel_id = ord(self._data[pos])
+ channel_id_length = 1
+ if channel_id & 0xe0 == 0xe0:
+ if remaining_length < 4:
+ raise ValueError('Invalid channel id format')
+ channel_id = struct.unpack('!L',
+ self._data[pos:pos+4])[0] & 0x1fffffff
+ channel_id_length = 4
+ elif channel_id & 0xc0 == 0xc0:
+ if remaining_length < 3:
+ raise ValueError('Invalid channel id format')
+ channel_id = (((channel_id & 0x1f) << 16) +
+ struct.unpack('!H', self._data[pos+1:pos+3])[0])
+ channel_id_length = 3
+ elif channel_id & 0x80 == 0x80:
+ if remaining_length < 2:
+ raise ValueError('Invalid channel id format')
+ channel_id = struct.unpack('!H',
+ self._data[pos:pos+2])[0] & 0x3fff
+ channel_id_length = 2
+ self._read_position += channel_id_length
+
+ return channel_id
+
+ def read_inner_frame(self):
+ """Reads an inner frame.
+
+ Raises:
+ PhysicalConnectionError: when the inner frame is invalid.
+ """
+
+ if len(self._data) == self._read_position:
+ raise PhysicalConnectionError(
+ _DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
+
+ bits = ord(self._data[self._read_position])
+ self._read_position += 1
+ fin = (bits & 0x80) == 0x80
+ rsv1 = (bits & 0x40) == 0x40
+ rsv2 = (bits & 0x20) == 0x20
+ rsv3 = (bits & 0x10) == 0x10
+ opcode = bits & 0xf
+ payload = self.remaining_data()
+ # Consume rest of the message which is payload data of the original
+ # frame.
+ self._read_position = len(self._data)
+ return fin, rsv1, rsv2, rsv3, opcode, payload
+
+ def _read_number(self):
+ if self._read_position + 1 > len(self._data):
+ raise ValueError(
+ 'Cannot read the first byte of number field')
+
+ number = ord(self._data[self._read_position])
+ if number & 0x80 == 0x80:
+ raise ValueError(
+ 'The most significant bit of the first byte of number should '
+ 'be unset')
+ self._read_position += 1
+ pos = self._read_position
+ if number == 127:
+ if pos + 8 > len(self._data):
+ raise ValueError('Invalid number field')
+ self._read_position += 8
+ number = struct.unpack('!Q', self._data[pos:pos+8])[0]
+ if number > 0x7FFFFFFFFFFFFFFF:
+ raise ValueError('Encoded number(%d) >= 2^63' % number)
+ if number <= 0xFFFF:
+ raise ValueError(
+ '%d should not be encoded by 9 bytes encoding' % number)
+ return number
+ if number == 126:
+ if pos + 2 > len(self._data):
+ raise ValueError('Invalid number field')
+ self._read_position += 2
+ number = struct.unpack('!H', self._data[pos:pos+2])[0]
+ if number <= 125:
+ raise ValueError(
+ '%d should not be encoded by 3 bytes encoding' % number)
+ return number
+
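+    # Illustrative decodings handled by _read_number() above:
+    #     '\x64'          -> 100   (values <= 125 fit in one byte)
+    #     '\x7e\x04\x00'  -> 1024  (126 prefix + 2-byte big-endian value)
+    #     '\x7f' + 8 bytes         (127 prefix, for values above 0xFFFF)
+    # The most significant bit of the first byte must be unset.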
+ def _read_size_and_contents(self):
+        """Reads data that consists of the following:
+            - the size of the contents, encoded the same way as the payload
+              length of the WebSocket Protocol, with 1 bit of padding at the
+              head.
+            - the contents.
+ """
+
+ try:
+ size = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+ pos = self._read_position
+ if pos + size > len(self._data):
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Cannot read %d bytes data' % size)
+
+ self._read_position += size
+ return self._data[pos:pos+size]
+
+ def _read_add_channel_request(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x7
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ # Invalid encoding will be handled by MuxHandler.
+ encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoding = encoding
+ encoded_handshake = self._read_size_and_contents()
+ control_block.encoded_handshake = encoded_handshake
+ return control_block
+
+ def _read_add_channel_response(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x3
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ control_block.accepted = (first_byte >> 4) & 1
+ control_block.encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoded_handshake = self._read_size_and_contents()
+ return control_block
+
+ def _read_flow_control(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ control_block.send_quota = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+
+ return control_block
+
+ def _read_drop_channel(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ reason = self._read_size_and_contents()
+ if len(reason) == 0:
+ control_block.drop_code = None
+ control_block.drop_message = ''
+ elif len(reason) >= 2:
+ control_block.drop_code = struct.unpack('!H', reason[:2])[0]
+ control_block.drop_message = reason[2:]
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Received DropChannel that contains only a 1-byte reason')
+ return control_block
+
+ def _read_new_channel_slot(self, first_byte, control_block):
+ reserved = first_byte & 0x1e
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+ control_block.fallback = first_byte & 1
+ try:
+ control_block.slots = self._read_number()
+ control_block.send_quota = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+ return control_block
+
+ def read_control_blocks(self):
+ """Reads control block(s).
+
+ Raises:
+ PhysicalConnectionError: when the payload contains invalid control
+ block(s).
+ StopIteration: when no control blocks left.
+ """
+
+ while self._read_position < len(self._data):
+ first_byte = ord(self._data[self._read_position])
+ self._read_position += 1
+ opcode = (first_byte >> 5) & 0x7
+ control_block = _ControlBlock(opcode=opcode)
+ if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+ yield self._read_add_channel_request(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ yield self._read_add_channel_response(
+ first_byte, control_block)
+ elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+ yield self._read_flow_control(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+ yield self._read_drop_channel(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ yield self._read_new_channel_slot(first_byte, control_block)
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_UNKNOWN_MUX_OPCODE,
+ 'Invalid opcode %d' % opcode)
+
+ assert self._read_position == len(self._data)
+ raise StopIteration
+
+ def remaining_data(self):
+ """Returns remaining data."""
+
+ return self._data[self._read_position:]
+
+
+class _LogicalRequest(object):
+ """Mimics mod_python request."""
+
+ def __init__(self, channel_id, command, path, protocol, headers,
+ connection):
+ """Constructs an instance.
+
+ Args:
+ channel_id: the channel id of the logical channel.
+ command: HTTP request command.
+ path: HTTP request path.
+ headers: HTTP headers.
+ connection: _LogicalConnection instance.
+ """
+
+ self.channel_id = channel_id
+ self.method = command
+ self.uri = path
+ self.protocol = protocol
+ self.headers_in = headers
+ self.connection = connection
+ self.server_terminated = False
+ self.client_terminated = False
+
+ def is_https(self):
+ """Mimics request.is_https(). Returns False because this method is
+ used only by old protocols (hixie and hybi00).
+ """
+
+ return False
+
+
+class _LogicalConnection(object):
+ """Mimics mod_python mp_conn."""
+
+ # For details, see the comment of set_read_state().
+ STATE_ACTIVE = 1
+ STATE_GRACEFULLY_CLOSED = 2
+ STATE_TERMINATED = 3
+
+ def __init__(self, mux_handler, channel_id):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ channel_id: channel id of this connection.
+ """
+
+ self._mux_handler = mux_handler
+ self._channel_id = channel_id
+ self._incoming_data = ''
+
+ # - Protects _waiting_write_completion
+ # - Signals the thread waiting for completion of write by mux handler
+ self._write_condition = threading.Condition()
+ self._waiting_write_completion = False
+
+ self._read_condition = threading.Condition()
+ self._read_state = self.STATE_ACTIVE
+
+ def get_local_addr(self):
+ """Getter to mimic mp_conn.local_addr."""
+
+ return self._mux_handler.physical_connection.get_local_addr()
+ local_addr = property(get_local_addr)
+
+ def get_remote_addr(self):
+ """Getter to mimic mp_conn.remote_addr."""
+
+ return self._mux_handler.physical_connection.get_remote_addr()
+ remote_addr = property(get_remote_addr)
+
+ def get_memorized_lines(self):
+ """Gets memorized lines. Not supported."""
+
+ raise MuxUnexpectedException('_LogicalConnection does not support '
+ 'get_memorized_lines')
+
+ def write(self, data):
+        """Writes data. mux_handler sends data asynchronously. The caller will
+        be suspended until the write is done.
+
+ Args:
+ data: data to be written.
+
+ Raises:
+ MuxUnexpectedException: when called before finishing the previous
+ write.
+ """
+
+ try:
+ self._write_condition.acquire()
+ if self._waiting_write_completion:
+ raise MuxUnexpectedException(
+ 'Logical connection %d is already waiting the completion '
+ 'of write' % self._channel_id)
+
+ self._waiting_write_completion = True
+ self._mux_handler.send_data(self._channel_id, data)
+ self._write_condition.wait()
+ # TODO(tyoshino): Raise an exception if woke up by on_writer_done.
+ finally:
+ self._write_condition.release()
+
+ def write_control_data(self, data):
+        """Writes data via the control channel. Doesn't wait for the write to
+        finish because this method can be called by the mux dispatcher.
+
+ Args:
+ data: data to be written.
+ """
+
+ self._mux_handler.send_control_data(data)
+
+ def on_write_data_done(self):
+ """Called when sending data is completed."""
+
+ try:
+ self._write_condition.acquire()
+ if not self._waiting_write_completion:
+ raise MuxUnexpectedException(
+ 'Invalid call of on_write_data_done for logical '
+ 'connection %d' % self._channel_id)
+ self._waiting_write_completion = False
+ self._write_condition.notify()
+ finally:
+ self._write_condition.release()
+
+ def on_writer_done(self):
+ """Called by the mux handler when the writer thread has finished."""
+
+ try:
+ self._write_condition.acquire()
+ self._waiting_write_completion = False
+ self._write_condition.notify()
+ finally:
+ self._write_condition.release()
+
+
+ def append_frame_data(self, frame_data):
+ """Appends incoming frame data. Called when mux_handler dispatches
+ frame data to the corresponding application.
+
+ Args:
+ frame_data: incoming frame data.
+ """
+
+ self._read_condition.acquire()
+ self._incoming_data += frame_data
+ self._read_condition.notify()
+ self._read_condition.release()
+
+ def read(self, length):
+ """Reads data. Blocks until enough data has arrived via physical
+ connection.
+
+ Args:
+ length: length of data to be read.
+ Raises:
+ LogicalConnectionClosedException: when closing handshake for this
+ logical channel has been received.
+ ConnectionTerminatedException: when the physical connection has
+ closed, or an error is caused on the reader thread.
+ """
+
+ self._read_condition.acquire()
+ while (self._read_state == self.STATE_ACTIVE and
+ len(self._incoming_data) < length):
+ self._read_condition.wait()
+
+ try:
+ if self._read_state == self.STATE_GRACEFULLY_CLOSED:
+ raise LogicalConnectionClosedException(
+ 'Logical channel %d has closed.' % self._channel_id)
+ elif self._read_state == self.STATE_TERMINATED:
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. Logical channel (%d) closed' %
+ (length, self._channel_id))
+
+ value = self._incoming_data[:length]
+ self._incoming_data = self._incoming_data[length:]
+ finally:
+ self._read_condition.release()
+
+ return value
+
+ def set_read_state(self, new_state):
+ """Sets the state of this connection. Called when an event for this
+ connection has occurred.
+
+ Args:
+            new_state: state to be set. new_state must be one of the
+              following:
+            - STATE_GRACEFULLY_CLOSED: when the closing handshake for this
+              connection has been received.
+            - STATE_TERMINATED: when the physical connection has closed or a
+              DropChannel for this connection has been received.
+ """
+
+ self._read_condition.acquire()
+ self._read_state = new_state
+ self._read_condition.notify()
+ self._read_condition.release()
+
+
+class _InnerMessage(object):
+ """Holds the result of _InnerMessageBuilder.build().
+ """
+
+ def __init__(self, opcode, payload):
+ self.opcode = opcode
+ self.payload = payload
+
+
+class _InnerMessageBuilder(object):
+ """A class that holds the context of inner message fragmentation and
+ builds a message from fragmented inner frame(s).
+ """
+
+ def __init__(self):
+ self._control_opcode = None
+ self._pending_control_fragments = []
+ self._message_opcode = None
+ self._pending_message_fragments = []
+ self._frame_handler = self._handle_first
+
+ def _handle_first(self, frame):
+ if frame.opcode == common.OPCODE_CONTINUATION:
+ raise InvalidFrameException('Sending invalid continuation opcode')
+
+ if common.is_control_opcode(frame.opcode):
+ return self._process_first_fragmented_control(frame)
+ else:
+ return self._process_first_fragmented_message(frame)
+
+ def _process_first_fragmented_control(self, frame):
+ self._control_opcode = frame.opcode
+ self._pending_control_fragments.append(frame.payload)
+ if not frame.fin:
+ self._frame_handler = self._handle_fragmented_control
+ return None
+ return self._reassemble_fragmented_control()
+
+ def _process_first_fragmented_message(self, frame):
+ self._message_opcode = frame.opcode
+ self._pending_message_fragments.append(frame.payload)
+ if not frame.fin:
+ self._frame_handler = self._handle_fragmented_message
+ return None
+ return self._reassemble_fragmented_message()
+
+ def _handle_fragmented_control(self, frame):
+ if frame.opcode != common.OPCODE_CONTINUATION:
+ raise InvalidFrameException(
+ 'Sending invalid opcode %d while sending fragmented control '
+ 'message' % frame.opcode)
+ self._pending_control_fragments.append(frame.payload)
+ if not frame.fin:
+ return None
+ return self._reassemble_fragmented_control()
+
+ def _reassemble_fragmented_control(self):
+ opcode = self._control_opcode
+ payload = ''.join(self._pending_control_fragments)
+ self._control_opcode = None
+ self._pending_control_fragments = []
+ if self._message_opcode is not None:
+ self._frame_handler = self._handle_fragmented_message
+ else:
+ self._frame_handler = self._handle_first
+ return _InnerMessage(opcode, payload)
+
+ def _handle_fragmented_message(self, frame):
+ # Sender can interleave a control message while sending fragmented
+ # messages.
+ if common.is_control_opcode(frame.opcode):
+ if self._control_opcode is not None:
+ raise MuxUnexpectedException(
+ 'Should not reach here(Bug in builder)')
+ return self._process_first_fragmented_control(frame)
+
+ if frame.opcode != common.OPCODE_CONTINUATION:
+ raise InvalidFrameException(
+ 'Sending invalid opcode %d while sending fragmented message' %
+ frame.opcode)
+ self._pending_message_fragments.append(frame.payload)
+ if not frame.fin:
+ return None
+ return self._reassemble_fragmented_message()
+
+ def _reassemble_fragmented_message(self):
+ opcode = self._message_opcode
+ payload = ''.join(self._pending_message_fragments)
+ self._message_opcode = None
+ self._pending_message_fragments = []
+ self._frame_handler = self._handle_first
+ return _InnerMessage(opcode, payload)
+
+ def build(self, frame):
+ """Build an inner message. Returns an _InnerMessage instance when
+ the given frame is the last fragmented frame. Returns None otherwise.
+
+ Args:
+ frame: an inner frame.
+ Raises:
+ InvalidFrameException: when received invalid opcode. (e.g.
+ receiving non continuation data opcode but the fin flag of
+ the previous inner frame was not set.)
+ """
+
+ return self._frame_handler(frame)
+
+
+class _LogicalStream(Stream):
+ """Mimics the Stream class. This class interprets multiplexed WebSocket
+ frames.
+ """
+
+ def __init__(self, request, stream_options, send_quota, receive_quota):
+ """Constructs an instance.
+
+ Args:
+ request: _LogicalRequest instance.
+ stream_options: StreamOptions instance.
+ send_quota: Initial send quota.
+ receive_quota: Initial receive quota.
+ """
+
+ # Physical stream is responsible for masking.
+ stream_options.unmask_receive = False
+ Stream.__init__(self, request, stream_options)
+
+ self._send_closed = False
+ self._send_quota = send_quota
+ # - Protects _send_closed and _send_quota
+ # - Signals the thread waiting for send quota replenished
+ self._send_condition = threading.Condition()
+
+ # The opcode of the first frame in messages.
+ self._message_opcode = common.OPCODE_TEXT
+ # True when the last message was fragmented.
+ self._last_message_was_fragmented = False
+
+ self._receive_quota = receive_quota
+ self._write_inner_frame_semaphore = threading.Semaphore()
+
+ self._inner_message_builder = _InnerMessageBuilder()
+
+ def _create_inner_frame(self, opcode, payload, end=True):
+ frame = Frame(fin=end, opcode=opcode, payload=payload)
+ for frame_filter in self._options.outgoing_frame_filters:
+ frame_filter.filter(frame)
+
+ if len(payload) != len(frame.payload):
+ raise MuxUnexpectedException(
+                'Mux extension must not be used after extensions which '
+                'change frame boundary')
+
+ first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
+ (frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
+ return chr(first_byte) + frame.payload
+
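+    # Illustrative sketch: with no outgoing frame filters installed,
+    # _create_inner_frame(common.OPCODE_TEXT, 'Hi') returns '\x81Hi': one
+    # byte carrying FIN/RSV1-3/opcode followed by the raw payload. There is
+    # no length field or masking key; the encapsulating physical frame
+    # supplies those (the physical stream is responsible for masking, as
+    # noted in __init__).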
+ def _write_inner_frame(self, opcode, payload, end=True):
+ payload_length = len(payload)
+ write_position = 0
+
+ try:
+            # An inner frame will be fragmented if there is not enough send
+            # quota. This semaphore ensures that fragmented inner frames are
+ # sent in order on the logical channel.
+ # Note that frames that come from other logical channels or
+ # multiplexing control blocks can be inserted between fragmented
+ # inner frames on the physical channel.
+ self._write_inner_frame_semaphore.acquire()
+
+ # Consume an octet quota when this is the first fragmented frame.
+ if opcode != common.OPCODE_CONTINUATION:
+ try:
+ self._send_condition.acquire()
+ while (not self._send_closed) and self._send_quota == 0:
+ self._send_condition.wait()
+
+ if self._send_closed:
+ raise BadOperationException(
+ 'Logical connection %d is closed' %
+ self._request.channel_id)
+
+ self._send_quota -= 1
+ finally:
+ self._send_condition.release()
+
+ while write_position < payload_length:
+ try:
+ self._send_condition.acquire()
+ while (not self._send_closed) and self._send_quota == 0:
+ self._logger.debug(
+ 'No quota. Waiting FlowControl message for %d.' %
+ self._request.channel_id)
+ self._send_condition.wait()
+
+ if self._send_closed:
+ raise BadOperationException(
+ 'Logical connection %d is closed' %
+                            self._request.channel_id)
+
+ remaining = payload_length - write_position
+ write_length = min(self._send_quota, remaining)
+ inner_frame_end = (
+ end and
+ (write_position + write_length == payload_length))
+
+ inner_frame = self._create_inner_frame(
+ opcode,
+ payload[write_position:write_position+write_length],
+ inner_frame_end)
+ self._send_quota -= write_length
+ self._logger.debug('Consumed quota=%d, remaining=%d' %
+ (write_length, self._send_quota))
+ finally:
+ self._send_condition.release()
+
+ # Writing data will block the worker so we need to release
+ # _send_condition before writing.
+ self._logger.debug('Sending inner frame: %r' % inner_frame)
+ self._request.connection.write(inner_frame)
+ write_position += write_length
+
+ opcode = common.OPCODE_CONTINUATION
+
+ except ValueError, e:
+ raise BadOperationException(e)
+ finally:
+ self._write_inner_frame_semaphore.release()
+
+ def replenish_send_quota(self, send_quota):
+ """Replenish send quota."""
+
+ try:
+ self._send_condition.acquire()
+ if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
+ self._send_quota = 0
+ raise LogicalChannelError(
+ self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
+ self._send_quota += send_quota
+ self._logger.debug('Replenished send quota for channel id %d: %d' %
+ (self._request.channel_id, self._send_quota))
+ finally:
+ self._send_condition.notify()
+ self._send_condition.release()
+
+ def consume_receive_quota(self, amount):
+ """Consumes receive quota. Returns False on failure."""
+
+ if self._receive_quota < amount:
+ self._logger.debug('Violate quota on channel id %d: %d < %d' %
+ (self._request.channel_id,
+ self._receive_quota, amount))
+ return False
+ self._receive_quota -= amount
+ return True
+
+ def send_message(self, message, end=True, binary=False):
+ """Override Stream.send_message."""
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ if binary and isinstance(message, unicode):
+ raise BadOperationException(
+ 'Message for binary frame must be instance of str')
+
+ if binary:
+ opcode = common.OPCODE_BINARY
+ else:
+ opcode = common.OPCODE_TEXT
+ message = message.encode('utf-8')
+
+ for message_filter in self._options.outgoing_message_filters:
+ message = message_filter.filter(message, end, binary)
+
+ if self._last_message_was_fragmented:
+ if opcode != self._message_opcode:
+ raise BadOperationException('Message types are different in '
+ 'frames for the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ self._message_opcode = opcode
+
+ self._write_inner_frame(opcode, message, end)
+ self._last_message_was_fragmented = not end
+
+ def _receive_frame(self):
+ """Overrides Stream._receive_frame.
+
+        In addition to calling Stream._receive_frame, this method adds the
+        payload size to the receive quota and sends FlowControl to the client.
+        We need to do this here because Stream.receive_message() handles
+        control frames internally.
+ """
+
+ opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
+ amount = len(payload)
+        # Replenish one extra octet when receiving the first fragmented frame.
+ if opcode != common.OPCODE_CONTINUATION:
+ amount += 1
+ self._receive_quota += amount
+ frame_data = _create_flow_control(self._request.channel_id,
+ amount)
+ self._logger.debug('Sending flow control for %d, replenished=%d' %
+ (self._request.channel_id, amount))
+ self._request.connection.write_control_data(frame_data)
+ return opcode, payload, fin, rsv1, rsv2, rsv3
+
+ def _get_message_from_frame(self, frame):
+ """Overrides Stream._get_message_from_frame.
+ """
+
+ try:
+ inner_message = self._inner_message_builder.build(frame)
+ except InvalidFrameException:
+ raise LogicalChannelError(
+ self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
+
+ if inner_message is None:
+ return None
+ self._original_opcode = inner_message.opcode
+ return inner_message.payload
+
+ def receive_message(self):
+ """Overrides Stream.receive_message."""
+
+ # Just call Stream.receive_message(), but catch
+ # LogicalConnectionClosedException, which is raised when the logical
+ # connection has closed gracefully.
+ try:
+ return Stream.receive_message(self)
+ except LogicalConnectionClosedException, e:
+ self._logger.debug('%s', e)
+ return None
+
+ def _send_closing_handshake(self, code, reason):
+ """Overrides Stream._send_closing_handshake."""
+
+ body = create_closing_handshake_body(code, reason)
+ self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
+ (self._request.channel_id, code, reason))
+ self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
+
+ self._request.server_terminated = True
+
+ def send_ping(self, body=''):
+ """Overrides Stream.send_ping"""
+
+ self._logger.debug('Sending ping on logical channel %d: %r' %
+ (self._request.channel_id, body))
+ self._write_inner_frame(common.OPCODE_PING, body, end=True)
+
+ self._ping_queue.append(body)
+
+ def _send_pong(self, body):
+ """Overrides Stream._send_pong"""
+
+ self._logger.debug('Sending pong on logical channel %d: %r' %
+ (self._request.channel_id, body))
+ self._write_inner_frame(common.OPCODE_PONG, body, end=True)
+
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
+ """Overrides Stream.close_connection."""
+
+ # TODO(bashi): Implement
+ self._logger.debug('Closing logical connection %d' %
+ self._request.channel_id)
+ self._request.server_terminated = True
+
+ def stop_sending(self):
+ """Stops accepting new send operation (_write_inner_frame)."""
+
+ self._send_condition.acquire()
+ self._send_closed = True
+ self._send_condition.notify()
+ self._send_condition.release()
+
+
+class _OutgoingData(object):
+ """A structure that holds data to be sent via physical connection and
+ origin of the data.
+ """
+
+ def __init__(self, channel_id, data):
+ self.channel_id = channel_id
+ self.data = data
+
+
+class _PhysicalConnectionWriter(threading.Thread):
+ """A thread that is responsible for writing data to physical connection.
+
+    TODO(bashi): Make sure there is no thread-safety problem when the reader
+    thread reads data from the same socket at the same time.
+ """
+
+ def __init__(self, mux_handler):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self.setDaemon(True)
+
+ # When set, make this thread stop accepting new data, flush pending
+ # data and exit.
+ self._stop_requested = False
+ # The close code of the physical connection.
+ self._close_code = common.STATUS_NORMAL_CLOSURE
+ # Deque for passing write data. It's protected by _deque_condition
+ # until _stop_requested is set.
+ self._deque = collections.deque()
+ # - Protects _deque, _stop_requested and _close_code
+ # - Signals threads waiting for them to be available
+ self._deque_condition = threading.Condition()
+
+ def put_outgoing_data(self, data):
+ """Puts outgoing data.
+
+ Args:
+ data: _OutgoingData instance.
+
+ Raises:
+ BadOperationException: when the thread has been requested to
+ terminate.
+ """
+
+ try:
+ self._deque_condition.acquire()
+ if self._stop_requested:
+ raise BadOperationException('Cannot write data anymore')
+
+ self._deque.append(data)
+ self._deque_condition.notify()
+ finally:
+ self._deque_condition.release()
+
+ def _write_data(self, outgoing_data):
+ message = (_encode_channel_id(outgoing_data.channel_id) +
+ outgoing_data.data)
+ try:
+ self._mux_handler.physical_stream.send_message(
+ message=message, end=True, binary=True)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._mux_handler.physical_connection.remote_addr,), e)
+ raise
+
+ # TODO(bashi): It would be better to block the thread that sends
+ # control data as well.
+ if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
+ self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
+
+ def run(self):
+ try:
+ self._deque_condition.acquire()
+ while not self._stop_requested:
+ if len(self._deque) == 0:
+ self._deque_condition.wait()
+ continue
+
+ outgoing_data = self._deque.popleft()
+
+ self._deque_condition.release()
+ self._write_data(outgoing_data)
+ self._deque_condition.acquire()
+
+ # Flush deque.
+ #
+ # At this point, self._deque_condition is always acquired.
+ try:
+ while len(self._deque) > 0:
+ outgoing_data = self._deque.popleft()
+ self._write_data(outgoing_data)
+ finally:
+ self._deque_condition.release()
+
+ # Close physical connection.
+ try:
+            # Don't wait for the response here. The response will be read
+ # by the reader thread.
+ self._mux_handler.physical_stream.close_connection(
+ self._close_code, wait_response=False)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to close the physical connection: %r' % e)
+ raise
+ finally:
+ self._mux_handler.notify_writer_done()
+
+ def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
+ """Stops the writer thread."""
+
+ self._deque_condition.acquire()
+ self._stop_requested = True
+ self._close_code = close_code
+ self._deque_condition.notify()
+ self._deque_condition.release()
+
+
+class _PhysicalConnectionReader(threading.Thread):
+ """A thread that is responsible for reading data from physical connection.
+ """
+
+ def __init__(self, mux_handler):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self.setDaemon(True)
+
+ def run(self):
+ while True:
+ try:
+ physical_stream = self._mux_handler.physical_stream
+ message = physical_stream.receive_message()
+ if message is None:
+ break
+ # Below happens only when a data message is received.
+ opcode = physical_stream.get_last_received_opcode()
+ if opcode != common.OPCODE_BINARY:
+ self._mux_handler.fail_physical_connection(
+ _DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
+ 'Received a text message on physical connection')
+ break
+
+ except ConnectionTerminatedException, e:
+ self._logger.debug('%s', e)
+ break
+
+ try:
+ self._mux_handler.dispatch_message(message)
+ except PhysicalConnectionError, e:
+ self._mux_handler.fail_physical_connection(
+ e.drop_code, e.message)
+ break
+ except LogicalChannelError, e:
+ self._mux_handler.fail_logical_channel(
+ e.channel_id, e.drop_code, e.message)
+ except Exception, e:
+ self._logger.debug(traceback.format_exc())
+ break
+
+ self._mux_handler.notify_reader_done()
+
+
+class _Worker(threading.Thread):
+ """A thread that is responsible for running the corresponding application
+ handler.
+ """
+
+ def __init__(self, mux_handler, request):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ request: _LogicalRequest instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self._request = request
+ self.setDaemon(True)
+
+ def run(self):
+ self._logger.debug('Logical channel worker started. (id=%d)' %
+ self._request.channel_id)
+ try:
+ # Non-critical exceptions will be handled by dispatcher.
+ self._mux_handler.dispatcher.transfer_data(self._request)
+ except LogicalChannelError, e:
+ self._mux_handler.fail_logical_channel(
+ e.channel_id, e.drop_code, e.message)
+ finally:
+ self._mux_handler.notify_worker_done(self._request.channel_id)
+
+
+class _MuxHandshaker(hybi.Handshaker):
+ """Opening handshake processor for multiplexing."""
+
+ _DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
+
+ def __init__(self, request, dispatcher, send_quota, receive_quota):
+ """Constructs an instance.
+ Args:
+ request: _LogicalRequest instance.
+ dispatcher: Dispatcher instance (dispatch.Dispatcher).
+ send_quota: Initial send quota.
+ receive_quota: Initial receive quota.
+ """
+
+ hybi.Handshaker.__init__(self, request, dispatcher)
+ self._send_quota = send_quota
+ self._receive_quota = receive_quota
+
+ # Append headers which should not be included in handshake field of
+ # AddChannelRequest.
+        # TODO(bashi): Make sure whether we should raise an exception when
+        # these headers are already included.
+ request.headers_in[common.UPGRADE_HEADER] = (
+ common.WEBSOCKET_UPGRADE_TYPE)
+ request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
+ str(common.VERSION_HYBI_LATEST))
+ request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
+ self._DUMMY_WEBSOCKET_KEY)
+
+ def _create_stream(self, stream_options):
+ """Override hybi.Handshaker._create_stream."""
+
+ self._logger.debug('Creating logical stream for %d' %
+ self._request.channel_id)
+ return _LogicalStream(
+ self._request, stream_options, self._send_quota,
+ self._receive_quota)
+
+ def _create_handshake_response(self, accept):
+ """Override hybi._create_handshake_response."""
+
+ response = []
+
+ response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+ # Upgrade and Sec-WebSocket-Accept should be excluded.
+ response.append('%s: %s\r\n' % (
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ if self._request.ws_protocol is not None:
+ response.append('%s: %s\r\n' % (
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ if (self._request.ws_extensions is not None and
+ len(self._request.ws_extensions) != 0):
+ response.append('%s: %s\r\n' % (
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+ common.format_extensions(self._request.ws_extensions)))
+ response.append('\r\n')
+
+ return ''.join(response)
+
+ def _send_handshake(self, accept):
+ """Override hybi.Handshaker._send_handshake."""
+
+ # Don't send handshake response for the default channel
+ if self._request.channel_id == _DEFAULT_CHANNEL_ID:
+ return
+
+ handshake_response = self._create_handshake_response(accept)
+ frame_data = _create_add_channel_response(
+ self._request.channel_id,
+ handshake_response)
+ self._logger.debug('Sending handshake response for %d: %r' %
+ (self._request.channel_id, frame_data))
+ self._request.connection.write_control_data(frame_data)
+
+
+class _LogicalChannelData(object):
+ """A structure that holds information about logical channel.
+ """
+
+ def __init__(self, request, worker):
+ self.request = request
+ self.worker = worker
+ self.drop_code = _DROP_CODE_NORMAL_CLOSURE
+ self.drop_message = ''
+
+
+class _HandshakeDeltaBase(object):
+ """A class that holds information for delta-encoded handshake."""
+
+ def __init__(self, headers):
+ self._headers = headers
+
+ def create_headers(self, delta=None):
+ """Creates request headers for an AddChannelRequest that has
+ delta-encoded handshake.
+
+ Args:
+            delta: headers to be overridden.
+ """
+
+ headers = copy.copy(self._headers)
+ if delta:
+ for key, value in delta.items():
+ # The spec requires that a header with an empty value is
+ # removed from the delta base.
+ if len(value) == 0 and headers.has_key(key):
+ del headers[key]
+ else:
+ headers[key] = value
+ return headers
+
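+# Illustrative sketch of the delta behaviour above (hypothetical header
+# values): with a base of {'Host': 'example.com', 'X-Foo': 'bar'},
+#     create_headers({'X-Foo': '', 'X-Baz': 'qux'})
+# yields {'Host': 'example.com', 'X-Baz': 'qux'}: an empty value removes the
+# header from the delta base, while other entries override or extend it.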
+
+class _MuxHandler(object):
+    """Multiplexing handler. When a handler starts, it launches three
+    threads: the reader thread, the writer thread, and a worker thread.
+
+ The reader thread reads data from the physical stream, i.e., the
+ ws_stream object of the underlying websocket connection. The reader
+ thread interprets multiplexed frames and dispatches them to logical
+ channels. Methods of this class are mostly called by the reader thread.
+
+ The writer thread sends multiplexed frames which are created by
+ logical channels via the physical connection.
+
+ The worker thread launched at the starting point handles the
+ "Implicitly Opened Connection". If multiplexing handler receives
+ an AddChannelRequest and accepts it, the handler will launch a new worker
+ thread and dispatch the request to it.
+ """
+
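+    # Rough driving sketch, assuming the rest of this module wires the
+    # handler up roughly like this (the real entry point lives further down
+    # in this file, outside this hunk):
+    #
+    #     mux_handler = _MuxHandler(request, dispatcher)
+    #     mux_handler.start()
+    #     mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+    #                                   _INITIAL_QUOTA_FOR_CLIENT)
+    #     mux_handler.wait_until_done()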
+ def __init__(self, request, dispatcher):
+ """Constructs an instance.
+
+ Args:
+ request: mod_python request of the physical connection.
+ dispatcher: Dispatcher instance (dispatch.Dispatcher).
+ """
+
+ self.original_request = request
+ self.dispatcher = dispatcher
+ self.physical_connection = request.connection
+ self.physical_stream = request.ws_stream
+ self._logger = util.get_class_logger(self)
+ self._logical_channels = {}
+ self._logical_channels_condition = threading.Condition()
+ # Holds client's initial quota
+ self._channel_slots = collections.deque()
+ self._handshake_base = None
+ self._worker_done_notify_received = False
+ self._reader = None
+ self._writer = None
+
+ def start(self):
+ """Starts the handler.
+
+ Raises:
+ MuxUnexpectedException: when the handler already started, or when
+ opening handshake of the default channel fails.
+ """
+
+ if self._reader or self._writer:
+ raise MuxUnexpectedException('MuxHandler already started')
+
+ self._reader = _PhysicalConnectionReader(self)
+ self._writer = _PhysicalConnectionWriter(self)
+ self._reader.start()
+ self._writer.start()
+
+ # Create "Implicitly Opened Connection".
+ logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
+ headers = copy.copy(self.original_request.headers_in)
+ # Add extensions for logical channel.
+ headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
+ common.format_extensions(
+ self.original_request.mux_processor.extensions()))
+ self._handshake_base = _HandshakeDeltaBase(headers)
+ logical_request = _LogicalRequest(
+ _DEFAULT_CHANNEL_ID,
+ self.original_request.method,
+ self.original_request.uri,
+ self.original_request.protocol,
+ self._handshake_base.create_headers(),
+ logical_connection)
+ # Client's send quota for the implicitly opened connection is zero,
+ # but we will send FlowControl later so set the initial quota to
+ # _INITIAL_QUOTA_FOR_CLIENT.
+ self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
+ send_quota = self.original_request.mux_processor.quota()
+ if not self._do_handshake_for_logical_request(
+ logical_request, send_quota=send_quota):
+ raise MuxUnexpectedException(
+ 'Failed handshake on the default channel id')
+ self._add_logical_channel(logical_request)
+
+ # Send FlowControl for the implicitly opened connection.
+ frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
+ _INITIAL_QUOTA_FOR_CLIENT)
+ logical_request.connection.write_control_data(frame_data)
+
+ def add_channel_slots(self, slots, send_quota):
+ """Adds channel slots.
+
+ Args:
+ slots: number of slots to be added.
+ send_quota: initial send quota for slots.
+ """
+
+ self._channel_slots.extend([send_quota] * slots)
+ # Send NewChannelSlot to client.
+ frame_data = _create_new_channel_slot(slots, send_quota)
+ self.send_control_data(frame_data)
+
+ def wait_until_done(self, timeout=None):
+ """Waits until all workers are done. Returns False when timeout has
+ occurred. Returns True on success.
+
+ Args:
+ timeout: timeout in sec.
+ """
+
+ self._logical_channels_condition.acquire()
+ try:
+ while len(self._logical_channels) > 0:
+ self._logger.debug('Waiting workers(%d)...' %
+ len(self._logical_channels))
+ self._worker_done_notify_received = False
+ self._logical_channels_condition.wait(timeout)
+ if not self._worker_done_notify_received:
+ self._logger.debug('Waiting worker(s) timed out')
+ return False
+ finally:
+ self._logical_channels_condition.release()
+
+ # Flush pending outgoing data
+ self._writer.stop()
+ self._writer.join()
+
+ return True
+
+ def notify_write_data_done(self, channel_id):
+        """Called by the writer thread when a write operation is done.
+
+ Args:
+ channel_id: objective channel id.
+ """
+
+ try:
+ self._logical_channels_condition.acquire()
+ if channel_id in self._logical_channels:
+ channel_data = self._logical_channels[channel_id]
+ channel_data.request.connection.on_write_data_done()
+ else:
+ self._logger.debug('Seems that logical channel for %d has gone'
+ % channel_id)
+ finally:
+ self._logical_channels_condition.release()
+
+ def send_control_data(self, data):
+ """Sends data via the control channel.
+
+ Args:
+ data: data to be sent.
+ """
+
+ self._writer.put_outgoing_data(_OutgoingData(
+ channel_id=_CONTROL_CHANNEL_ID, data=data))
+
+ def send_data(self, channel_id, data):
+ """Sends data via given logical channel. This method is called by
+ worker threads.
+
+ Args:
+ channel_id: objective channel id.
+ data: data to be sent.
+ """
+
+ self._writer.put_outgoing_data(_OutgoingData(
+ channel_id=channel_id, data=data))
+
+ def _send_drop_channel(self, channel_id, code=None, message=''):
+ frame_data = _create_drop_channel(channel_id, code, message)
+ self._logger.debug(
+ 'Sending drop channel for channel id %d' % channel_id)
+ self.send_control_data(frame_data)
+
+ def _send_error_add_channel_response(self, channel_id, status=None):
+ if status is None:
+ status = common.HTTP_STATUS_BAD_REQUEST
+
+ if status in _HTTP_BAD_RESPONSE_MESSAGES:
+ message = _HTTP_BAD_RESPONSE_MESSAGES[status]
+ else:
+ self._logger.debug('Response message for %d is not found' % status)
+ message = '???'
+
+ response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
+ frame_data = _create_add_channel_response(channel_id,
+ encoded_handshake=response,
+ encoding=0, rejected=True)
+ self.send_control_data(frame_data)
+
+ def _create_logical_request(self, block):
+ if block.channel_id == _CONTROL_CHANNEL_ID:
+ # TODO(bashi): Raise PhysicalConnectionError with code 2006
+ # instead of MuxUnexpectedException.
+ raise MuxUnexpectedException(
+ 'Received the control channel id (0) as objective channel '
+ 'id for AddChannel')
+
+ if block.encoding > _HANDSHAKE_ENCODING_DELTA:
+ raise PhysicalConnectionError(
+ _DROP_CODE_UNKNOWN_REQUEST_ENCODING)
+
+ method, path, version, headers = _parse_request_text(
+ block.encoded_handshake)
+ if block.encoding == _HANDSHAKE_ENCODING_DELTA:
+ headers = self._handshake_base.create_headers(headers)
+
+ connection = _LogicalConnection(self, block.channel_id)
+ request = _LogicalRequest(block.channel_id, method, path, version,
+ headers, connection)
+ return request
+
+ def _do_handshake_for_logical_request(self, request, send_quota=0):
+ try:
+ receive_quota = self._channel_slots.popleft()
+ except IndexError:
+ raise LogicalChannelError(
+ request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
+
+ handshaker = _MuxHandshaker(request, self.dispatcher,
+ send_quota, receive_quota)
+ try:
+ handshaker.do_handshake()
+ except handshake.VersionException, e:
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(
+ request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+ return False
+ except handshake.HandshakeException, e:
+ # TODO(bashi): Should we _Fail the Logical Channel_ with 3001
+ # instead?
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(request.channel_id,
+ status=e.status)
+ return False
+ except handshake.AbortedByUserException, e:
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(request.channel_id)
+ return False
+
+ return True
+
+ def _add_logical_channel(self, logical_request):
+ try:
+ self._logical_channels_condition.acquire()
+ if logical_request.channel_id in self._logical_channels:
+ self._logger.debug('Channel id %d already exists' %
+ logical_request.channel_id)
+ raise PhysicalConnectionError(
+ _DROP_CODE_CHANNEL_ALREADY_EXISTS,
+ 'Channel id %d already exists' %
+ logical_request.channel_id)
+ worker = _Worker(self, logical_request)
+ channel_data = _LogicalChannelData(logical_request, worker)
+ self._logical_channels[logical_request.channel_id] = channel_data
+ worker.start()
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_add_channel_request(self, block):
+ try:
+ logical_request = self._create_logical_request(block)
+ except ValueError, e:
+ self._logger.debug('Failed to create logical request: %r' % e)
+ self._send_error_add_channel_response(
+ block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+ return
+ if self._do_handshake_for_logical_request(logical_request):
+ if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
+ # Update handshake base.
+ # TODO(bashi): Make sure this is the right place to update
+ # handshake base.
+ self._handshake_base = _HandshakeDeltaBase(
+ logical_request.headers_in)
+ self._add_logical_channel(logical_request)
+ else:
+ self._send_error_add_channel_response(
+ block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+
+ def _process_flow_control(self, block):
+ try:
+ self._logical_channels_condition.acquire()
+            if block.channel_id not in self._logical_channels:
+ return
+ channel_data = self._logical_channels[block.channel_id]
+ channel_data.request.ws_stream.replenish_send_quota(
+ block.send_quota)
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_drop_channel(self, block):
+ self._logger.debug(
+ 'DropChannel received for %d: code=%r, reason=%r' %
+ (block.channel_id, block.drop_code, block.drop_message))
+ try:
+ self._logical_channels_condition.acquire()
+            if block.channel_id not in self._logical_channels:
+ return
+ channel_data = self._logical_channels[block.channel_id]
+ channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
+
+ # Close the logical channel
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ channel_data.request.ws_stream.stop_sending()
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_control_blocks(self, parser):
+ for control_block in parser.read_control_blocks():
+ opcode = control_block.opcode
+ self._logger.debug('control block received, opcode: %d' % opcode)
+ if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+ self._process_add_channel_request(control_block)
+ elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Received AddChannelResponse')
+ elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+ self._process_flow_control(control_block)
+ elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+ self._process_drop_channel(control_block)
+ elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Received NewChannelSlot')
+ else:
+ raise MuxUnexpectedException(
+ 'Unexpected opcode %r' % opcode)
+
+ def _process_logical_frame(self, channel_id, parser):
+ self._logger.debug('Received a frame. channel id=%d' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+            if channel_id not in self._logical_channels:
+ # We must ignore the message for an inactive channel.
+ return
+ channel_data = self._logical_channels[channel_id]
+ fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
+ consuming_byte = len(payload)
+ if opcode != common.OPCODE_CONTINUATION:
+ consuming_byte += 1
+ if not channel_data.request.ws_stream.consume_receive_quota(
+ consuming_byte):
+                # The client violated the quota. Close the logical channel.
+ raise LogicalChannelError(
+ channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
+ header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
+ mask=False)
+ frame_data = header + payload
+ channel_data.request.connection.append_frame_data(frame_data)
+ finally:
+ self._logical_channels_condition.release()
+
+ def dispatch_message(self, message):
+ """Dispatches message. The reader thread calls this method.
+
+ Args:
+            message: a message that contains an encapsulated frame.
+ Raises:
+ PhysicalConnectionError: if the message contains physical
+ connection level errors.
+ LogicalChannelError: if the message contains logical channel
+ level errors.
+ """
+
+ parser = _MuxFramePayloadParser(message)
+ try:
+ channel_id = parser.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
+ if channel_id == _CONTROL_CHANNEL_ID:
+ self._process_control_blocks(parser)
+ else:
+ self._process_logical_frame(channel_id, parser)
+
+ def notify_worker_done(self, channel_id):
+ """Called when a worker has finished.
+
+ Args:
+            channel_id: channel id corresponding to the worker.
+ """
+
+ self._logger.debug('Worker for channel id %d terminated' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+            if channel_id not in self._logical_channels:
+ raise MuxUnexpectedException(
+ 'Channel id %d not found' % channel_id)
+ channel_data = self._logical_channels.pop(channel_id)
+ finally:
+ self._worker_done_notify_received = True
+ self._logical_channels_condition.notify()
+ self._logical_channels_condition.release()
+
+ if not channel_data.request.server_terminated:
+ self._send_drop_channel(
+ channel_id, code=channel_data.drop_code,
+ message=channel_data.drop_message)
+
+ def notify_reader_done(self):
+ """This method is called by the reader thread when the reader has
+ finished.
+ """
+
+ self._logger.debug(
+            'Terminating all logical connections waiting for incoming data '
+ '...')
+ self._logical_channels_condition.acquire()
+ for channel_data in self._logical_channels.values():
+ try:
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ except Exception:
+ self._logger.debug(traceback.format_exc())
+ self._logical_channels_condition.release()
+
+ def notify_writer_done(self):
+ """This method is called by the writer thread when the writer has
+ finished.
+ """
+
+ self._logger.debug(
+            'Terminating all logical connections waiting for write '
+ 'completion ...')
+ self._logical_channels_condition.acquire()
+ for channel_data in self._logical_channels.values():
+ try:
+ channel_data.request.connection.on_writer_done()
+ except Exception:
+ self._logger.debug(traceback.format_exc())
+ self._logical_channels_condition.release()
+
+ def fail_physical_connection(self, code, message):
+ """Fail the physical connection.
+
+ Args:
+ code: drop reason code.
+ message: drop message.
+ """
+
+ self._logger.debug('Failing the physical connection...')
+ self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
+ self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
+
+ def fail_logical_channel(self, channel_id, code, message):
+ """Fail a logical channel.
+
+ Args:
+ channel_id: channel id.
+ code: drop reason code.
+ message: drop message.
+ """
+
+ self._logger.debug('Failing logical channel %d...' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+ if channel_id in self._logical_channels:
+ channel_data = self._logical_channels[channel_id]
+ # Close the logical channel. notify_worker_done() will be
+ # called later and it will send DropChannel.
+ channel_data.drop_code = code
+ channel_data.drop_message = message
+
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ channel_data.request.ws_stream.stop_sending()
+ else:
+ self._send_drop_channel(channel_id, code, message)
+ finally:
+ self._logical_channels_condition.release()
+
+
+def use_mux(request):
+ return hasattr(request, 'mux_processor') and (
+ request.mux_processor.is_active())
+
+
+def start(request, dispatcher):
+ mux_handler = _MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+ mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ _INITIAL_QUOTA_FOR_CLIENT)
+
+ mux_handler.wait_until_done()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/standalone.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/standalone.py
new file mode 100755
index 000000000..24c299eaf
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/standalone.py
@@ -0,0 +1,1193 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Standalone WebSocket server.
+
+Use this file to launch pywebsocket without Apache HTTP Server.
+
+
+BASIC USAGE
+===========
+
+Go to the src directory and run
+
+ $ python mod_pywebsocket/standalone.py [-p <ws_port>]
+ [-w <websock_handlers>]
+ [-d <document_root>]
+
+<ws_port> is the port number to use for ws:// connection.
+
+<document_root> is the path to the root directory of HTML files.
+
+<websock_handlers> is the path to the root directory of WebSocket handlers.
+If not specified, <document_root> will be used. See __init__.py (or
+run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
+
+For more detail and other options, run
+
+ $ python mod_pywebsocket/standalone.py --help
+
+or see _build_option_parser method below.
+
+For troubleshooting, adding "--log_level debug" might help you.
+
+
+TRY DEMO
+========
+
+Go to the src directory and run standalone.py with -d option to set the
+document root to the directory containing example HTMLs and handlers like this:
+
+ $ cd src
+ $ PYTHONPATH=. python mod_pywebsocket/standalone.py -d example
+
+to launch pywebsocket with the sample handler and html on port 80. Open
+http://localhost/console.html, click the connect button, type something into
+the text box next to the send button and click the send button. If everything
+is working, you'll see the message you typed echoed by the server.
+
+
+USING TLS
+=========
+
+To run the standalone server with TLS support, run it with -t, -k, and -c
+options. When TLS is enabled, the standalone server accepts only TLS connections.
+
+Note that when the ssl module is used and the key/cert location is incorrect,
+the TLS connection fails silently, while pyOpenSSL fails on startup.
+
+Example:
+
+ $ PYTHONPATH=. python mod_pywebsocket/standalone.py \
+ -d example \
+ -p 10443 \
+ -t \
+ -c ../test/cert/cert.pem \
+ -k ../test/cert/key.pem \
+
+Note that when passing a relative path to the -c and -k options, it will be
+resolved using the document root directory as the base.
+
+
+USING CLIENT AUTHENTICATION
+===========================
+
+To run the standalone server with TLS client authentication support, run it with
+--tls-client-auth and --tls-client-ca options in addition to ones required for
+TLS support.
+
+Example:
+
+ $ PYTHONPATH=. python mod_pywebsocket/standalone.py -d example -p 10443 -t \
+ -c ../test/cert/cert.pem -k ../test/cert/key.pem \
+ --tls-client-auth \
+ --tls-client-ca=../test/cert/cacert.pem
+
+Note that when passing a relative path to the --tls-client-ca option, it will
+be resolved using the document root directory as the base.
+
+
+CONFIGURATION FILE
+==================
+
+You can also write a configuration file and use it by specifying its path
+with the --config option. Please write the configuration file following the
+documentation of the Python ConfigParser library. The name of each entry must
+be the long-form argument name. E.g. to set the log level to debug, add the
+following line:
+
+log_level=debug
+
+For options which don't take a value, please add some placeholder value. E.g.
+for the --tls option, add the following line:
+
+tls=True
+
+Note that tls will be enabled even if you write tls=False, because the value
+part is ignored.
+
+When both a command line argument and a configuration file entry are set for
+the same configuration item, the command line value overrides the one in the
+configuration file.
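+
+For illustration, a minimal configuration file might look like this (note
+that _parse_args_and_config reads entries from a [pywebsocket] section, so
+the section header is required; the port number here is just an example):
+
+[pywebsocket]
+log_level=debug
+port=9998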
+
+
+THREADING
+=========
+
+This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
+used for each request.
+
+
+SECURITY WARNING
+================
+
+This server uses CGIHTTPServer, which is not secure.
+It may execute arbitrary Python code or external programs. It should not be
+used outside a firewall.
+"""
+
+import BaseHTTPServer
+import CGIHTTPServer
+import SimpleHTTPServer
+import SocketServer
+import ConfigParser
+import base64
+import httplib
+import logging
+import logging.handlers
+import optparse
+import os
+import re
+import select
+import socket
+import sys
+import threading
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import http_header_util
+from mod_pywebsocket import memorizingfile
+from mod_pywebsocket import util
+from mod_pywebsocket.xhr_benchmark_handler import XHRBenchmarkHandler
+
+
+_DEFAULT_LOG_MAX_BYTES = 1024 * 256
+_DEFAULT_LOG_BACKUP_COUNT = 5
+
+_DEFAULT_REQUEST_QUEUE_SIZE = 128
+
+# 1024 is practically large enough to contain WebSocket handshake lines.
+_MAX_MEMORIZED_LINES = 1024
+
+# Constants for the --tls_module flag.
+_TLS_BY_STANDARD_MODULE = 'ssl'
+_TLS_BY_PYOPENSSL = 'pyopenssl'
+
+
+class _StandaloneConnection(object):
+ """Mimic mod_python mp_conn."""
+
+ def __init__(self, request_handler):
+ """Construct an instance.
+
+ Args:
+ request_handler: A WebSocketRequestHandler instance.
+ """
+
+ self._request_handler = request_handler
+
+ def get_local_addr(self):
+ """Getter to mimic mp_conn.local_addr."""
+
+ return (self._request_handler.server.server_name,
+ self._request_handler.server.server_port)
+ local_addr = property(get_local_addr)
+
+ def get_remote_addr(self):
+ """Getter to mimic mp_conn.remote_addr.
+
+ Setting the property in __init__ won't work because the request
+ handler is not initialized yet there."""
+
+ return self._request_handler.client_address
+ remote_addr = property(get_remote_addr)
+
+ def write(self, data):
+ """Mimic mp_conn.write()."""
+
+ return self._request_handler.wfile.write(data)
+
+ def read(self, length):
+ """Mimic mp_conn.read()."""
+
+ return self._request_handler.rfile.read(length)
+
+ def get_memorized_lines(self):
+ """Get memorized lines."""
+
+ return self._request_handler.rfile.get_memorized_lines()
+
+
+class _StandaloneRequest(object):
+ """Mimic mod_python request."""
+
+ def __init__(self, request_handler, use_tls):
+ """Construct an instance.
+
+ Args:
+ request_handler: A WebSocketRequestHandler instance.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request_handler = request_handler
+ self.connection = _StandaloneConnection(request_handler)
+ self._use_tls = use_tls
+ self.headers_in = request_handler.headers
+
+ def get_uri(self):
+ """Getter to mimic request.uri.
+
+ This method returns the raw data at the Request-URI part of the
+ Request-Line, while the uri method on the request object of mod_python
+ returns the path portion after parsing the raw data. This behavior is
+ kept for compatibility.
+ """
+
+ return self._request_handler.path
+ uri = property(get_uri)
+
+ def get_unparsed_uri(self):
+ """Getter to mimic request.unparsed_uri."""
+
+ return self._request_handler.path
+ unparsed_uri = property(get_unparsed_uri)
+
+ def get_method(self):
+ """Getter to mimic request.method."""
+
+ return self._request_handler.command
+ method = property(get_method)
+
+ def get_protocol(self):
+ """Getter to mimic request.protocol."""
+
+ return self._request_handler.request_version
+ protocol = property(get_protocol)
+
+ def is_https(self):
+ """Mimic request.is_https()."""
+
+ return self._use_tls
+
+
+def _import_ssl():
+ global ssl
+ try:
+ import ssl
+ return True
+ except ImportError:
+ return False
+
+
+def _import_pyopenssl():
+ global OpenSSL
+ try:
+ import OpenSSL.SSL
+ return True
+ except ImportError:
+ return False
+
+
+class _StandaloneSSLConnection(object):
+ """A wrapper class for OpenSSL.SSL.Connection to
+    - provide a makefile method, which is not supported by the class
+ - tweak shutdown method since OpenSSL.SSL.Connection.shutdown doesn't
+ accept the "how" argument.
+ - convert SysCallError exceptions that its recv method may raise into a
+ return value of '', meaning EOF. We cannot overwrite the recv method on
+ self._connection since it's immutable.
+ """
+
+ _OVERRIDDEN_ATTRIBUTES = ['_connection', 'makefile', 'shutdown', 'recv']
+
+ def __init__(self, connection):
+ self._connection = connection
+
+ def __getattribute__(self, name):
+ if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
+ return object.__getattribute__(self, name)
+ return self._connection.__getattribute__(name)
+
+ def __setattr__(self, name, value):
+ if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
+ return object.__setattr__(self, name, value)
+ return self._connection.__setattr__(name, value)
+
+ def makefile(self, mode='r', bufsize=-1):
+ return socket._fileobject(self, mode, bufsize)
+
+ def shutdown(self, unused_how):
+ self._connection.shutdown()
+
+ def recv(self, bufsize, flags=0):
+ if flags != 0:
+ raise ValueError('Non-zero flags not allowed')
+
+ try:
+ return self._connection.recv(bufsize)
+ except OpenSSL.SSL.SysCallError, (err, message):
+ if err == -1:
+ # Suppress "unexpected EOF" exception. See the OpenSSL document
+ # for SSL_get_error.
+ return ''
+ raise
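+
+
+# Illustrative example (not part of the original pywebsocket module): shows
+# the delegation behavior of _StandaloneSSLConnection. Attributes that are
+# not in _OVERRIDDEN_ATTRIBUTES are forwarded to the wrapped connection
+# object. Uses a stand-in object instead of a real OpenSSL connection and is
+# never called.
+def _standalone_ssl_connection_delegation_example():
+    class _FakeConnection(object):
+        def getpeername(self):
+            return ('127.0.0.1', 0)
+
+    wrapped = _StandaloneSSLConnection(_FakeConnection())
+    assert wrapped.getpeername() == ('127.0.0.1', 0)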
+
+
+def _alias_handlers(dispatcher, websock_handlers_map_file):
+    """Set aliases specified in websock_handlers_map_file in dispatcher.
+
+ Args:
+ dispatcher: dispatch.Dispatcher instance
+        websock_handlers_map_file: alias map file
+ """
+
+ fp = open(websock_handlers_map_file)
+ try:
+ for line in fp:
+ if line[0] == '#' or line.isspace():
+ continue
+            m = re.match(r'(\S+)\s+(\S+)', line)
+            if not m:
+                logging.warning('Wrong format in map file: ' + line)
+ continue
+ try:
+ dispatcher.add_resource_path_alias(
+ m.group(1), m.group(2))
+ except dispatch.DispatchException, e:
+ logging.error(str(e))
+ finally:
+ fp.close()
+
+
+class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ """HTTPServer specialized for WebSocket."""
+
+ # Overrides SocketServer.ThreadingMixIn.daemon_threads
+ daemon_threads = True
+ # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
+ allow_reuse_address = True
+
+ def __init__(self, options):
+ """Override SocketServer.TCPServer.__init__ to set SSL enabled
+ socket object to self.socket before server_bind and server_activate,
+ if necessary.
+ """
+
+        # Share a Dispatcher among request handlers to save instantiation
+        # time. The Dispatcher can be shared because it is thread-safe.
+ options.dispatcher = dispatch.Dispatcher(
+ options.websock_handlers,
+ options.scan_dir,
+ options.allow_handlers_outside_root_dir)
+ if options.websock_handlers_map_file:
+ _alias_handlers(options.dispatcher,
+ options.websock_handlers_map_file)
+ warnings = options.dispatcher.source_warnings()
+ if warnings:
+ for warning in warnings:
+ logging.warning('Warning in source loading: %s' % warning)
+
+ self._logger = util.get_class_logger(self)
+
+ self.request_queue_size = options.request_queue_size
+ self.__ws_is_shut_down = threading.Event()
+ self.__ws_serving = False
+
+ SocketServer.BaseServer.__init__(
+ self, (options.server_host, options.port), WebSocketRequestHandler)
+
+        # Expose the options object to allow handler objects to access it. We
+        # name it with the websocket_ prefix to avoid conflicts.
+ self.websocket_server_options = options
+
+ self._create_sockets()
+ self.server_bind()
+ self.server_activate()
+
+ def _create_sockets(self):
+ self.server_name, self.server_port = self.server_address
+ self._sockets = []
+ if not self.server_name:
+            # On platforms that don't support IPv6, the first bind fails.
+            # On platforms that support IPv6:
+            # - If the call with AF_INET6 binds both IPv4 and IPv6, the
+            #   first bind succeeds and the second fails (we'll see an
+            #   'Address already in use' error).
+            # - If the call with AF_INET6 binds only IPv6, both calls are
+            #   expected to succeed so that we listen on both protocols.
+ addrinfo_array = [
+ (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
+ (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
+ else:
+ addrinfo_array = socket.getaddrinfo(self.server_name,
+ self.server_port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM,
+ socket.IPPROTO_TCP)
+ for addrinfo in addrinfo_array:
+ self._logger.info('Create socket on: %r', addrinfo)
+ family, socktype, proto, canonname, sockaddr = addrinfo
+ try:
+ socket_ = socket.socket(family, socktype)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ continue
+ server_options = self.websocket_server_options
+ if server_options.use_tls:
+ # For the case of _HAS_OPEN_SSL, we do wrapper setup after
+ # accept.
+ if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
+ if server_options.tls_client_auth:
+ if server_options.tls_client_cert_optional:
+ client_cert_ = ssl.CERT_OPTIONAL
+ else:
+ client_cert_ = ssl.CERT_REQUIRED
+ else:
+ client_cert_ = ssl.CERT_NONE
+ socket_ = ssl.wrap_socket(socket_,
+ keyfile=server_options.private_key,
+ certfile=server_options.certificate,
+ ssl_version=ssl.PROTOCOL_SSLv23,
+ ca_certs=server_options.tls_client_ca,
+ cert_reqs=client_cert_,
+ do_handshake_on_connect=False)
+ self._sockets.append((socket_, addrinfo))
+
+ def server_bind(self):
+ """Override SocketServer.TCPServer.server_bind to enable multiple
+ sockets bind.
+ """
+
+ failed_sockets = []
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Bind on: %r', addrinfo)
+ if self.allow_reuse_address:
+ socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ socket_.bind(self.server_address)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ socket_.close()
+ failed_sockets.append(socketinfo)
+ if self.server_address[1] == 0:
+                # The operating system assigns the actual port number for port
+                # number 0. In this case, the second and later sockets should
+                # use the same port number. Also, self.server_port is
+                # rewritten because it is exported and will be used by
+                # external code.
+ self.server_address = (
+ self.server_name, socket_.getsockname()[1])
+ self.server_port = self.server_address[1]
+ self._logger.info('Port %r is assigned', self.server_port)
+
+ for socketinfo in failed_sockets:
+ self._sockets.remove(socketinfo)
+
+ def server_activate(self):
+ """Override SocketServer.TCPServer.server_activate to enable multiple
+ sockets listen.
+ """
+
+ failed_sockets = []
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Listen on: %r', addrinfo)
+ try:
+ socket_.listen(self.request_queue_size)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ socket_.close()
+ failed_sockets.append(socketinfo)
+
+ for socketinfo in failed_sockets:
+ self._sockets.remove(socketinfo)
+
+ if len(self._sockets) == 0:
+ self._logger.critical(
+ 'No sockets activated. Use info log level to see the reason.')
+
+ def server_close(self):
+ """Override SocketServer.TCPServer.server_close to enable multiple
+ sockets close.
+ """
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Close on: %r', addrinfo)
+ socket_.close()
+
+ def fileno(self):
+ """Override SocketServer.TCPServer.fileno."""
+
+ self._logger.critical('Not supported: fileno')
+ return self._sockets[0][0].fileno()
+
+ def handle_error(self, request, client_address):
+ """Override SocketServer.handle_error."""
+
+ self._logger.error(
+ 'Exception in processing request from: %r\n%s',
+ client_address,
+ util.get_stack_trace())
+ # Note: client_address is a tuple.
+
+ def get_request(self):
+ """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
+ object with _StandaloneSSLConnection to provide makefile method. We
+ cannot substitute OpenSSL.SSL.Connection.makefile since it's readonly
+ attribute.
+ """
+
+ accepted_socket, client_address = self.socket.accept()
+
+ server_options = self.websocket_server_options
+ if server_options.use_tls:
+ if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
+ try:
+ accepted_socket.do_handshake()
+ except ssl.SSLError, e:
+ self._logger.debug('%r', e)
+ raise
+
+ # Print cipher in use. Handshake is done on accept.
+ self._logger.debug('Cipher: %s', accepted_socket.cipher())
+ self._logger.debug('Client cert: %r',
+ accepted_socket.getpeercert())
+ elif server_options.tls_module == _TLS_BY_PYOPENSSL:
+ # We cannot print the cipher in use. pyOpenSSL doesn't provide
+ # any method to fetch that.
+
+ ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
+ ctx.use_privatekey_file(server_options.private_key)
+ ctx.use_certificate_file(server_options.certificate)
+
+ def default_callback(conn, cert, errnum, errdepth, ok):
+ return ok == 1
+
+ # See the OpenSSL document for SSL_CTX_set_verify.
+ if server_options.tls_client_auth:
+ verify_mode = OpenSSL.SSL.VERIFY_PEER
+ if not server_options.tls_client_cert_optional:
+ verify_mode |= OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT
+ ctx.set_verify(verify_mode, default_callback)
+ ctx.load_verify_locations(server_options.tls_client_ca,
+ None)
+ else:
+ ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, default_callback)
+
+ accepted_socket = OpenSSL.SSL.Connection(ctx, accepted_socket)
+ accepted_socket.set_accept_state()
+
+ # Convert SSL related error into socket.error so that
+ # SocketServer ignores them and keeps running.
+ #
+ # TODO(tyoshino): Convert all kinds of errors.
+ try:
+ accepted_socket.do_handshake()
+ except OpenSSL.SSL.Error, e:
+ # Set errno part to 1 (SSL_ERROR_SSL) like the ssl module
+ # does.
+ self._logger.debug('%r', e)
+ raise socket.error(1, '%r' % e)
+ cert = accepted_socket.get_peer_certificate()
+ if cert is not None:
+ self._logger.debug('Client cert subject: %r',
+ cert.get_subject().get_components())
+ accepted_socket = _StandaloneSSLConnection(accepted_socket)
+ else:
+ raise ValueError('No TLS support module is available')
+
+ return accepted_socket, client_address
+
+ def serve_forever(self, poll_interval=0.5):
+ """Override SocketServer.BaseServer.serve_forever."""
+
+ self.__ws_serving = True
+ self.__ws_is_shut_down.clear()
+ handle_request = self.handle_request
+ if hasattr(self, '_handle_request_noblock'):
+ handle_request = self._handle_request_noblock
+ else:
+ self._logger.warning('Fallback to blocking request handler')
+ try:
+ while self.__ws_serving:
+ r, w, e = select.select(
+ [socket_[0] for socket_ in self._sockets],
+ [], [], poll_interval)
+ for socket_ in r:
+ self.socket = socket_
+ handle_request()
+ self.socket = None
+ finally:
+ self.__ws_is_shut_down.set()
+
+ def shutdown(self):
+ """Override SocketServer.BaseServer.shutdown."""
+
+ self.__ws_serving = False
+ self.__ws_is_shut_down.wait()
+
+
+class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
+ """CGIHTTPRequestHandler specialized for WebSocket."""
+
+ # Use httplib.HTTPMessage instead of mimetools.Message.
+ MessageClass = httplib.HTTPMessage
+
+ protocol_version = "HTTP/1.1"
+
+ def setup(self):
+ """Override SocketServer.StreamRequestHandler.setup to wrap rfile
+ with MemorizingFile.
+
+ This method will be called by BaseRequestHandler's constructor
+ before calling BaseHTTPRequestHandler.handle.
+ BaseHTTPRequestHandler.handle will call
+ BaseHTTPRequestHandler.handle_one_request and it will call
+ WebSocketRequestHandler.parse_request.
+ """
+
+ # Call superclass's setup to prepare rfile, wfile, etc. See setup
+ # definition on the root class SocketServer.StreamRequestHandler to
+ # understand what this does.
+ CGIHTTPServer.CGIHTTPRequestHandler.setup(self)
+
+ self.rfile = memorizingfile.MemorizingFile(
+ self.rfile,
+ max_memorized_lines=_MAX_MEMORIZED_LINES)
+
+ def __init__(self, request, client_address, server):
+ self._logger = util.get_class_logger(self)
+
+ self._options = server.websocket_server_options
+
+        # Overrides CGIHTTPServer.CGIHTTPRequestHandler.cgi_directories.
+ self.cgi_directories = self._options.cgi_directories
+ # Replace CGIHTTPRequestHandler.is_executable method.
+ if self._options.is_executable_method is not None:
+ self.is_executable = self._options.is_executable_method
+
+ # This actually calls BaseRequestHandler.__init__.
+ CGIHTTPServer.CGIHTTPRequestHandler.__init__(
+ self, request, client_address, server)
+
+ def parse_request(self):
+ """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.
+
+ Return True to continue processing for HTTP(S), False otherwise.
+
+ See BaseHTTPRequestHandler.handle_one_request method which calls
+ this method to understand how the return value will be handled.
+ """
+
+ # We hook parse_request method, but also call the original
+ # CGIHTTPRequestHandler.parse_request since when we return False,
+ # CGIHTTPRequestHandler.handle_one_request continues processing and
+ # it needs variables set by CGIHTTPRequestHandler.parse_request.
+ #
+        # Variables set by this method will also be used by WebSocket request
+ # handling (self.path, self.command, self.requestline, etc. See also
+ # how _StandaloneRequest's members are implemented using these
+ # attributes).
+ if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
+ return False
+
+ if self.command == "CONNECT":
+ self.send_response(200, "Connected")
+ self.send_header("Connection", "keep-alive")
+ self.end_headers()
+ return False
+
+ if self._options.use_basic_auth:
+ auth = self.headers.getheader('Authorization')
+ if auth != self._options.basic_auth_credential:
+ self.send_response(401)
+ self.send_header('WWW-Authenticate',
+ 'Basic realm="Pywebsocket"')
+ self.end_headers()
+ self._logger.info('Request basic authentication')
+ return False
+
+ host, port, resource = http_header_util.parse_uri(self.path)
+
+ # Special paths for XMLHttpRequest benchmark
+ xhr_benchmark_helper_prefix = '/073be001e10950692ccbf3a2ad21c245'
+ if resource == (xhr_benchmark_helper_prefix + '_send'):
+ xhr_benchmark_handler = XHRBenchmarkHandler(
+ self.headers, self.rfile, self.wfile)
+ xhr_benchmark_handler.do_send()
+ return False
+ if resource == (xhr_benchmark_helper_prefix + '_receive'):
+ xhr_benchmark_handler = XHRBenchmarkHandler(
+ self.headers, self.rfile, self.wfile)
+ xhr_benchmark_handler.do_receive()
+ return False
+
+ if resource is None:
+ self._logger.info('Invalid URI: %r', self.path)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ server_options = self.server.websocket_server_options
+ if host is not None:
+ validation_host = server_options.validation_host
+ if validation_host is not None and host != validation_host:
+ self._logger.info('Invalid host: %r (expected: %r)',
+ host,
+ validation_host)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ if port is not None:
+ validation_port = server_options.validation_port
+ if validation_port is not None and port != validation_port:
+ self._logger.info('Invalid port: %r (expected: %r)',
+ port,
+ validation_port)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ self.path = resource
+
+ request = _StandaloneRequest(self, self._options.use_tls)
+
+ try:
+            # Fall back to the default HTTP handler for request paths for
+            # which we don't have request handlers.
+ if not self._options.dispatcher.get_handler_suite(self.path):
+ self._logger.info('No handler for resource: %r',
+ self.path)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ except dispatch.DispatchException, e:
+ self._logger.info('Dispatch failed for error: %s', e)
+ self.send_error(e.status)
+ return False
+
+        # If any exception without a matching except clause (including
+        # DispatchException) is raised below this point, it will be caught
+        # and logged by WebSocketServer.
+
+ try:
+ try:
+ handshake.do_handshake(
+ request,
+ self._options.dispatcher,
+ allowDraft75=self._options.allow_draft75,
+ strict=self._options.strict)
+ except handshake.VersionException, e:
+ self._logger.info('Handshake failed for version error: %s', e)
+ self.send_response(common.HTTP_STATUS_BAD_REQUEST)
+ self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
+ e.supported_versions)
+ self.end_headers()
+ return False
+ except handshake.HandshakeException, e:
+ # Handshake for ws(s) failed.
+ self._logger.info('Handshake failed for error: %s', e)
+ self.send_error(e.status)
+ return False
+
+ request._dispatcher = self._options.dispatcher
+ self._options.dispatcher.transfer_data(request)
+ except handshake.AbortedByUserException, e:
+ self._logger.info('Aborted: %s', e)
+ return False
+
+ def log_request(self, code='-', size='-'):
+ """Override BaseHTTPServer.log_request."""
+
+ self._logger.info('"%s" %s %s',
+ self.requestline, str(code), str(size))
+
+ def log_error(self, *args):
+ """Override BaseHTTPServer.log_error."""
+
+        # Despite the name, this method is for warnings rather than errors.
+        # For example, HTTP status codes are logged by this method.
+ self._logger.warning('%s - %s',
+ self.address_string(),
+ args[0] % args[1:])
+
+ def is_cgi(self):
+ """Test whether self.path corresponds to a CGI script.
+
+        Add an extra check that self.path doesn't contain '..'.
+        Also check whether the file is executable or not.
+        If the file is not executable, it is handled as a static file or
+        directory rather than a CGI script.
+ """
+
+ if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
+ if '..' in self.path:
+ return False
+ # strip query parameter from request path
+ resource_name = self.path.split('?', 2)[0]
+ # convert resource_name into real path name in filesystem.
+ scriptfile = self.translate_path(resource_name)
+ if not os.path.isfile(scriptfile):
+ return False
+ if not self.is_executable(scriptfile):
+ return False
+ return True
+ return False
+
+
+def _get_logger_from_class(c):
+ return logging.getLogger('%s.%s' % (c.__module__, c.__name__))
+
+
+def _configure_logging(options):
+ logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')
+
+ logger = logging.getLogger()
+ logger.setLevel(logging.getLevelName(options.log_level.upper()))
+ if options.log_file:
+ handler = logging.handlers.RotatingFileHandler(
+ options.log_file, 'a', options.log_max, options.log_count)
+ else:
+ handler = logging.StreamHandler()
+ formatter = logging.Formatter(
+ '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ deflate_log_level_name = logging.getLevelName(
+ options.deflate_log_level.upper())
+ _get_logger_from_class(util._Deflater).setLevel(
+ deflate_log_level_name)
+ _get_logger_from_class(util._Inflater).setLevel(
+ deflate_log_level_name)
+
+
+def _build_option_parser():
+ parser = optparse.OptionParser()
+
+ parser.add_option('--config', dest='config_file', type='string',
+ default=None,
+ help=('Path to configuration file. See the file comment '
+ 'at the top of this file for the configuration '
+ 'file format'))
+ parser.add_option('-H', '--server-host', '--server_host',
+ dest='server_host',
+ default='',
+ help='server hostname to listen to')
+ parser.add_option('-V', '--validation-host', '--validation_host',
+ dest='validation_host',
+ default=None,
+ help='server hostname to validate in absolute path.')
+ parser.add_option('-p', '--port', dest='port', type='int',
+ default=common.DEFAULT_WEB_SOCKET_PORT,
+ help='port to listen to')
+ parser.add_option('-P', '--validation-port', '--validation_port',
+ dest='validation_port', type='int',
+ default=None,
+ help='server port to validate in absolute path.')
+ parser.add_option('-w', '--websock-handlers', '--websock_handlers',
+ dest='websock_handlers',
+ default='.',
+ help=('The root directory of WebSocket handler files. '
+ 'If the path is relative, --document-root is used '
+ 'as the base.'))
+ parser.add_option('-m', '--websock-handlers-map-file',
+ '--websock_handlers_map_file',
+ dest='websock_handlers_map_file',
+ default=None,
+ help=('WebSocket handlers map file. '
+ 'Each line consists of alias_resource_path and '
+ 'existing_resource_path, separated by spaces.'))
+ parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir',
+ default=None,
+ help=('Must be a directory under --websock-handlers. '
+ 'Only handlers under this directory are scanned '
+ 'and registered to the server. '
+ 'Useful for saving scan time when the handler '
+ 'root directory contains lots of files that are '
+ 'not handler file or are handler files but you '
+ 'don\'t want them to be registered. '))
+ parser.add_option('--allow-handlers-outside-root-dir',
+ '--allow_handlers_outside_root_dir',
+ dest='allow_handlers_outside_root_dir',
+ action='store_true',
+ default=False,
+ help=('Scans WebSocket handlers even if their canonical '
+ 'path is not under --websock-handlers.'))
+ parser.add_option('-d', '--document-root', '--document_root',
+ dest='document_root', default='.',
+ help='Document root directory.')
+ parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths',
+ default=None,
+                      help=('CGI paths relative to document_root. '
+                            'Comma-separated. (e.g. -x /cgi,/htbin) '
+ 'Files under document_root/cgi_path are handled '
+ 'as CGI programs. Must be executable.'))
+ parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
+ default=False, help='use TLS (wss://)')
+ parser.add_option('--tls-module', '--tls_module', dest='tls_module',
+ type='choice',
+ choices = [_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
+ help='Use ssl module if "%s" is specified. '
+ 'Use pyOpenSSL module if "%s" is specified' %
+ (_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
+ parser.add_option('-k', '--private-key', '--private_key',
+ dest='private_key',
+ default='', help='TLS private key file.')
+ parser.add_option('-c', '--certificate', dest='certificate',
+ default='', help='TLS certificate file.')
+ parser.add_option('--tls-client-auth', dest='tls_client_auth',
+ action='store_true', default=False,
+ help='Requests TLS client auth on every connection.')
+ parser.add_option('--tls-client-cert-optional',
+ dest='tls_client_cert_optional',
+ action='store_true', default=False,
+ help=('Makes client certificate optional even though '
+ 'TLS client auth is enabled.'))
+ parser.add_option('--tls-client-ca', dest='tls_client_ca', default='',
+ help=('Specifies a pem file which contains a set of '
+ 'concatenated CA certificates which are used to '
+ 'validate certificates passed from clients'))
+ parser.add_option('--basic-auth', dest='use_basic_auth',
+ action='store_true', default=False,
+ help='Requires Basic authentication.')
+ parser.add_option('--basic-auth-credential',
+ dest='basic_auth_credential', default='test:test',
+ help='Specifies the credential of basic authentication '
+ 'by username:password pair (e.g. test:test).')
+ parser.add_option('-l', '--log-file', '--log_file', dest='log_file',
+ default='', help='Log file.')
+ # Custom log level:
+ # - FINE: Prints status of each frame processing step
+ parser.add_option('--log-level', '--log_level', type='choice',
+ dest='log_level', default='warn',
+ choices=['fine',
+ 'debug', 'info', 'warning', 'warn', 'error',
+ 'critical'],
+ help='Log level.')
+ parser.add_option('--deflate-log-level', '--deflate_log_level',
+ type='choice',
+ dest='deflate_log_level', default='warn',
+ choices=['debug', 'info', 'warning', 'warn', 'error',
+ 'critical'],
+ help='Log level for _Deflater and _Inflater.')
+ parser.add_option('--thread-monitor-interval-in-sec',
+ '--thread_monitor_interval_in_sec',
+ dest='thread_monitor_interval_in_sec',
+ type='int', default=-1,
+                      help=('If a positive integer is specified, run a thread '
+                            'monitor to show the status of server threads '
+                            'periodically at the specified interval in '
+                            'seconds. If a non-positive integer is specified, '
+                            'disable the thread monitor.'))
+ parser.add_option('--log-max', '--log_max', dest='log_max', type='int',
+ default=_DEFAULT_LOG_MAX_BYTES,
+ help='Log maximum bytes')
+ parser.add_option('--log-count', '--log_count', dest='log_count',
+ type='int', default=_DEFAULT_LOG_BACKUP_COUNT,
+ help='Log backup count')
+ parser.add_option('--allow-draft75', dest='allow_draft75',
+ action='store_true', default=False,
+ help='Obsolete option. Ignored.')
+ parser.add_option('--strict', dest='strict', action='store_true',
+ default=False, help='Obsolete option. Ignored.')
+ parser.add_option('-q', '--queue', dest='request_queue_size', type='int',
+ default=_DEFAULT_REQUEST_QUEUE_SIZE,
+ help='request queue size')
+
+ return parser
+
+
+class ThreadMonitor(threading.Thread):
+ daemon = True
+
+ def __init__(self, interval_in_sec):
+ threading.Thread.__init__(self, name='ThreadMonitor')
+
+ self._logger = util.get_class_logger(self)
+
+ self._interval_in_sec = interval_in_sec
+
+ def run(self):
+ while True:
+ thread_name_list = []
+ for thread in threading.enumerate():
+ thread_name_list.append(thread.name)
+ self._logger.info(
+ "%d active threads: %s",
+ threading.active_count(),
+ ', '.join(thread_name_list))
+ time.sleep(self._interval_in_sec)
+
+
+def _parse_args_and_config(args):
+ parser = _build_option_parser()
+
+ # First, parse options without configuration file.
+ temporary_options, temporary_args = parser.parse_args(args=args)
+ if temporary_args:
+ logging.critical(
+ 'Unrecognized positional arguments: %r', temporary_args)
+ sys.exit(1)
+
+ if temporary_options.config_file:
+ try:
+ config_fp = open(temporary_options.config_file, 'r')
+ except IOError, e:
+ logging.critical(
+ 'Failed to open configuration file %r: %r',
+ temporary_options.config_file,
+ e)
+ sys.exit(1)
+
+ config_parser = ConfigParser.SafeConfigParser()
+ config_parser.readfp(config_fp)
+ config_fp.close()
+
+ args_from_config = []
+ for name, value in config_parser.items('pywebsocket'):
+ args_from_config.append('--' + name)
+ args_from_config.append(value)
+ if args is None:
+ args = args_from_config
+ else:
+ args = args_from_config + args
+ return parser.parse_args(args=args)
+ else:
+ return temporary_options, temporary_args
+
+
+def _main(args=None):
+    """You can call this function from your own program, but please note that
+    this function has some side effects that might affect your program. For
+    example, the use of util.wrap_popen3_for_win in this method replaces the
+    implementation of os.popen3.
+ """
+
+ options, args = _parse_args_and_config(args=args)
+
+ os.chdir(options.document_root)
+
+ _configure_logging(options)
+
+ if options.allow_draft75:
+ logging.warning('--allow_draft75 option is obsolete.')
+
+ if options.strict:
+ logging.warning('--strict option is obsolete.')
+
+ # TODO(tyoshino): Clean up initialization of CGI related values. Move some
+ # of code here to WebSocketRequestHandler class if it's better.
+ options.cgi_directories = []
+ options.is_executable_method = None
+ if options.cgi_paths:
+ options.cgi_directories = options.cgi_paths.split(',')
+ if sys.platform in ('cygwin', 'win32'):
+ cygwin_path = None
+ # For Win32 Python, it is expected that CYGWIN_PATH
+ # is set to a directory of cygwin binaries.
+ # For example, websocket_server.py in Chromium sets CYGWIN_PATH to
+ # full path of third_party/cygwin/bin.
+ if 'CYGWIN_PATH' in os.environ:
+ cygwin_path = os.environ['CYGWIN_PATH']
+ util.wrap_popen3_for_win(cygwin_path)
+
+ def __check_script(scriptpath):
+ return util.get_script_interp(scriptpath, cygwin_path)
+
+ options.is_executable_method = __check_script
+
+ if options.use_tls:
+ if options.tls_module is None:
+ if _import_ssl():
+ options.tls_module = _TLS_BY_STANDARD_MODULE
+ logging.debug('Using ssl module')
+ elif _import_pyopenssl():
+ options.tls_module = _TLS_BY_PYOPENSSL
+ logging.debug('Using pyOpenSSL module')
+ else:
+ logging.critical(
+ 'TLS support requires ssl or pyOpenSSL module.')
+ sys.exit(1)
+ elif options.tls_module == _TLS_BY_STANDARD_MODULE:
+ if not _import_ssl():
+ logging.critical('ssl module is not available')
+ sys.exit(1)
+ elif options.tls_module == _TLS_BY_PYOPENSSL:
+ if not _import_pyopenssl():
+ logging.critical('pyOpenSSL module is not available')
+ sys.exit(1)
+ else:
+ logging.critical('Invalid --tls-module option: %r',
+ options.tls_module)
+ sys.exit(1)
+
+ if not options.private_key or not options.certificate:
+ logging.critical(
+ 'To use TLS, specify private_key and certificate.')
+ sys.exit(1)
+
+ if (options.tls_client_cert_optional and
+ not options.tls_client_auth):
+ logging.critical('Client authentication must be enabled to '
+ 'specify tls_client_cert_optional')
+ sys.exit(1)
+ else:
+ if options.tls_module is not None:
+ logging.critical('Use --tls-module option only together with '
+ '--use-tls option.')
+ sys.exit(1)
+
+ if options.tls_client_auth:
+ logging.critical('TLS must be enabled for client authentication.')
+ sys.exit(1)
+
+ if options.tls_client_cert_optional:
+ logging.critical('TLS must be enabled for client authentication.')
+ sys.exit(1)
+
+ if not options.scan_dir:
+ options.scan_dir = options.websock_handlers
+
+ if options.use_basic_auth:
+ options.basic_auth_credential = 'Basic ' + base64.b64encode(
+ options.basic_auth_credential)
+
+ try:
+ if options.thread_monitor_interval_in_sec > 0:
+ # Run a thread monitor to show the status of server threads for
+ # debugging.
+ ThreadMonitor(options.thread_monitor_interval_in_sec).start()
+
+ server = WebSocketServer(options)
+ server.serve_forever()
+ except Exception, e:
+ logging.critical('mod_pywebsocket: %s' % e)
+ logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ _main(sys.argv[1:])
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/stream.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/stream.py
new file mode 100644
index 000000000..edc533279
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/stream.py
@@ -0,0 +1,57 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file exports public symbols.
+"""
+
+
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket._stream_hixie75 import StreamHixie75
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+
+# These methods are intended to be used by WebSocket client developers to have
+# their implementations receive broken data in tests.
+from mod_pywebsocket._stream_hybi import create_close_frame
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import create_ping_frame
+from mod_pywebsocket._stream_hybi import create_pong_frame
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_text_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/util.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/util.py
new file mode 100644
index 000000000..d224ae394
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/util.py
@@ -0,0 +1,416 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket utilities.
+"""
+
+
+import array
+import errno
+
+# Import hash classes from a module available and recommended for each Python
+# version and re-export those symbols. Use the sha and md5 modules in Python
+# 2.4, and the hashlib module in Python 2.6.
+try:
+ import hashlib
+ md5_hash = hashlib.md5
+ sha1_hash = hashlib.sha1
+except ImportError:
+ import md5
+ import sha
+ md5_hash = md5.md5
+ sha1_hash = sha.sha
+
+import StringIO
+import logging
+import os
+import re
+import socket
+import traceback
+import zlib
+
+try:
+ from mod_pywebsocket import fast_masking
+except ImportError:
+ pass
+
+
+def get_stack_trace():
+ """Get the current stack trace as string.
+
+ This is needed to support Python 2.3.
+ TODO: Remove this when we only support Python 2.4 and above.
+ Use traceback.format_exc instead.
+ """
+
+ out = StringIO.StringIO()
+ traceback.print_exc(file=out)
+ return out.getvalue()
+
+
+def prepend_message_to_exception(message, exc):
+ """Prepend message to the exception."""
+
+ exc.args = (message + str(exc),)
+ return
+
+
+def __translate_interp(interp, cygwin_path):
+    """Translate interp program path for Win32 python to run a cygwin program
+    (e.g. perl). Note that it doesn't support paths that contain spaces,
+    which are rare on Unix, where #!-scripts are written.
+ For Win32 python, cygwin_path is a directory of cygwin binaries.
+
+ Args:
+ interp: interp command line
+ cygwin_path: directory name of cygwin binary, or None
+ Returns:
+ translated interp command line.
+ """
+ if not cygwin_path:
+ return interp
+ m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
+ if m:
+ cmd = os.path.join(cygwin_path, m.group(1))
+ return cmd + m.group(2)
+ return interp
+
+
+def get_script_interp(script_path, cygwin_path=None):
+ """Gets #!-interpreter command line from the script.
+
+    It also fixes the command path. When Cygwin Python is used, e.g. in
+    WebKit, it could run "/usr/bin/perl -wT hello.pl".
+ When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
+ "/usr/bin/perl" to "<cygwin_path>\perl.exe".
+
+ Args:
+ script_path: pathname of the script
+ cygwin_path: directory name of cygwin binary, or None
+ Returns:
+ #!-interpreter command line, or None if it is not #!-script.
+ """
+ fp = open(script_path)
+ line = fp.readline()
+ fp.close()
+ m = re.match('^#!(.*)', line)
+ if m:
+ return __translate_interp(m.group(1), cygwin_path)
+ return None
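+
+
+# Illustrative example (not part of the original pywebsocket module): shows
+# what get_script_interp returns for a #!-script. Without a cygwin_path, the
+# interpreter line is returned as is; non-#!-scripts yield None. Never called;
+# uses a temporary file purely for demonstration.
+def _get_script_interp_example():
+    import tempfile
+    fd, path = tempfile.mkstemp()
+    try:
+        os.write(fd, '#!/usr/bin/perl -wT\n')
+        os.close(fd)
+        assert get_script_interp(path) == '/usr/bin/perl -wT'
+    finally:
+        os.remove(path)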
+
+
+def wrap_popen3_for_win(cygwin_path):
+ """Wrap popen3 to support #!-script on Windows.
+
+ Args:
+        cygwin_path: path of the cygwin binaries if the command path needs to
+                     be translated. None if no translation is required.
+ """
+
+ __orig_popen3 = os.popen3
+
+ def __wrap_popen3(cmd, mode='t', bufsize=-1):
+ cmdline = cmd.split(' ')
+ interp = get_script_interp(cmdline[0], cygwin_path)
+ if interp:
+ cmd = interp + ' ' + cmd
+ return __orig_popen3(cmd, mode, bufsize)
+
+ os.popen3 = __wrap_popen3
+
+
+def hexify(s):
+ return ' '.join(map(lambda x: '%02x' % ord(x), s))
+
+
+def get_class_logger(o):
+ return logging.getLogger(
+ '%s.%s' % (o.__class__.__module__, o.__class__.__name__))
+
+
+class NoopMasker(object):
+ """A masking object that has the same interface as RepeatedXorMasker but
+ just returns the string passed in without making any change.
+ """
+
+ def __init__(self):
+ pass
+
+ def mask(self, s):
+ return s
+
+
+class RepeatedXorMasker(object):
+    """A masking object that repeatedly applies XOR to the string given to
+    the mask method, using the masking bytes given to the constructor. This
+    object remembers the position in the masking bytes where the last mask
+    method call ended and resumes from that point on the next mask method
+    call.
+ """
+
+ def __init__(self, masking_key):
+ self._masking_key = masking_key
+ self._masking_key_index = 0
+
+ def _mask_using_swig(self, s):
+ masked_data = fast_masking.mask(
+ s, self._masking_key, self._masking_key_index)
+ self._masking_key_index = (
+ (self._masking_key_index + len(s)) % len(self._masking_key))
+ return masked_data
+
+ def _mask_using_array(self, s):
+ result = array.array('B')
+ result.fromstring(s)
+
+        # Use temporary local variables to eliminate the cost of accessing
+        # attributes.
+ masking_key = map(ord, self._masking_key)
+ masking_key_size = len(masking_key)
+ masking_key_index = self._masking_key_index
+
+ for i in xrange(len(result)):
+ result[i] ^= masking_key[masking_key_index]
+ masking_key_index = (masking_key_index + 1) % masking_key_size
+
+ self._masking_key_index = masking_key_index
+
+ return result.tostring()
+
+ if 'fast_masking' in globals():
+ mask = _mask_using_swig
+ else:
+ mask = _mask_using_array
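+
+
+# Illustrative example (not part of the original pywebsocket module):
+# demonstrates that RepeatedXorMasker keeps its position in the masking key
+# between mask() calls, so masking a message in chunks gives the same result
+# as masking it in one call. Assumes Python 2 byte strings, as used throughout
+# this module. The function is never called; it exists only as documentation.
+def _repeated_xor_masker_example():
+    masker = RepeatedXorMasker('\x01\x02\x03\x04')
+    chunked = masker.mask('abcdef') + masker.mask('ghij')
+    whole = RepeatedXorMasker('\x01\x02\x03\x04').mask('abcdefghij')
+    assert chunked == whole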
+
+
+# By making the wbits option negative, we can suppress the CMF/FLG (2 octets)
+# and ADLER32 (4 octets) fields of zlib so that we can use the zlib module
+# just as a deflate library. DICTID won't be added as long as we don't set a
+# dictionary. An LZ77 window of 32K will be used for both compression and
+# decompression. For decompression, we can just use 32K to cover any window
+# size. For compression, we use 32K, so receivers must use 32K.
+#
+# The compression level is Z_DEFAULT_COMPRESSION. We don't have to match the
+# level to decode.
+#
+# See zconf.h, deflate.cc, inflate.cc of the zlib library, and zlibmodule.c of
+# Python. See also RFC1950 (ZLIB 3.3).
+
+
+class _Deflater(object):
+
+ def __init__(self, window_bits):
+ self._logger = get_class_logger(self)
+
+ self._compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
+
+ def compress(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+ def compress_and_flush(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+ def compress_and_finish(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ compressed_bytes += self._compress.flush(zlib.Z_FINISH)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+
+class _Inflater(object):
+
+ def __init__(self, window_bits):
+ self._logger = get_class_logger(self)
+ self._window_bits = window_bits
+
+ self._unconsumed = ''
+
+ self.reset()
+
+ def decompress(self, size):
+ if not (size == -1 or size > 0):
+ raise Exception('size must be -1 or positive')
+
+ data = ''
+
+ while True:
+ if size == -1:
+ data += self._decompress.decompress(self._unconsumed)
+ # See Python bug http://bugs.python.org/issue12050 to
+ # understand why the same code cannot be used for updating
+ # self._unconsumed for here and else block.
+ self._unconsumed = ''
+ else:
+ data += self._decompress.decompress(
+ self._unconsumed, size - len(data))
+ self._unconsumed = self._decompress.unconsumed_tail
+ if self._decompress.unused_data:
+ # Encountered a last block (i.e. a block with BFINAL = 1) and
+ # found a new stream (unused_data). We cannot use the same
+ # zlib.Decompress object for the new stream. Create a new
+ # Decompress object to decompress the new one.
+ #
+ # It's fine to ignore unconsumed_tail if unused_data is not
+ # empty.
+ self._unconsumed = self._decompress.unused_data
+ self.reset()
+ if size >= 0 and len(data) == size:
+ # data is filled. Don't call decompress again.
+ break
+ else:
+ # Re-invoke Decompress.decompress to try to decompress all
+ # available bytes before invoking read which blocks until
+ # any new byte is available.
+ continue
+ else:
+ # Here, since unused_data is empty, even if unconsumed_tail is
+ # not empty, bytes of requested length are already in data. We
+ # don't have to "continue" here.
+ break
+
+ if data:
+ self._logger.debug('Decompressed %r', data)
+ return data
+
+ def append(self, data):
+ self._logger.debug('Appended %r', data)
+ self._unconsumed += data
+
+ def reset(self):
+ self._logger.debug('Reset')
+ self._decompress = zlib.decompressobj(-self._window_bits)
+
+
+# Compresses/decompresses given octets using the method introduced in RFC1979.
+
+
+class _RFC1979Deflater(object):
+    """A compressor class that applies DEFLATE to a given byte sequence and
+    flushes using the algorithm described in RFC1979 section 2.1.
+    """
+
+ def __init__(self, window_bits, no_context_takeover):
+ self._deflater = None
+ if window_bits is None:
+ window_bits = zlib.MAX_WBITS
+ self._window_bits = window_bits
+ self._no_context_takeover = no_context_takeover
+
+ def filter(self, bytes, end=True, bfinal=False):
+ if self._deflater is None:
+ self._deflater = _Deflater(self._window_bits)
+
+ if bfinal:
+ result = self._deflater.compress_and_finish(bytes)
+ # Add a padding block with BFINAL = 0 and BTYPE = 0.
+ result = result + chr(0)
+ self._deflater = None
+ return result
+
+ result = self._deflater.compress_and_flush(bytes)
+ if end:
+ # Strip last 4 octets which is LEN and NLEN field of a
+ # non-compressed block added for Z_SYNC_FLUSH.
+ result = result[:-4]
+
+ if self._no_context_takeover and end:
+ self._deflater = None
+
+ return result
+
+
+class _RFC1979Inflater(object):
+    """A decompressor class for byte sequences compressed and flushed
+    following the algorithm described in RFC1979 section 2.1.
+    """
+
+ def __init__(self, window_bits=zlib.MAX_WBITS):
+ self._inflater = _Inflater(window_bits)
+
+ def filter(self, bytes):
+ # Restore stripped LEN and NLEN field of a non-compressed block added
+ # for Z_SYNC_FLUSH.
+ self._inflater.append(bytes + '\x00\x00\xff\xff')
+ return self._inflater.decompress(-1)
+
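+# A short sketch of how the two classes above pair up (the 4 octets the
+# deflater strips are restored by the inflater before inflation):
+#
+#   deflater = _RFC1979Deflater(None, False)
+#   inflater = _RFC1979Inflater()
+#   assert inflater.filter(deflater.filter('Hello')) == 'Hello'
+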
+
+class DeflateSocket(object):
+ """A wrapper class for socket object to intercept send and recv to perform
+ deflate compression and decompression transparently.
+ """
+
+ # Size of the buffer passed to recv to receive compressed data.
+ _RECV_SIZE = 4096
+
+ def __init__(self, socket):
+ self._socket = socket
+
+ self._logger = get_class_logger(self)
+
+ self._deflater = _Deflater(zlib.MAX_WBITS)
+ self._inflater = _Inflater(zlib.MAX_WBITS)
+
+ def recv(self, size):
+        """Receives data from the socket specified at construction, up to
+        the specified size. Once any data is available, returns it even
+        if it's smaller than the specified size.
+ """
+
+ # TODO(tyoshino): Allow call with size=0. It should block until any
+ # decompressed data is available.
+ if size <= 0:
+ raise Exception('Non-positive size passed')
+ while True:
+ data = self._inflater.decompress(size)
+ if len(data) != 0:
+ return data
+
+ read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
+ if not read_data:
+ return ''
+ self._inflater.append(read_data)
+
+ def sendall(self, bytes):
+ self.send(bytes)
+
+ def send(self, bytes):
+ self._socket.sendall(self._deflater.compress_and_flush(bytes))
+ return len(bytes)
+
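+# Rough usage sketch (the connected socket is assumed to come from
+# elsewhere); compression and decompression are transparent to the caller:
+#
+#   deflate_socket = DeflateSocket(connected_socket)
+#   deflate_socket.sendall('Hello')
+#   reply = deflate_socket.recv(4096)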
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/xhr_benchmark_handler.py b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/xhr_benchmark_handler.py
new file mode 100644
index 000000000..6735c7e2a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/mod_pywebsocket/xhr_benchmark_handler.py
@@ -0,0 +1,109 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the COPYING file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+
+from mod_pywebsocket import util
+
+
+class XHRBenchmarkHandler(object):
+ def __init__(self, headers, rfile, wfile):
+ self._logger = util.get_class_logger(self)
+
+ self.headers = headers
+ self.rfile = rfile
+ self.wfile = wfile
+
+ def do_send(self):
+ content_length = int(self.headers.getheader('Content-Length'))
+
+ self._logger.debug('Requested to receive %s bytes', content_length)
+
+ RECEIVE_BLOCK_SIZE = 1024 * 1024
+
+ bytes_to_receive = content_length
+ while bytes_to_receive > 0:
+ bytes_to_receive_in_this_loop = bytes_to_receive
+ if bytes_to_receive_in_this_loop > RECEIVE_BLOCK_SIZE:
+ bytes_to_receive_in_this_loop = RECEIVE_BLOCK_SIZE
+ received_data = self.rfile.read(bytes_to_receive_in_this_loop)
+ if received_data != ('a' * bytes_to_receive_in_this_loop):
+ self._logger.debug('Request body verification failed')
+ return
+ bytes_to_receive -= len(received_data)
+ if bytes_to_receive < 0:
+ self._logger.debug('Received %d more bytes than expected' %
+ (-bytes_to_receive))
+ return
+
+ # Return the number of received bytes back to the client.
+ response_body = '%d' % content_length
+ self.wfile.write(
+ 'HTTP/1.1 200 OK\r\n'
+ 'Content-Type: text/html\r\n'
+ 'Content-Length: %d\r\n'
+ '\r\n%s' % (len(response_body), response_body))
+ self.wfile.flush()
+
+ def do_receive(self):
+ content_length = int(self.headers.getheader('Content-Length'))
+ request_body = self.rfile.read(content_length)
+
+ request_array = request_body.split(' ')
+ if len(request_array) < 2:
+ self._logger.debug('Malformed request body: %r', request_body)
+ return
+
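+        # The request body carries two space-separated fields, for example
+        # '65536 chunked' or '65536 none': a byte count followed by the
+        # transfer mode (the values here are only illustrative).
+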
+ # Parse the size parameter.
+ bytes_to_send = request_array[0]
+ try:
+ bytes_to_send = int(bytes_to_send)
+ except ValueError, e:
+ self._logger.debug('Malformed size parameter: %r', bytes_to_send)
+ return
+ self._logger.debug('Requested to send %s bytes', bytes_to_send)
+
+ # Parse the transfer encoding parameter.
+ chunked_mode = False
+ mode_parameter = request_array[1]
+ if mode_parameter == 'chunked':
+ self._logger.debug('Requested chunked transfer encoding')
+ chunked_mode = True
+ elif mode_parameter != 'none':
+ self._logger.debug('Invalid mode parameter: %r', mode_parameter)
+ return
+
+ # Write a header
+ response_header = (
+ 'HTTP/1.1 200 OK\r\n'
+ 'Content-Type: application/octet-stream\r\n')
+ if chunked_mode:
+ response_header += 'Transfer-Encoding: chunked\r\n\r\n'
+ else:
+ response_header += (
+ 'Content-Length: %d\r\n\r\n' % bytes_to_send)
+ self.wfile.write(response_header)
+ self.wfile.flush()
+
+ # Write a body
+ SEND_BLOCK_SIZE = 1024 * 1024
+
+ while bytes_to_send > 0:
+ bytes_to_send_in_this_loop = bytes_to_send
+ if bytes_to_send_in_this_loop > SEND_BLOCK_SIZE:
+ bytes_to_send_in_this_loop = SEND_BLOCK_SIZE
+
+ if chunked_mode:
+ self.wfile.write('%x\r\n' % bytes_to_send_in_this_loop)
+ self.wfile.write('a' * bytes_to_send_in_this_loop)
+ if chunked_mode:
+ self.wfile.write('\r\n')
+ self.wfile.flush()
+
+ bytes_to_send -= bytes_to_send_in_this_loop
+
+ if chunked_mode:
+ self.wfile.write('0\r\n\r\n')
+ self.wfile.flush()
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/setup.py b/testing/web-platform/tests/tools/pywebsocket/src/setup.py
new file mode 100755
index 000000000..ada8db3e1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/setup.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Set up script for mod_pywebsocket.
+"""
+
+
+from distutils.core import setup, Extension
+import sys
+
+
+_PACKAGE_NAME = 'mod_pywebsocket'
+
+# Build and use a C++ extension for faster masking. SWIG is required.
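+# To try it, set _USE_FAST_MASKING to True below and run, for example,
+# 'python setup.py build'; a SWIG installation and a C++ toolchain are
+# assumed to be available.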
+_USE_FAST_MASKING = False
+
+if sys.version < '2.3':
+ print >> sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
+ sys.exit(1)
+
+if _USE_FAST_MASKING:
+ setup(ext_modules=[
+ Extension(
+ 'mod_pywebsocket/_fast_masking',
+ ['mod_pywebsocket/fast_masking.i'],
+ swig_opts=['-c++'])])
+
+setup(author='Yuzo Fujishima',
+ author_email='yuzo@chromium.org',
+ description='WebSocket extension for Apache HTTP Server.',
+ long_description=(
+ 'mod_pywebsocket is an Apache HTTP Server extension for '
+ 'the WebSocket Protocol (RFC 6455). '
+ 'See mod_pywebsocket/__init__.py for more detail.'),
+ license='See COPYING',
+ name=_PACKAGE_NAME,
+ packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
+ url='http://code.google.com/p/pywebsocket/',
+ # See the source of distutils.version, distutils.versionpredicate and
+ # distutils.dist to understand how to name version numbers.
+ version='0.7.9',
+ )
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/__init__.py b/testing/web-platform/tests/tools/pywebsocket/src/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/__init__.py
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/cert/cacert.pem b/testing/web-platform/tests/tools/pywebsocket/src/test/cert/cacert.pem
new file mode 100644
index 000000000..4dadae121
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/cert/cacert.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICvDCCAiWgAwIBAgIJAKqVghkGF1rSMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
+BAYTAkpQMQ4wDAYDVQQIEwVUb2t5bzEUMBIGA1UEChMLcHl3ZWJzb2NrZXQxFDAS
+BgNVBAMTC3B5d2Vic29ja2V0MB4XDTEyMDYwNjA3MjQzM1oXDTM5MTAyMzA3MjQz
+M1owSTELMAkGA1UEBhMCSlAxDjAMBgNVBAgTBVRva3lvMRQwEgYDVQQKEwtweXdl
+YnNvY2tldDEUMBIGA1UEAxMLcHl3ZWJzb2NrZXQwgZ8wDQYJKoZIhvcNAQEBBQAD
+gY0AMIGJAoGBAKoSEW2biQxVrMMKdn/8PJzDYiSXDPR9WQbLRRQ1Gm5jkCYiahXW
+u2CbTThfPPfi2NHA3I+HlT7gO9yR7RVUvN6ISUzGwXDEq4f4UNqtQOhQaqqK+CZ9
+LO/BhO/YYfNrbSPlYzHUKaT9ese7xO9VzVKLW+qUf2Mjh4/+SzxBDNP7AgMBAAGj
+gaswgagwHQYDVR0OBBYEFOsWdxCSuyhwaZeab6BoTho3++bzMHkGA1UdIwRyMHCA
+FOsWdxCSuyhwaZeab6BoTho3++bzoU2kSzBJMQswCQYDVQQGEwJKUDEOMAwGA1UE
+CBMFVG9reW8xFDASBgNVBAoTC3B5d2Vic29ja2V0MRQwEgYDVQQDEwtweXdlYnNv
+Y2tldIIJAKqVghkGF1rSMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
+gsMI1WEYqNw/jhUIdrTBcCxJ0X6hJvA9ziKANVm1Rs+4P3YDArkQ8bCr6xY+Kw7s
+Zp0yE7dM8GMdi+DU6hL3t3E5eMkTS1yZr9WCK4f2RLo+et98selZydpHemF3DJJ3
+gAj8Sx4LBaG8Cb/WnEMPv3MxG3fBE5favF6V4jU07hQ=
+-----END CERTIFICATE-----
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/cert/cert.pem b/testing/web-platform/tests/tools/pywebsocket/src/test/cert/cert.pem
new file mode 100644
index 000000000..25379a72b
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/cert/cert.pem
@@ -0,0 +1,61 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 1 (0x1)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=JP, ST=Tokyo, O=pywebsocket, CN=pywebsocket
+ Validity
+ Not Before: Jun 6 07:25:08 2012 GMT
+ Not After : Oct 23 07:25:08 2039 GMT
+ Subject: C=JP, ST=Tokyo, O=pywebsocket, CN=pywebsocket
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public Key: (1024 bit)
+ Modulus (1024 bit):
+ 00:de:10:ce:3a:5a:04:a4:1c:29:93:5c:23:82:1a:
+ f2:06:01:e6:2b:a4:0f:dd:77:49:76:89:03:a2:21:
+ de:04:75:c6:e2:dd:fb:35:27:3a:a2:92:8e:12:62:
+ 2b:3e:1f:f4:78:df:b6:94:cb:27:d6:cb:d6:37:d7:
+ 5c:08:f0:09:3e:c9:ce:24:2d:00:c9:df:4a:e0:99:
+ e5:fb:23:a9:e2:d6:c9:3d:96:fa:01:88:de:5a:89:
+ b0:cf:03:67:6f:04:86:1d:ef:62:1c:55:a9:07:9a:
+ 2e:66:2a:73:5b:4c:62:03:f9:82:83:db:68:bf:b8:
+ 4b:0b:8b:93:11:b8:54:73:7b
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Cert Type:
+ SSL Server
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 82:A1:73:8B:16:0C:7C:E4:D3:46:95:13:95:1A:32:C1:84:E9:06:00
+ X509v3 Authority Key Identifier:
+ keyid:EB:16:77:10:92:BB:28:70:69:97:9A:6F:A0:68:4E:1A:37:FB:E6:F3
+
+ Signature Algorithm: sha1WithRSAEncryption
+ 6b:b3:46:29:02:df:b0:c8:8e:c4:d7:7f:a0:1e:0d:1a:eb:2f:
+ df:d1:48:57:36:5f:95:8c:1b:f0:51:d6:52:e7:8d:84:3b:9f:
+ d8:ed:22:9c:aa:bd:ee:9b:90:1d:84:a3:4c:0b:cb:eb:64:73:
+ ba:f7:15:ce:da:5f:db:8b:15:07:a6:28:7f:b9:8c:11:9b:64:
+ d3:f1:be:52:4f:c3:d8:58:fe:de:56:63:63:3b:51:ed:a7:81:
+ f9:05:51:70:63:32:09:0e:94:7e:05:fe:a1:56:18:34:98:d5:
+ 99:1e:4e:27:38:89:90:6a:e5:ce:60:35:01:f5:de:34:60:b1:
+ cb:ae
+-----BEGIN CERTIFICATE-----
+MIICmDCCAgGgAwIBAgIBATANBgkqhkiG9w0BAQUFADBJMQswCQYDVQQGEwJKUDEO
+MAwGA1UECBMFVG9reW8xFDASBgNVBAoTC3B5d2Vic29ja2V0MRQwEgYDVQQDEwtw
+eXdlYnNvY2tldDAeFw0xMjA2MDYwNzI1MDhaFw0zOTEwMjMwNzI1MDhaMEkxCzAJ
+BgNVBAYTAkpQMQ4wDAYDVQQIEwVUb2t5bzEUMBIGA1UEChMLcHl3ZWJzb2NrZXQx
+FDASBgNVBAMTC3B5d2Vic29ja2V0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
+gQDeEM46WgSkHCmTXCOCGvIGAeYrpA/dd0l2iQOiId4Edcbi3fs1Jzqiko4SYis+
+H/R437aUyyfWy9Y311wI8Ak+yc4kLQDJ30rgmeX7I6ni1sk9lvoBiN5aibDPA2dv
+BIYd72IcVakHmi5mKnNbTGID+YKD22i/uEsLi5MRuFRzewIDAQABo4GPMIGMMAkG
+A1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMCwGCWCGSAGG+EIBDQQfFh1PcGVu
+U1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUgqFzixYMfOTTRpUT
+lRoywYTpBgAwHwYDVR0jBBgwFoAU6xZ3EJK7KHBpl5pvoGhOGjf75vMwDQYJKoZI
+hvcNAQEFBQADgYEAa7NGKQLfsMiOxNd/oB4NGusv39FIVzZflYwb8FHWUueNhDuf
+2O0inKq97puQHYSjTAvL62RzuvcVztpf24sVB6Yof7mMEZtk0/G+Uk/D2Fj+3lZj
+YztR7aeB+QVRcGMyCQ6UfgX+oVYYNJjVmR5OJziJkGrlzmA1AfXeNGCxy64=
+-----END CERTIFICATE-----
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/cert/client_cert.p12 b/testing/web-platform/tests/tools/pywebsocket/src/test/cert/client_cert.p12
new file mode 100644
index 000000000..14e139927
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/cert/client_cert.p12
Binary files differ
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/cert/key.pem b/testing/web-platform/tests/tools/pywebsocket/src/test/cert/key.pem
new file mode 100644
index 000000000..fae858318
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/cert/key.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXgIBAAKBgQDeEM46WgSkHCmTXCOCGvIGAeYrpA/dd0l2iQOiId4Edcbi3fs1
+Jzqiko4SYis+H/R437aUyyfWy9Y311wI8Ak+yc4kLQDJ30rgmeX7I6ni1sk9lvoB
+iN5aibDPA2dvBIYd72IcVakHmi5mKnNbTGID+YKD22i/uEsLi5MRuFRzewIDAQAB
+AoGBAIuCuV1Vcnb7rm8CwtgZP5XgmY8vSjxTldafa6XvawEYUTP0S77v/1llg1Yv
+UIV+I+PQgG9oVoYOl22LoimHS/Z3e1fsot5tDYszGe8/Gkst4oaReSoxvBUa6WXp
+QSo7YFCajuHtE+W/gzF+UHbdzzXIDjQZ314LNF5t+4UnsEPBAkEA+girImqWoM2t
+3UR8f8oekERwsmEMf9DH5YpH4cvUnvI+kwesC/r2U8Sho++fyEMUNm7aIXGqNLga
+ogAM+4NX4QJBAONdSxSay22egTGNoIhLndljWkuOt/9FWj2klf/4QxD4blMJQ5Oq
+QdOGAh7nVQjpPLQ5D7CBVAKpGM2CD+QJBtsCQEP2kz35pxPylG3urcC2mfQxBkkW
+ZCViBNP58GwJ0bOauTOSBEwFXWuLqTw8aDwxL49UNmqc0N0fpe2fAehj3UECQQCm
+FH/DjU8Lw7ybddjNtm6XXPuYNagxz3cbkB4B3FchDleIUDwMoVF0MW9bI5/54mV1
+QDk1tUKortxvQZJaAD4BAkEAhGOHQqPd6bBBoFBvpaLzPJMxwLKrB+Wtkq/QlC72
+ClRiMn2g8SALiIL3BDgGXKcKE/Wy7jo/af/JCzQ/cPqt/A==
+-----END RSA PRIVATE KEY-----
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/client_for_testing.py b/testing/web-platform/tests/tools/pywebsocket/src/test/client_for_testing.py
new file mode 100644
index 000000000..c7f805ee9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/client_for_testing.py
@@ -0,0 +1,1100 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket client utility for testing.
+
+This module contains helper methods for performing handshake, frame
+sending/receiving as a WebSocket client.
+
+This is code for testing mod_pywebsocket. Keep this code independent from
+mod_pywebsocket. Don't import e.g. Stream class for generating frame for
+testing. Using util.hexify, etc. that are not related to protocol processing
+is allowed.
+
+Note:
+This code is far from robust, e.g., we cut corners in handshake.
+"""
+
+
+import base64
+import errno
+import logging
+import os
+import random
+import re
+import socket
+import struct
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+
+
+DEFAULT_PORT = 80
+DEFAULT_SECURE_PORT = 443
+
+# Opcodes introduced in IETF HyBi 01 for the new framing format
+OPCODE_CONTINUATION = 0x0
+OPCODE_CLOSE = 0x8
+OPCODE_PING = 0x9
+OPCODE_PONG = 0xa
+OPCODE_TEXT = 0x1
+OPCODE_BINARY = 0x2
+
+# Strings used for handshake
+_UPGRADE_HEADER = 'Upgrade: websocket\r\n'
+_UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n'
+_CONNECTION_HEADER = 'Connection: Upgrade\r\n'
+
+WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+
+# Status codes
+STATUS_NORMAL_CLOSURE = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA = 1003
+STATUS_NO_STATUS_RECEIVED = 1005
+STATUS_ABNORMAL_CLOSURE = 1006
+STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_MANDATORY_EXT = 1010
+STATUS_INTERNAL_ENDPOINT_ERROR = 1011
+STATUS_TLS_HANDSHAKE = 1015
+
+# Extension tokens
+_DEFLATE_FRAME_EXTENSION = 'deflate-frame'
+# TODO(bashi): Update after mux implementation finished.
+_MUX_EXTENSION = 'mux_DO_NOT_USE'
+_PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate'
+
+def _method_line(resource):
+ return 'GET %s HTTP/1.1\r\n' % resource
+
+
+def _sec_origin_header(origin):
+ return 'Sec-WebSocket-Origin: %s\r\n' % origin.lower()
+
+
+def _origin_header(origin):
+ # 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character,
+ # and the /origin/ value, converted to ASCII lowercase, to /fields/.
+ return 'Origin: %s\r\n' % origin.lower()
+
+
+def _format_host_header(host, port, secure):
+ # 4.1 9. Let /hostport/ be an empty string.
+ # 4.1 10. Append the /host/ value, converted to ASCII lowercase, to
+ # /hostport/
+ hostport = host.lower()
+ # 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/
+ # is true, and /port/ is not 443, then append a U+003A COLON character
+ # (:) followed by the value of /port/, expressed as a base-ten integer,
+ # to /hostport/
+ if ((not secure and port != DEFAULT_PORT) or
+ (secure and port != DEFAULT_SECURE_PORT)):
+ hostport += ':' + str(port)
+ # 4.1 12. concatenation of the string "Host:", a U+0020 SPACE
+ # character, and /hostport/, to /fields/.
+ return 'Host: %s\r\n' % hostport
+
+
+# TODO(tyoshino): Define a base class and move these shared methods to that.
+
+
+def receive_bytes(socket, length):
+ received_bytes = []
+ remaining = length
+ while remaining > 0:
+ new_received_bytes = socket.recv(remaining)
+ if not new_received_bytes:
+ raise Exception(
+ 'Connection closed before receiving requested length '
+ '(requested %d bytes but received only %d bytes)' %
+ (length, length - remaining))
+ received_bytes.append(new_received_bytes)
+ remaining -= len(new_received_bytes)
+ return ''.join(received_bytes)
+
+
+# TODO(tyoshino): The WebSocketHandshake class currently reuses these
+# methods. We should move to an HTTP parser as specified in RFC 6455. For
+# HyBi 00 and Hixie 75, wrap these methods in a dedicated parser class.
+
+
+def _read_fields(socket):
+ # 4.1 32. let /fields/ be a list of name-value pairs, initially empty.
+ fields = {}
+ while True:
+ # 4.1 33. let /name/ and /value/ be empty byte arrays
+ name = ''
+ value = ''
+ # 4.1 34. read /name/
+ name = _read_name(socket)
+ if name is None:
+ break
+ # 4.1 35. read spaces
+ # TODO(tyoshino): Skip only one space as described in the spec.
+ ch = _skip_spaces(socket)
+ # 4.1 36. read /value/
+ value = _read_value(socket, ch)
+ # 4.1 37. read a byte from the server
+ ch = receive_bytes(socket, 1)
+ if ch != '\n': # 0x0A
+ raise Exception(
+ 'Expected LF but found %r while reading value %r for header '
+ '%r' % (ch, name, value))
+ # 4.1 38. append an entry to the /fields/ list that has the name
+ # given by the string obtained by interpreting the /name/ byte
+ # array as a UTF-8 stream and the value given by the string
+ # obtained by interpreting the /value/ byte array as a UTF-8 byte
+ # stream.
+ fields.setdefault(name, []).append(value)
+ # 4.1 39. return to the "Field" step above
+ return fields
+
+
+def _read_name(socket):
+ # 4.1 33. let /name/ be empty byte arrays
+ name = ''
+ while True:
+ # 4.1 34. read a byte from the server
+ ch = receive_bytes(socket, 1)
+ if ch == '\r': # 0x0D
+ return None
+ elif ch == '\n': # 0x0A
+ raise Exception(
+ 'Unexpected LF when reading header name %r' % name)
+ elif ch == ':': # 0x3A
+ return name
+        elif ch >= 'A' and ch <= 'Z':  # range 0x41 to 0x5A
+ ch = chr(ord(ch) + 0x20)
+ name += ch
+ else:
+ name += ch
+
+
+def _skip_spaces(socket):
+ # 4.1 35. read a byte from the server
+ while True:
+ ch = receive_bytes(socket, 1)
+ if ch == ' ': # 0x20
+ continue
+ return ch
+
+
+def _read_value(socket, ch):
+ # 4.1 33. let /value/ be empty byte arrays
+ value = ''
+ # 4.1 36. read a byte from server.
+ while True:
+ if ch == '\r': # 0x0D
+ return value
+ elif ch == '\n': # 0x0A
+ raise Exception(
+ 'Unexpected LF when reading header value %r' % value)
+ else:
+ value += ch
+ ch = receive_bytes(socket, 1)
+
+
+def read_frame_header(socket):
+ received = receive_bytes(socket, 2)
+
+ first_byte = ord(received[0])
+ fin = (first_byte >> 7) & 1
+ rsv1 = (first_byte >> 6) & 1
+ rsv2 = (first_byte >> 5) & 1
+ rsv3 = (first_byte >> 4) & 1
+ opcode = first_byte & 0xf
+
+ second_byte = ord(received[1])
+ mask = (second_byte >> 7) & 1
+ payload_length = second_byte & 0x7f
+
+ if mask != 0:
+ raise Exception(
+ 'Mask bit must be 0 for frames coming from server')
+
+ if payload_length == 127:
+ extended_payload_length = receive_bytes(socket, 8)
+ payload_length = struct.unpack(
+ '!Q', extended_payload_length)[0]
+ if payload_length > 0x7FFFFFFFFFFFFFFF:
+ raise Exception('Extended payload length >= 2^63')
+ elif payload_length == 126:
+ extended_payload_length = receive_bytes(socket, 2)
+ payload_length = struct.unpack(
+ '!H', extended_payload_length)[0]
+
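+    # For example, a server frame beginning with the two bytes '\x81\x05'
+    # parses as fin=1, rsv1-3=0, opcode=0x1 (text), no mask bit and
+    # payload_length=5.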
+ return fin, rsv1, rsv2, rsv3, opcode, payload_length
+
+
+class _TLSSocket(object):
+ """Wrapper for a TLS connection."""
+
+ def __init__(self, raw_socket):
+ self._ssl = socket.ssl(raw_socket)
+
+ def send(self, bytes):
+ return self._ssl.write(bytes)
+
+ def recv(self, size=-1):
+ return self._ssl.read(size)
+
+ def close(self):
+ # Nothing to do.
+ pass
+
+
+class HttpStatusException(Exception):
+    """This exception is raised when an unexpected HTTP status code is
+    received as a result of the handshake.
+    """
+
+ def __init__(self, name, status):
+ super(HttpStatusException, self).__init__(name)
+ self.status = status
+
+
+class WebSocketHandshake(object):
+ """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
+
+ def __init__(self, options):
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+
+ def handshake(self, socket):
+ """Handshake WebSocket.
+
+ Raises:
+ Exception: handshake failed.
+ """
+
+ self._socket = socket
+
+ request_line = _method_line(self._options.resource)
+ self._logger.debug('Opening handshake Request-Line: %r', request_line)
+ self._socket.sendall(request_line)
+
+ fields = []
+ fields.append(_UPGRADE_HEADER)
+ fields.append(_CONNECTION_HEADER)
+
+ fields.append(_format_host_header(
+ self._options.server_host,
+ self._options.server_port,
+ self._options.use_tls))
+
+        if self._options.version == 8:
+ fields.append(_sec_origin_header(self._options.origin))
+ else:
+ fields.append(_origin_header(self._options.origin))
+
+ original_key = os.urandom(16)
+ key = base64.b64encode(original_key)
+ self._logger.debug(
+ 'Sec-WebSocket-Key: %s (%s)', key, util.hexify(original_key))
+ fields.append('Sec-WebSocket-Key: %s\r\n' % key)
+
+ fields.append('Sec-WebSocket-Version: %d\r\n' % self._options.version)
+
+ # Setting up extensions.
+ if len(self._options.extensions) > 0:
+ fields.append('Sec-WebSocket-Extensions: %s\r\n' %
+ ', '.join(self._options.extensions))
+
+ self._logger.debug('Opening handshake request headers: %r', fields)
+
+ for field in fields:
+ self._socket.sendall(field)
+ self._socket.sendall('\r\n')
+
+ self._logger.info('Sent opening handshake request')
+
+ field = ''
+ while True:
+ ch = receive_bytes(self._socket, 1)
+ field += ch
+ if ch == '\n':
+ break
+
+ self._logger.debug('Opening handshake Response-Line: %r', field)
+
+ if len(field) < 7 or not field.endswith('\r\n'):
+ raise Exception('Wrong status line: %r' % field)
+ m = re.match('[^ ]* ([^ ]*) .*', field)
+ if m is None:
+ raise Exception(
+ 'No HTTP status code found in status line: %r' % field)
+ code = m.group(1)
+ if not re.match('[0-9][0-9][0-9]', code):
+ raise Exception(
+ 'HTTP status code %r is not three digit in status line: %r' %
+ (code, field))
+ if code != '101':
+ raise HttpStatusException(
+ 'Expected HTTP status code 101 but found %r in status line: '
+ '%r' % (code, field), int(code))
+ fields = _read_fields(self._socket)
+ ch = receive_bytes(self._socket, 1)
+ if ch != '\n': # 0x0A
+ raise Exception('Expected LF but found: %r' % ch)
+
+ self._logger.debug('Opening handshake response headers: %r', fields)
+
+ # Check /fields/
+ if len(fields['upgrade']) != 1:
+ raise Exception(
+ 'Multiple Upgrade headers found: %s' % fields['upgrade'])
+ if len(fields['connection']) != 1:
+ raise Exception(
+ 'Multiple Connection headers found: %s' % fields['connection'])
+ if fields['upgrade'][0] != 'websocket':
+ raise Exception(
+ 'Unexpected Upgrade header value: %s' % fields['upgrade'][0])
+ if fields['connection'][0].lower() != 'upgrade':
+ raise Exception(
+ 'Unexpected Connection header value: %s' %
+ fields['connection'][0])
+
+ if len(fields['sec-websocket-accept']) != 1:
+ raise Exception(
+ 'Multiple Sec-WebSocket-Accept headers found: %s' %
+ fields['sec-websocket-accept'])
+
+ accept = fields['sec-websocket-accept'][0]
+
+ # Validate
+ try:
+ decoded_accept = base64.b64decode(accept)
+        except TypeError, e:
+            raise Exception(
+                'Illegal value for header Sec-WebSocket-Accept: ' + accept)
+
+        if len(decoded_accept) != 20:
+            raise Exception(
+                'Decoded value of Sec-WebSocket-Accept is not 20 bytes long')
+
+ self._logger.debug('Actual Sec-WebSocket-Accept: %r (%s)',
+ accept, util.hexify(decoded_accept))
+
+ original_expected_accept = util.sha1_hash(
+ key + WEBSOCKET_ACCEPT_UUID).digest()
+ expected_accept = base64.b64encode(original_expected_accept)
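+        # (For the sample key 'dGhlIHNhbXBsZSBub25jZQ==' given in RFC 6455,
+        # this works out to 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='.)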
+
+ self._logger.debug('Expected Sec-WebSocket-Accept: %r (%s)',
+ expected_accept,
+ util.hexify(original_expected_accept))
+
+ if accept != expected_accept:
+ raise Exception(
+ 'Invalid Sec-WebSocket-Accept header: %r (expected) != %r '
+ '(actual)' % (accept, expected_accept))
+
+ server_extensions_header = fields.get('sec-websocket-extensions')
+ accepted_extensions = []
+ if server_extensions_header is not None:
+ accepted_extensions = common.parse_extensions(
+ ', '.join(server_extensions_header))
+
+        # Scan the accepted extension list to check whether it contains any
+        # unrecognized extensions or extensions we didn't request. Then, for
+        # the extensions we requested, parse them and store their parameters;
+        # they will be used later by each extension.
+ deflate_frame_accepted = False
+ mux_accepted = False
+ for extension in accepted_extensions:
+ if extension.name() == _DEFLATE_FRAME_EXTENSION:
+ if self._options.use_deflate_frame:
+ deflate_frame_accepted = True
+ continue
+ if extension.name() == _MUX_EXTENSION:
+ if self._options.use_mux:
+ mux_accepted = True
+ continue
+ if extension.name() == _PERMESSAGE_DEFLATE_EXTENSION:
+ checker = self._options.check_permessage_deflate
+ if checker:
+ checker(extension)
+ continue
+
+ raise Exception(
+ 'Received unrecognized extension: %s' % extension.name())
+
+ # Let all extensions check the response for extension request.
+
+ if (self._options.use_deflate_frame and
+ not deflate_frame_accepted):
+ raise Exception('%s extension not accepted' %
+ _DEFLATE_FRAME_EXTENSION)
+
+ if self._options.use_mux and not mux_accepted:
+ raise Exception('%s extension not accepted' % _MUX_EXTENSION)
+
+
+class WebSocketHybi00Handshake(object):
+ """Opening handshake processor for the WebSocket protocol version HyBi 00.
+ """
+
+ def __init__(self, options, draft_field):
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+ self._draft_field = draft_field
+
+ def handshake(self, socket):
+ """Handshake WebSocket.
+
+ Raises:
+ Exception: handshake failed.
+ """
+
+ self._socket = socket
+
+ # 4.1 5. send request line.
+ request_line = _method_line(self._options.resource)
+ self._logger.debug('Opening handshake Request-Line: %r', request_line)
+ self._socket.sendall(request_line)
+ # 4.1 6. Let /fields/ be an empty list of strings.
+ fields = []
+ # 4.1 7. Add the string "Upgrade: WebSocket" to /fields/.
+ fields.append(_UPGRADE_HEADER_HIXIE75)
+ # 4.1 8. Add the string "Connection: Upgrade" to /fields/.
+ fields.append(_CONNECTION_HEADER)
+ # 4.1 9-12. Add Host: field to /fields/.
+ fields.append(_format_host_header(
+ self._options.server_host,
+ self._options.server_port,
+ self._options.use_tls))
+ # 4.1 13. Add Origin: field to /fields/.
+ fields.append(_origin_header(self._options.origin))
+ # TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/.
+ # TODO: 4.1 15 Add cookie headers to /fields/.
+
+ # 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/.
+ self._number1, key1 = self._generate_sec_websocket_key()
+ self._logger.debug('Number1: %d', self._number1)
+ fields.append('Sec-WebSocket-Key1: %s\r\n' % key1)
+ self._number2, key2 = self._generate_sec_websocket_key()
+        self._logger.debug('Number2: %d', self._number2)
+ fields.append('Sec-WebSocket-Key2: %s\r\n' % key2)
+
+ fields.append('Sec-WebSocket-Draft: %s\r\n' % self._draft_field)
+
+ # 4.1 24. For each string in /fields/, in a random order: send the
+ # string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE
+ # RETURN U+000A LINE FEED character pair (CRLF).
+ random.shuffle(fields)
+
+ self._logger.debug('Opening handshake request headers: %r', fields)
+ for field in fields:
+ self._socket.sendall(field)
+
+ # 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED
+ # character pair (CRLF).
+ self._socket.sendall('\r\n')
+ # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
+ # equivalently, a random 64 bit integer encoded in a big-endian order).
+ self._key3 = self._generate_key3()
+ # 4.1 27. send /key3/ to the server.
+ self._socket.sendall(self._key3)
+ self._logger.debug(
+ 'Key3: %r (%s)', self._key3, util.hexify(self._key3))
+
+ self._logger.info('Sent opening handshake request')
+
+ # 4.1 28. Read bytes from the server until either the connection
+ # closes, or a 0x0A byte is read. let /field/ be these bytes, including
+ # the 0x0A bytes.
+ field = ''
+ while True:
+ ch = receive_bytes(self._socket, 1)
+ field += ch
+ if ch == '\n':
+ break
+
+ self._logger.debug('Opening handshake Response-Line: %r', field)
+
+ # if /field/ is not at least seven bytes long, or if the last
+ # two bytes aren't 0x0D and 0x0A respectively, or if it does not
+ # contain at least two 0x20 bytes, then fail the WebSocket connection
+ # and abort these steps.
+ if len(field) < 7 or not field.endswith('\r\n'):
+ raise Exception('Wrong status line: %r' % field)
+ m = re.match('[^ ]* ([^ ]*) .*', field)
+ if m is None:
+ raise Exception('No code found in status line: %r' % field)
+ # 4.1 29. let /code/ be the substring of /field/ that starts from the
+ # byte after the first 0x20 byte, and ends with the byte before the
+ # second 0x20 byte.
+ code = m.group(1)
+ # 4.1 30. if /code/ is not three bytes long, or if any of the bytes in
+        # /code/ are not in the range 0x30 to 0x39, then fail the WebSocket
+ # connection and abort these steps.
+ if not re.match('[0-9][0-9][0-9]', code):
+ raise Exception(
+ 'HTTP status code %r is not three digit in status line: %r' %
+ (code, field))
+ # 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the
+ # next step.
+ if code != '101':
+ raise HttpStatusException(
+ 'Expected HTTP status code 101 but found %r in status line: '
+ '%r' % (code, field), int(code))
+ # 4.1 32-39. read fields into /fields/
+ fields = _read_fields(self._socket)
+
+ self._logger.debug('Opening handshake response headers: %r', fields)
+
+ # 4.1 40. _Fields processing_
+ # read a byte from server
+ ch = receive_bytes(self._socket, 1)
+ if ch != '\n': # 0x0A
+ raise Exception('Expected LF but found %r' % ch)
+ # 4.1 41. check /fields/
+ if len(fields['upgrade']) != 1:
+ raise Exception(
+ 'Multiple Upgrade headers found: %s' % fields['upgrade'])
+ if len(fields['connection']) != 1:
+ raise Exception(
+ 'Multiple Connection headers found: %s' % fields['connection'])
+ if len(fields['sec-websocket-origin']) != 1:
+ raise Exception(
+ 'Multiple Sec-WebSocket-Origin headers found: %s' %
+                fields['sec-websocket-origin'])
+ if len(fields['sec-websocket-location']) != 1:
+ raise Exception(
+ 'Multiple Sec-WebSocket-Location headers found: %s' %
+                fields['sec-websocket-location'])
+ # TODO(ukai): protocol
+ # if the entry's name is "upgrade"
+ # if the value is not exactly equal to the string "WebSocket",
+ # then fail the WebSocket connection and abort these steps.
+ if fields['upgrade'][0] != 'WebSocket':
+ raise Exception(
+ 'Unexpected Upgrade header value: %s' % fields['upgrade'][0])
+ # if the entry's name is "connection"
+ # if the value, converted to ASCII lowercase, is not exactly equal
+ # to the string "upgrade", then fail the WebSocket connection and
+ # abort these steps.
+ if fields['connection'][0].lower() != 'upgrade':
+ raise Exception(
+ 'Unexpected Connection header value: %s' %
+ fields['connection'][0])
+ # TODO(ukai): check origin, location, cookie, ..
+
+ # 4.1 42. let /challenge/ be the concatenation of /number_1/,
+ # expressed as a big endian 32 bit integer, /number_2/, expressed
+ # as big endian 32 bit integer, and the eight bytes of /key_3/ in the
+ # order they were sent on the wire.
+ challenge = struct.pack('!I', self._number1)
+ challenge += struct.pack('!I', self._number2)
+ challenge += self._key3
+
+ self._logger.debug(
+ 'Challenge: %r (%s)', challenge, util.hexify(challenge))
+
+ # 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a
+ # big-endian 128 bit string.
+ expected = util.md5_hash(challenge).digest()
+ self._logger.debug(
+ 'Expected challenge response: %r (%s)',
+ expected, util.hexify(expected))
+
+ # 4.1 44. read sixteen bytes from the server.
+ # let /reply/ be those bytes.
+ reply = receive_bytes(self._socket, 16)
+ self._logger.debug(
+ 'Actual challenge response: %r (%s)', reply, util.hexify(reply))
+
+ # 4.1 45. if /reply/ does not exactly equal /expected/, then fail
+ # the WebSocket connection and abort these steps.
+ if expected != reply:
+ raise Exception(
+ 'Bad challenge response: %r (expected) != %r (actual)' %
+ (expected, reply))
+ # 4.1 46. The *WebSocket connection is established*.
+
+ def _generate_sec_websocket_key(self):
+ # 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive.
+ spaces = random.randint(1, 12)
+ # 4.1 17. let /max_n/ be the largest integer not greater than
+ # 4,294,967,295 divided by /spaces_n/.
+ maxnum = 4294967295 / spaces
+ # 4.1 18. let /number_n/ be a random integer from 0 to /max_n/
+ # inclusive.
+ number = random.randint(0, maxnum)
+ # 4.1 19. let /product_n/ be the result of multiplying /number_n/ and
+ # /spaces_n/ together.
+ product = number * spaces
+ # 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed
+ # in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to
+ # U+0039 DIGIT NINE (9).
+ key = str(product)
+ # 4.1 21. insert between one and twelve random characters from the
+ # range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random
+ # positions.
+ available_chars = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
+ n = random.randint(1, 12)
+ for _ in xrange(n):
+ ch = random.choice(available_chars)
+ pos = random.randint(0, len(key))
+ key = key[0:pos] + chr(ch) + key[pos:]
+ # 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at
+ # random positions other than start or end of the string.
+ for _ in xrange(spaces):
+ pos = random.randint(1, len(key) - 1)
+ key = key[0:pos] + ' ' + key[pos:]
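+        # For instance, with spaces=2 and number=50000 the product is
+        # 100000; after the random insertions the key might look like
+        # 'P10$00 0 0', from which the server recovers 50000 by taking the
+        # digits (100000) and dividing by the number of spaces (2).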
+ return number, key
+
+ def _generate_key3(self):
+ # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
+ # equivalently, a random 64 bit integer encoded in a big-endian order).
+ return ''.join([chr(random.randint(0, 255)) for _ in xrange(8)])
+
+
+class WebSocketHixie75Handshake(object):
+ """WebSocket handshake processor for IETF Hixie 75."""
+
+ _EXPECTED_RESPONSE = (
+ 'HTTP/1.1 101 Web Socket Protocol Handshake\r\n' +
+ _UPGRADE_HEADER_HIXIE75 +
+ _CONNECTION_HEADER)
+
+ def __init__(self, options):
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+
+ def _skip_headers(self):
+ terminator = '\r\n\r\n'
+ pos = 0
+ while pos < len(terminator):
+ received = receive_bytes(self._socket, 1)
+ if received == terminator[pos]:
+ pos += 1
+ elif received == terminator[0]:
+ pos = 1
+ else:
+ pos = 0
+
+ def handshake(self, socket):
+ self._socket = socket
+
+ request_line = _method_line(self._options.resource)
+ self._logger.debug('Opening handshake Request-Line: %r', request_line)
+ self._socket.sendall(request_line)
+
+ headers = _UPGRADE_HEADER_HIXIE75 + _CONNECTION_HEADER
+ headers += _format_host_header(
+ self._options.server_host,
+ self._options.server_port,
+ self._options.use_tls)
+ headers += _origin_header(self._options.origin)
+ self._logger.debug('Opening handshake request headers: %r', headers)
+ self._socket.sendall(headers)
+
+ self._socket.sendall('\r\n')
+
+ self._logger.info('Sent opening handshake request')
+
+ for expected_char in WebSocketHixie75Handshake._EXPECTED_RESPONSE:
+ received = receive_bytes(self._socket, 1)
+ if expected_char != received:
+ raise Exception('Handshake failure')
+ # We cut corners and skip other headers.
+ self._skip_headers()
+
+
+class WebSocketStream(object):
+ """Frame processor for the WebSocket protocol (RFC 6455)."""
+
+ def __init__(self, socket, handshake):
+ self._handshake = handshake
+ self._socket = socket
+
+ # Filters applied to application data part of data frames.
+ self._outgoing_frame_filter = None
+ self._incoming_frame_filter = None
+
+ if self._handshake._options.use_deflate_frame:
+ self._outgoing_frame_filter = (
+ util._RFC1979Deflater(None, False))
+ self._incoming_frame_filter = util._RFC1979Inflater()
+
+ self._fragmented = False
+
+ def _mask_hybi(self, s):
+ # TODO(tyoshino): os.urandom does open/read/close for every call. If
+        # performance matters, change this to a library call that generates a
+        # cryptographically secure pseudo-random number sequence.
+ masking_nonce = os.urandom(4)
+ result = [masking_nonce]
+ count = 0
+ for c in s:
+ result.append(chr(ord(c) ^ ord(masking_nonce[count])))
+ count = (count + 1) % len(masking_nonce)
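+        # As an illustration, if os.urandom returned '\x01\x02\x03\x04',
+        # the payload 'abcd' would be sent as
+        # '\x01\x02\x03\x04\x60\x60\x60\x60' (the nonce followed by the
+        # XOR-masked bytes).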
+ return ''.join(result)
+
+ def send_frame_of_arbitrary_bytes(self, header, body):
+ self._socket.sendall(header + self._mask_hybi(body))
+
+ def send_data(self, payload, frame_type, end=True, mask=True,
+ rsv1=0, rsv2=0, rsv3=0):
+ if self._outgoing_frame_filter is not None:
+ payload = self._outgoing_frame_filter.filter(payload)
+
+ if self._fragmented:
+ opcode = OPCODE_CONTINUATION
+ else:
+ opcode = frame_type
+
+ if end:
+ self._fragmented = False
+ fin = 1
+ else:
+ self._fragmented = True
+ fin = 0
+
+ if self._handshake._options.use_deflate_frame:
+ rsv1 = 1
+
+ if mask:
+ mask_bit = 1 << 7
+ else:
+ mask_bit = 0
+
+ header = chr(fin << 7 | rsv1 << 6 | rsv2 << 5 | rsv3 << 4 | opcode)
+ payload_length = len(payload)
+ if payload_length <= 125:
+ header += chr(mask_bit | payload_length)
+ elif payload_length < 1 << 16:
+ header += chr(mask_bit | 126) + struct.pack('!H', payload_length)
+ elif payload_length < 1 << 63:
+ header += chr(mask_bit | 127) + struct.pack('!Q', payload_length)
+ else:
+ raise Exception('Too long payload (%d byte)' % payload_length)
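+        # For example, a complete 5-byte masked text frame with no extension
+        # in use starts with the two bytes '\x81\x85' (FIN + text opcode,
+        # then mask bit + length 5), followed by the 4-byte masking nonce
+        # and the masked payload.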
+ if mask:
+ payload = self._mask_hybi(payload)
+ self._socket.sendall(header + payload)
+
+ def send_binary(self, payload, end=True, mask=True):
+ self.send_data(payload, OPCODE_BINARY, end, mask)
+
+ def send_text(self, payload, end=True, mask=True):
+ self.send_data(payload.encode('utf-8'), OPCODE_TEXT, end, mask)
+
+ def _assert_receive_data(self, payload, opcode, fin, rsv1, rsv2, rsv3):
+ (actual_fin, actual_rsv1, actual_rsv2, actual_rsv3, actual_opcode,
+ payload_length) = read_frame_header(self._socket)
+
+ if actual_opcode != opcode:
+ raise Exception(
+ 'Unexpected opcode: %d (expected) vs %d (actual)' %
+ (opcode, actual_opcode))
+
+ if actual_fin != fin:
+ raise Exception(
+ 'Unexpected fin: %d (expected) vs %d (actual)' %
+ (fin, actual_fin))
+
+ if rsv1 is None:
+ rsv1 = 0
+ if self._handshake._options.use_deflate_frame:
+ rsv1 = 1
+
+ if rsv2 is None:
+ rsv2 = 0
+
+ if rsv3 is None:
+ rsv3 = 0
+
+ if actual_rsv1 != rsv1:
+ raise Exception(
+ 'Unexpected rsv1: %r (expected) vs %r (actual)' %
+ (rsv1, actual_rsv1))
+
+ if actual_rsv2 != rsv2:
+ raise Exception(
+ 'Unexpected rsv2: %r (expected) vs %r (actual)' %
+ (rsv2, actual_rsv2))
+
+ if actual_rsv3 != rsv3:
+ raise Exception(
+ 'Unexpected rsv3: %r (expected) vs %r (actual)' %
+ (rsv3, actual_rsv3))
+
+ received = receive_bytes(self._socket, payload_length)
+
+ if self._incoming_frame_filter is not None:
+ received = self._incoming_frame_filter.filter(received)
+
+ if len(received) != len(payload):
+ raise Exception(
+ 'Unexpected payload length: %d (expected) vs %d (actual)' %
+ (len(payload), len(received)))
+
+ if payload != received:
+ raise Exception(
+ 'Unexpected payload: %r (expected) vs %r (actual)' %
+ (payload, received))
+
+ def assert_receive_binary(self, payload, opcode=OPCODE_BINARY, fin=1,
+ rsv1=None, rsv2=None, rsv3=None):
+ self._assert_receive_data(payload, opcode, fin, rsv1, rsv2, rsv3)
+
+ def assert_receive_text(self, payload, opcode=OPCODE_TEXT, fin=1,
+ rsv1=None, rsv2=None, rsv3=None):
+ self._assert_receive_data(payload.encode('utf-8'), opcode, fin, rsv1,
+ rsv2, rsv3)
+
+ def _build_close_frame(self, code, reason, mask):
+ frame = chr(1 << 7 | OPCODE_CLOSE)
+
+ if code is not None:
+ body = struct.pack('!H', code) + reason.encode('utf-8')
+ else:
+ body = ''
+ if mask:
+ frame += chr(1 << 7 | len(body)) + self._mask_hybi(body)
+ else:
+ frame += chr(len(body)) + body
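+        # For example, _build_close_frame(1000, '', False) yields
+        # '\x88\x02\x03\xe8': the close opcode with FIN set, a 2-byte body,
+        # and the status code 1000 in network byte order.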
+ return frame
+
+ def send_close(self, code, reason):
+ self._socket.sendall(
+ self._build_close_frame(code, reason, True))
+
+ def assert_receive_close(self, code, reason):
+ expected_frame = self._build_close_frame(code, reason, False)
+ actual_frame = receive_bytes(self._socket, len(expected_frame))
+ if actual_frame != expected_frame:
+ raise Exception(
+ 'Unexpected close frame: %r (expected) vs %r (actual)' %
+ (expected_frame, actual_frame))
+
+
+class WebSocketStreamHixie75(object):
+ """Frame processor for the WebSocket protocol version Hixie 75 and HyBi 00.
+ """
+
+ _CLOSE_FRAME = '\xff\x00'
+
+ def __init__(self, socket, unused_handshake):
+ self._socket = socket
+
+ def send_frame_of_arbitrary_bytes(self, header, body):
+ self._socket.sendall(header + body)
+
+    def send_data(self, payload, unused_frame_type, unused_end, unused_mask):
+ frame = ''.join(['\x00', payload, '\xff'])
+ self._socket.sendall(frame)
+
+ def send_binary(self, unused_payload, unused_end, unused_mask):
+ pass
+
+ def send_text(self, payload, unused_end, unused_mask):
+ encoded_payload = payload.encode('utf-8')
+ frame = ''.join(['\x00', encoded_payload, '\xff'])
+ self._socket.sendall(frame)
+
+ def assert_receive_binary(self, payload, opcode=OPCODE_BINARY, fin=1,
+ rsv1=0, rsv2=0, rsv3=0):
+ raise Exception('Binary frame is not supported in hixie75')
+
+ def assert_receive_text(self, payload):
+ received = receive_bytes(self._socket, 1)
+
+ if received != '\x00':
+ raise Exception(
+ 'Unexpected frame type: %d (expected) vs %d (actual)' %
+ (0, ord(received)))
+
+ received = receive_bytes(self._socket, len(payload) + 1)
+ if received[-1] != '\xff':
+ raise Exception(
+ 'Termination expected: 0xff (expected) vs %r (actual)' %
+ received)
+
+ if received[0:-1] != payload:
+ raise Exception(
+ 'Unexpected payload: %r (expected) vs %r (actual)' %
+ (payload, received[0:-1]))
+
+ def send_close(self, code, reason):
+ self._socket.sendall(self._CLOSE_FRAME)
+
+ def assert_receive_close(self, unused_code, unused_reason):
+ closing = receive_bytes(self._socket, len(self._CLOSE_FRAME))
+ if closing != self._CLOSE_FRAME:
+ raise Exception('Didn\'t receive closing handshake')
+
+
+class ClientOptions(object):
+ """Holds option values to configure the Client object."""
+
+ def __init__(self):
+ self.version = 13
+ self.server_host = ''
+ self.origin = ''
+ self.resource = ''
+ self.server_port = -1
+ self.socket_timeout = 1000
+ self.use_tls = False
+ self.extensions = []
+        # Enable the deflate-frame extension.
+        self.use_deflate_frame = False
+        # Enable the mux extension.
+        self.use_mux = False
+
+ def enable_deflate_frame(self):
+ self.use_deflate_frame = True
+ self.extensions.append(_DEFLATE_FRAME_EXTENSION)
+
+ def enable_mux(self):
+ self.use_mux = True
+ self.extensions.append(_MUX_EXTENSION)
+
+
+def connect_socket_with_retry(host, port, timeout, use_tls,
+ retry=10, sleep_sec=0.1):
+ retry_count = 0
+ while retry_count < retry:
+ try:
+ s = socket.socket()
+ s.settimeout(timeout)
+ s.connect((host, port))
+ if use_tls:
+ return _TLSSocket(s)
+ return s
+ except socket.error, e:
+ if e.errno != errno.ECONNREFUSED:
+ raise
+ else:
+ retry_count = retry_count + 1
+ time.sleep(sleep_sec)
+
+ return None
+
+
+class Client(object):
+ """WebSocket client."""
+
+ def __init__(self, options, handshake, stream_class):
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+ self._socket = None
+
+ self._handshake = handshake
+ self._stream_class = stream_class
+
+ def connect(self):
+ self._socket = connect_socket_with_retry(
+ self._options.server_host,
+ self._options.server_port,
+ self._options.socket_timeout,
+ self._options.use_tls)
+
+ self._handshake.handshake(self._socket)
+
+ self._stream = self._stream_class(self._socket, self._handshake)
+
+ self._logger.info('Connection established')
+
+ def send_frame_of_arbitrary_bytes(self, header, body):
+ self._stream.send_frame_of_arbitrary_bytes(header, body)
+
+ def send_message(self, message, end=True, binary=False, raw=False,
+ mask=True):
+ if binary:
+ self._stream.send_binary(message, end, mask)
+ elif raw:
+ self._stream.send_data(message, OPCODE_TEXT, end, mask)
+ else:
+ self._stream.send_text(message, end, mask)
+
+ def assert_receive(self, payload, binary=False):
+ if binary:
+ self._stream.assert_receive_binary(payload)
+ else:
+ self._stream.assert_receive_text(payload)
+
+ def send_close(self, code=STATUS_NORMAL_CLOSURE, reason=''):
+ self._stream.send_close(code, reason)
+
+ def assert_receive_close(self, code=STATUS_NORMAL_CLOSURE, reason=''):
+ self._stream.assert_receive_close(code, reason)
+
+ def close_socket(self):
+ self._socket.close()
+
+ def assert_connection_closed(self):
+ try:
+ read_data = receive_bytes(self._socket, 1)
+ except Exception, e:
+ if str(e).find(
+ 'Connection closed before receiving requested length ') == 0:
+ return
+ try:
+ error_number, message = e
+ for error_name in ['ECONNRESET', 'WSAECONNRESET']:
+ if (error_name in dir(errno) and
+ error_number == getattr(errno, error_name)):
+ return
+ except:
+ raise e
+ raise e
+
+ raise Exception('Connection is not closed (Read: %r)' % read_data)
+
+
+def create_client(options):
+ return Client(
+ options, WebSocketHandshake(options), WebSocketStream)
+
+
+def create_client_hybi00(options):
+ return Client(
+ options,
+ WebSocketHybi00Handshake(options, '0'),
+ WebSocketStreamHixie75)
+
+
+def create_client_hixie75(options):
+ return Client(
+ options, WebSocketHixie75Handshake(options), WebSocketStreamHixie75)
+
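+# A rough usage sketch; the host, port and resource values here are only
+# illustrative (real values come from the test configuration):
+#
+#   options = ClientOptions()
+#   options.server_host = 'localhost'
+#   options.server_port = 8880
+#   options.origin = 'http://localhost:8880'
+#   options.resource = '/echo'
+#   client = create_client(options)
+#   client.connect()
+#   client.send_message('Hello')
+#   client.assert_receive('Hello')
+#   client.send_close()
+#   client.assert_receive_close()
+#   client.close_socket()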
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/endtoend_with_external_server.py b/testing/web-platform/tests/tools/pywebsocket/src/test/endtoend_with_external_server.py
new file mode 100755
index 000000000..47f86fdb4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/endtoend_with_external_server.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Test for end-to-end with external server.
+
+This test is not run by run_all.py because it requires some preparations.
+To run this test correctly, launch Apache with mod_python and mod_pywebsocket
+manually. In addition, pass the allow_draft75 option and use the example
+directory as both the handler_scan option and Apache's DocumentRoot.
+"""
+
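+# Example invocation once such a server is running (the port number is only
+# illustrative):
+#
+#   ./endtoend_with_external_server.py --port 8080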
+
+import optparse
+import sys
+import test.test_endtoend
+import unittest
+
+
+_DEFAULT_WEB_SOCKET_PORT = 80
+
+
+class EndToEndTestWithExternalServer(test.test_endtoend.EndToEndTest):
+ pass
+
+if __name__ == '__main__':
+ parser = optparse.OptionParser()
+ parser.add_option('-p', '--port', dest='port', type='int',
+ default=_DEFAULT_WEB_SOCKET_PORT,
+ help='external test server port.')
+ (options, args) = parser.parse_args()
+
+ test.test_endtoend._use_external_server = True
+ test.test_endtoend._external_server_port = options.port
+
+ unittest.main(argv=[sys.argv[0]])
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/mock.py b/testing/web-platform/tests/tools/pywebsocket/src/test/mock.py
new file mode 100644
index 000000000..6bffcac48
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/mock.py
@@ -0,0 +1,221 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Mocks for testing.
+"""
+
+
+import Queue
+import threading
+
+from mod_pywebsocket import common
+from mod_pywebsocket.stream import StreamHixie75
+
+
+class _MockConnBase(object):
+ """Base class of mocks for mod_python.apache.mp_conn.
+
+ This enables tests to check what is written to a (mock) mp_conn.
+ """
+
+ def __init__(self):
+ self._write_data = []
+ self.remote_addr = 'fake_address'
+
+ def write(self, data):
+ """Override mod_python.apache.mp_conn.write."""
+
+ self._write_data.append(data)
+
+ def written_data(self):
+ """Get bytes written to this mock."""
+
+ return ''.join(self._write_data)
+
+
+class MockConn(_MockConnBase):
+ """Mock for mod_python.apache.mp_conn.
+
+ This enables tests to specify what should be read from a (mock) mp_conn as
+ well as to check what is written to it.
+ """
+
+ def __init__(self, read_data):
+ """Constructs an instance.
+
+ Args:
+ read_data: bytes that should be returned when read* methods are
+ called.
+ """
+
+ _MockConnBase.__init__(self)
+ self._read_data = read_data
+ self._read_pos = 0
+
+ def readline(self):
+ """Override mod_python.apache.mp_conn.readline."""
+
+ if self._read_pos >= len(self._read_data):
+ return ''
+ end_index = self._read_data.find('\n', self._read_pos) + 1
+ if not end_index:
+ end_index = len(self._read_data)
+ return self._read_up_to(end_index)
+
+ def read(self, length):
+ """Override mod_python.apache.mp_conn.read."""
+
+ if self._read_pos >= len(self._read_data):
+ return ''
+ end_index = min(len(self._read_data), self._read_pos + length)
+ return self._read_up_to(end_index)
+
+ def _read_up_to(self, end_index):
+ line = self._read_data[self._read_pos:end_index]
+ self._read_pos = end_index
+ return line
+
+
+class MockBlockingConn(_MockConnBase):
+ """Blocking mock for mod_python.apache.mp_conn.
+
+ This enables tests to specify what should be read from a (mock) mp_conn as
+ well as to check what is written to it.
+    Callers of read* methods will block if no bytes are available.
+ """
+
+ def __init__(self):
+ _MockConnBase.__init__(self)
+ self._queue = Queue.Queue()
+
+ def readline(self):
+ """Override mod_python.apache.mp_conn.readline."""
+ line = ''
+ while True:
+ c = self._queue.get()
+ line += c
+ if c == '\n':
+ return line
+
+ def read(self, length):
+ """Override mod_python.apache.mp_conn.read."""
+
+ data = ''
+ for unused in range(length):
+ data += self._queue.get()
+ return data
+
+ def put_bytes(self, bytes):
+ """Put bytes to be read from this mock.
+
+ Args:
+ bytes: bytes to be read.
+ """
+
+ for byte in bytes:
+ self._queue.put(byte)
+
+
+class MockTable(dict):
+ """Mock table.
+
+ This mimics mod_python mp_table. Note that only the methods used by
+ tests are overridden.
+ """
+
+ def __init__(self, copy_from={}):
+ if isinstance(copy_from, dict):
+ copy_from = copy_from.items()
+ for key, value in copy_from:
+ self.__setitem__(key, value)
+
+ def __getitem__(self, key):
+ return super(MockTable, self).__getitem__(key.lower())
+
+ def __setitem__(self, key, value):
+ super(MockTable, self).__setitem__(key.lower(), value)
+
+ def get(self, key, def_value=None):
+ return super(MockTable, self).get(key.lower(), def_value)
+
+
+class MockRequest(object):
+ """Mock request.
+
+ This mimics mod_python request.
+ """
+
+ def __init__(self, uri=None, headers_in={}, connection=None, method='GET',
+ protocol='HTTP/1.1', is_https=False):
+ """Construct an instance.
+
+        Args:
+            uri: URI of the request.
+            headers_in: Request headers.
+            connection: Connection used for the request.
+            method: Request method.
+            protocol: Protocol version given in the request line.
+            is_https: Whether this request is over SSL.
+
+ See the document of mod_python Request for details.
+ """
+ self.uri = uri
+ self.unparsed_uri = uri
+ self.connection = connection
+ self.method = method
+ self.protocol = protocol
+ self.headers_in = MockTable(headers_in)
+ # self.is_https_ needs to be accessible from tests. To avoid name
+ # conflict with self.is_https(), it is named as such.
+ self.is_https_ = is_https
+ self.ws_stream = StreamHixie75(self, True)
+ self.ws_close_code = None
+ self.ws_close_reason = None
+ self.ws_version = common.VERSION_HYBI00
+ self.ws_deflate = False
+
+ def is_https(self):
+ """Return whether this request is over SSL."""
+ return self.is_https_
+
+
+class MockDispatcher(object):
+ """Mock for dispatch.Dispatcher."""
+
+ def __init__(self):
+ self.do_extra_handshake_called = False
+
+ def do_extra_handshake(self, conn_context):
+ self.do_extra_handshake_called = True
+
+ def transfer_data(self, conn_context):
+ pass
+
+
+# vi:sts=4 sw=4 et
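The mocks above are exercised heavily by the unit tests later in this patch. As a quick illustration of the case-insensitive header handling, a small sketch that should hold under the same Python 2 environment, run from pywebsocket's src directory so that "from test import mock" resolves:

    # MockTable stores keys lower-cased, so header lookups are
    # case-insensitive.
    from test import mock

    conn = mock.MockConn('GET / HTTP/1.1\r\n\r\n')
    assert conn.readline() == 'GET / HTTP/1.1\r\n'

    headers = mock.MockTable({'Content-Type': 'text/plain'})
    assert headers['CONTENT-TYPE'] == 'text/plain'
    assert headers.get('content-type') == 'text/plain'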
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/mux_client_for_testing.py b/testing/web-platform/tests/tools/pywebsocket/src/test/mux_client_for_testing.py
new file mode 100644
index 000000000..dd5435a8c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/mux_client_for_testing.py
@@ -0,0 +1,690 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket client utility for testing mux extension.
+
+This code should be independent of mod_pywebsocket. See the comment in
+client_for_testing.py.
+
+NOTE: This code is far less robust than client_for_testing.py.
+"""
+
+
+
+import Queue
+import base64
+import collections
+import email
+import email.parser
+import logging
+import math
+import os
+import random
+import socket
+import struct
+import threading
+
+from mod_pywebsocket import util
+
+from test import client_for_testing
+
+
+_CONTROL_CHANNEL_ID = 0
+_DEFAULT_CHANNEL_ID = 1
+
+_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
+_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
+_MUX_OPCODE_FLOW_CONTROL = 2
+_MUX_OPCODE_DROP_CHANNEL = 3
+_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
+
+
+class _ControlBlock:
+ def __init__(self, opcode):
+ self.opcode = opcode
+
+
+def _parse_handshake_response(response):
+ status_line, header_lines = response.split('\r\n', 1)
+
+ words = status_line.split(' ')
+ if len(words) < 3:
+ raise ValueError('Bad Status-Line syntax %r' % status_line)
+ [version, response_code] = words[:2]
+ if version != 'HTTP/1.1':
+ raise ValueError('Bad response version %r' % version)
+
+ if response_code != '101':
+ raise ValueError('Bad response code %r ' % response_code)
+ headers = email.parser.Parser().parsestr(header_lines)
+ return headers
+
+
+def _parse_channel_id(data, offset=0):
+ length = len(data)
+ remaining = length - offset
+
+ if remaining <= 0:
+ raise Exception('No channel id found')
+
+ channel_id = ord(data[offset])
+ channel_id_length = 1
+ if channel_id & 0xe0 == 0xe0:
+ if remaining < 4:
+ raise Exception('Invalid channel id format')
+ channel_id = struct.unpack('!L',
+ data[offset:offset+4])[0] & 0x1fffffff
+ channel_id_length = 4
+ elif channel_id & 0xc0 == 0xc0:
+ if remaining < 3:
+ raise Exception('Invalid channel id format')
+ channel_id = (((channel_id & 0x1f) << 16) +
+ struct.unpack('!H', data[offset+1:offset+3])[0])
+ channel_id_length = 3
+ elif channel_id & 0x80 == 0x80:
+ if remaining < 2:
+ raise Exception('Invalid channel id format')
+ channel_id = struct.unpack('!H', data[offset:offset+2])[0] & 0x3fff
+ channel_id_length = 2
+
+ return channel_id, channel_id_length
+
+
+def _parse_number(data, offset=0):
+ first_byte = ord(data[offset])
+ if (first_byte & 0x80) != 0:
+ raise Exception('The MSB of number field must be unset')
+ first_byte = first_byte & 0x7f
+ if first_byte == 127:
+ if offset + 9 > len(data):
+ raise Exception('Invalid number')
+ return struct.unpack('!Q', data[offset+1:offset+9])[0], 9
+ if first_byte == 126:
+ if offset + 3 > len(data):
+ raise Exception('Invalid number')
+ return struct.unpack('!H', data[offset+1:offset+3])[0], 3
+ return first_byte, 1
+
+
+def _parse_size_and_contents(data, offset=0):
+ size, advance = _parse_number(data, offset)
+ start_position = offset + advance
+ end_position = start_position + size
+ if len(data) < end_position:
+ raise Exception('Invalid size of control block (%d < %d)' % (
+ len(data), end_position))
+ return data[start_position:end_position], size + advance
+
+
+def _parse_control_blocks(data):
+ blocks = []
+ length = len(data)
+ pos = 0
+
+ while pos < length:
+ first_byte = ord(data[pos])
+ pos += 1
+ opcode = (first_byte >> 5) & 0x7
+ block = _ControlBlock(opcode)
+
+        # TODO(bashi): Support more opcodes
+ if opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ block.encode = first_byte & 3
+ block.rejected = (first_byte >> 4) & 1
+
+ channel_id, advance = _parse_channel_id(data, pos)
+ block.channel_id = channel_id
+ pos += advance
+
+ encoded_handshake, advance = _parse_size_and_contents(data, pos)
+ block.encoded_handshake = encoded_handshake
+ pos += advance
+ blocks.append(block)
+ elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+ block.mux_error = (first_byte >> 4) & 1
+
+ channel_id, advance = _parse_channel_id(data, pos)
+ block.channel_id = channel_id
+ pos += advance
+
+ reason, advance = _parse_size_and_contents(data, pos)
+ if len(reason) == 0:
+ block.drop_code = None
+ block.drop_message = ''
+ elif len(reason) >= 2:
+ block.drop_code = struct.unpack('!H', reason[:2])[0]
+ block.drop_message = reason[2:]
+ else:
+ raise Exception('Invalid DropChannel')
+ pos += advance
+ blocks.append(block)
+ elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+ channel_id, advance = _parse_channel_id(data, pos)
+ block.channel_id = channel_id
+ pos += advance
+ send_quota, advance = _parse_number(data, pos)
+ block.send_quota = send_quota
+ pos += advance
+ blocks.append(block)
+ elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ fallback = first_byte & 1
+ slots, advance = _parse_number(data, pos)
+ pos += advance
+ send_quota, advance = _parse_number(data, pos)
+ pos += advance
+ if fallback == 1 and (slots != 0 or send_quota != 0):
+ raise Exception('slots and send_quota must be zero if F bit '
+ 'is set')
+ block.fallback = fallback
+ block.slots = slots
+ block.send_quota = send_quota
+ blocks.append(block)
+ else:
+ raise Exception(
+ 'Unsupported mux opcode %d received' % opcode)
+
+ return blocks
+
+
+def _encode_channel_id(channel_id):
+ if channel_id < 0:
+ raise ValueError('Channel id %d must not be negative' % channel_id)
+
+ if channel_id < 2 ** 7:
+ return chr(channel_id)
+ if channel_id < 2 ** 14:
+ return struct.pack('!H', 0x8000 + channel_id)
+ if channel_id < 2 ** 21:
+ first = chr(0xc0 + (channel_id >> 16))
+ return first + struct.pack('!H', channel_id & 0xffff)
+ if channel_id < 2 ** 29:
+ return struct.pack('!L', 0xe0000000 + channel_id)
+
+ raise ValueError('Channel id %d is too large' % channel_id)
+
+
+def _encode_number(number):
+ if number <= 125:
+ return chr(number)
+ elif number < (1 << 16):
+ return chr(0x7e) + struct.pack('!H', number)
+ elif number < (1 << 63):
+ return chr(0x7f) + struct.pack('!Q', number)
+ else:
+ raise Exception('Invalid number')
+
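The channel id above uses a variable-length encoding: the leading bits of the first byte select a 1-, 2-, 3-, or 4-byte form, mirroring _parse_channel_id. A small round-trip sketch, assuming pywebsocket's src directory is on sys.path so this module imports cleanly:

    # Round-trip check for the variable-length channel id encoding.
    from test.mux_client_for_testing import (
        _encode_channel_id, _parse_channel_id)

    for channel_id in (1, 200, 20000, 2 ** 20, 2 ** 28):
        encoded = _encode_channel_id(channel_id)
        decoded, consumed = _parse_channel_id(encoded)
        assert (decoded, consumed) == (channel_id, len(encoded))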
+
+def _create_add_channel_request(channel_id, encoded_handshake,
+ encoding=0):
+ length = len(encoded_handshake)
+ handshake_length = _encode_number(length)
+
+ first_byte = (_MUX_OPCODE_ADD_CHANNEL_REQUEST << 5) | encoding
+ return (chr(first_byte) + _encode_channel_id(channel_id) +
+ handshake_length + encoded_handshake)
+
+
+def _create_flow_control(channel_id, replenished_quota):
+ first_byte = (_MUX_OPCODE_FLOW_CONTROL << 5)
+ return (chr(first_byte) + _encode_channel_id(channel_id) +
+ _encode_number(replenished_quota))
+
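These helpers frame mux control blocks: a three-bit opcode packed into the first byte, followed by opcode-specific fields such as a channel id and encoded numbers. A sanity-check sketch under the same import assumption as above:

    # A FlowControl block built by the helper parses back into the same
    # fields.
    from test.mux_client_for_testing import (
        _MUX_OPCODE_FLOW_CONTROL, _create_flow_control, _parse_control_blocks)

    block = _parse_control_blocks(_create_flow_control(2, 1024))[0]
    assert block.opcode == _MUX_OPCODE_FLOW_CONTROL
    assert (block.channel_id, block.send_quota) == (2, 1024)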
+
+class _MuxReaderThread(threading.Thread):
+ """Mux reader thread.
+
+ Reads frames and passes them to the mux client. This thread accesses
+ private functions/variables of the mux client.
+ """
+
+ def __init__(self, mux):
+ threading.Thread.__init__(self)
+ self.setDaemon(True)
+ self._mux = mux
+ self._stop_requested = False
+
+ def _receive_message(self):
+ first_opcode = None
+ pending_payload = []
+ while not self._stop_requested:
+ fin, rsv1, rsv2, rsv3, opcode, payload_length = (
+ client_for_testing.read_frame_header(self._mux._socket))
+
+ if not first_opcode:
+ if opcode == client_for_testing.OPCODE_TEXT:
+ raise Exception('Received a text message on physical '
+ 'connection')
+ if opcode == client_for_testing.OPCODE_CONTINUATION:
+ raise Exception('Received an intermediate frame but '
+ 'fragmentation was not started')
+ if (opcode == client_for_testing.OPCODE_BINARY or
+ opcode == client_for_testing.OPCODE_PONG or
+                    opcode == client_for_testing.OPCODE_PING or
+ opcode == client_for_testing.OPCODE_CLOSE):
+ first_opcode = opcode
+ else:
+ raise Exception('Received an undefined opcode frame: %d' %
+ opcode)
+
+ elif opcode != client_for_testing.OPCODE_CONTINUATION:
+ raise Exception('Received a new opcode before '
+ 'terminating fragmentation')
+
+ payload = client_for_testing.receive_bytes(
+ self._mux._socket, payload_length)
+
+ if self._mux._incoming_frame_filter is not None:
+ payload = self._mux._incoming_frame_filter.filter(payload)
+
+ pending_payload.append(payload)
+
+ if fin:
+ break
+
+ if self._stop_requested:
+ return None, None
+
+ message = ''.join(pending_payload)
+ return first_opcode, message
+
+ def request_stop(self):
+ self._stop_requested = True
+
+ def run(self):
+ try:
+ while not self._stop_requested:
+ # opcode is OPCODE_BINARY or control opcodes when a message
+                # is successfully received.
+ opcode, message = self._receive_message()
+ if not opcode:
+ return
+ if opcode == client_for_testing.OPCODE_BINARY:
+ channel_id, advance = _parse_channel_id(message)
+ self._mux._dispatch_frame(channel_id, message[advance:])
+ else:
+ self._mux._process_control_message(opcode, message)
+ finally:
+ self._mux._notify_reader_done()
+
+
+class _InnerFrame(object):
+ def __init__(self, fin, rsv1, rsv2, rsv3, opcode, payload):
+ self.fin = fin
+ self.rsv1 = rsv1
+ self.rsv2 = rsv2
+ self.rsv3 = rsv3
+ self.opcode = opcode
+ self.payload = payload
+
+
+class _LogicalChannelData(object):
+ def __init__(self):
+ self.queue = Queue.Queue()
+ self.send_quota = 0
+ self.receive_quota = 0
+
+
+class MuxClient(object):
+ """WebSocket mux client.
+
+ Note that this class is NOT thread-safe. Do not access an instance of this
+ class from multiple threads at a same time.
+ """
+
+ def __init__(self, options):
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+ self._options.enable_mux()
+ self._stream = None
+ self._socket = None
+ self._handshake = client_for_testing.WebSocketHandshake(self._options)
+ self._incoming_frame_filter = None
+ self._outgoing_frame_filter = None
+
+ self._is_active = False
+ self._read_thread = None
+ self._control_blocks_condition = threading.Condition()
+ self._control_blocks = []
+ self._channel_slots = collections.deque()
+        self._logical_channels_condition = threading.Condition()
+ self._logical_channels = {}
+ self._timeout = 2
+ self._physical_connection_close_event = None
+ self._physical_connection_close_message = None
+
+ def _parse_inner_frame(self, data):
+ if len(data) == 0:
+ raise Exception('Invalid encapsulated frame received')
+
+ first_byte = ord(data[0])
+        fin = (first_byte >> 7) & 1
+        rsv1 = (first_byte >> 6) & 1
+        rsv2 = (first_byte >> 5) & 1
+        rsv3 = (first_byte >> 4) & 1
+ opcode = first_byte & 0xf
+
+ if self._outgoing_frame_filter:
+ payload = self._outgoing_frame_filter.filter(
+ data[1:])
+ else:
+ payload = data[1:]
+
+ return _InnerFrame(fin, rsv1, rsv2, rsv3, opcode, payload)
+
+ def _process_mux_control_blocks(self):
+ for block in self._control_blocks:
+ if block.opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ # AddChannelResponse will be handled in add_channel().
+ continue
+ elif block.opcode == _MUX_OPCODE_FLOW_CONTROL:
+ try:
+ self._logical_channels_condition.acquire()
+ if not block.channel_id in self._logical_channels:
+ raise Exception('Invalid flow control received for '
+ 'channel id %d' % block.channel_id)
+ self._logical_channels[block.channel_id].send_quota += (
+ block.send_quota)
+ self._logical_channels_condition.notify()
+ finally:
+ self._logical_channels_condition.release()
+ elif block.opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ self._channel_slots.extend([block.send_quota] * block.slots)
+
+ def _dispatch_frame(self, channel_id, payload):
+ if channel_id == _CONTROL_CHANNEL_ID:
+ try:
+ self._control_blocks_condition.acquire()
+ self._control_blocks += _parse_control_blocks(payload)
+ self._process_mux_control_blocks()
+ self._control_blocks_condition.notify()
+ finally:
+ self._control_blocks_condition.release()
+ else:
+ try:
+ self._logical_channels_condition.acquire()
+ if not channel_id in self._logical_channels:
+ raise Exception('Received logical frame on channel id '
+ '%d, which is not established' %
+ channel_id)
+
+ inner_frame = self._parse_inner_frame(payload)
+ self._logical_channels[channel_id].receive_quota -= (
+ len(inner_frame.payload))
+ if self._logical_channels[channel_id].receive_quota < 0:
+ raise Exception('The server violates quota on '
+ 'channel id %d' % channel_id)
+ finally:
+ self._logical_channels_condition.release()
+ self._logical_channels[channel_id].queue.put(inner_frame)
+
+ def _process_control_message(self, opcode, message):
+ # Ping/Pong are not supported.
+ if opcode == client_for_testing.OPCODE_CLOSE:
+ self._physical_connection_close_message = message
+ if self._is_active:
+ self._stream.send_close(
+ code=client_for_testing.STATUS_NORMAL_CLOSURE, reason='')
+ self._read_thread.request_stop()
+
+ if self._physical_connection_close_event:
+ self._physical_connection_close_event.set()
+
+ def _notify_reader_done(self):
+ self._logger.debug('Read thread terminated.')
+ self.close_socket()
+
+ def _assert_channel_slot_available(self):
+ try:
+ self._control_blocks_condition.acquire()
+ if len(self._channel_slots) == 0:
+ # Wait once
+ self._control_blocks_condition.wait(timeout=self._timeout)
+ finally:
+ self._control_blocks_condition.release()
+
+ if len(self._channel_slots) == 0:
+ raise Exception('Failed to receive NewChannelSlot')
+
+ def _assert_send_quota_available(self, channel_id):
+ try:
+ self._logical_channels_condition.acquire()
+ if self._logical_channels[channel_id].send_quota == 0:
+ # Wait once
+ self._logical_channels_condition.wait(timeout=self._timeout)
+ finally:
+ self._logical_channels_condition.release()
+
+ if self._logical_channels[channel_id].send_quota == 0:
+ raise Exception('Failed to receive FlowControl for channel id %d' %
+ channel_id)
+
+ def connect(self):
+ self._socket = client_for_testing.connect_socket_with_retry(
+ self._options.server_host,
+ self._options.server_port,
+ self._options.socket_timeout,
+ self._options.use_tls)
+
+ self._handshake.handshake(self._socket)
+ self._stream = client_for_testing.WebSocketStream(
+ self._socket, self._handshake)
+
+ self._logical_channels[_DEFAULT_CHANNEL_ID] = _LogicalChannelData()
+
+ self._read_thread = _MuxReaderThread(self)
+ self._read_thread.start()
+
+ self._assert_channel_slot_available()
+ self._assert_send_quota_available(_DEFAULT_CHANNEL_ID)
+
+ self._is_active = True
+ self._logger.info('Connection established')
+
+ def add_channel(self, channel_id, options):
+ if not self._is_active:
+ raise Exception('Mux client is not active')
+
+ if channel_id in self._logical_channels:
+ raise Exception('Channel id %d already exists' % channel_id)
+
+ try:
+ send_quota = self._channel_slots.popleft()
+ except IndexError, e:
+ raise Exception('No channel slots: %r' % e)
+
+ # Create AddChannel request
+ request_line = 'GET %s HTTP/1.1\r\n' % options.resource
+ fields = []
+ if options.server_port == client_for_testing.DEFAULT_PORT:
+ fields.append('Host: %s\r\n' % options.server_host.lower())
+ else:
+ fields.append('Host: %s:%d\r\n' % (options.server_host.lower(),
+ options.server_port))
+ fields.append('Origin: %s\r\n' % options.origin.lower())
+ fields.append('Connection: Upgrade\r\n')
+
+ if len(options.extensions) > 0:
+ fields.append('Sec-WebSocket-Extensions: %s\r\n' %
+ ', '.join(options.extensions))
+
+ handshake = request_line + ''.join(fields) + '\r\n'
+ add_channel_request = _create_add_channel_request(
+ channel_id, handshake)
+ payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + add_channel_request
+ self._stream.send_binary(payload)
+
+        # Wait for AddChannelResponse
+        self._logger.debug('Waiting for AddChannelResponse to the request...')
+ response = None
+ try:
+ self._control_blocks_condition.acquire()
+ while True:
+ for block in self._control_blocks:
+ if block.opcode != _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ continue
+ if block.channel_id == channel_id:
+ response = block
+ self._control_blocks.remove(response)
+ break
+ if response:
+ break
+ self._control_blocks_condition.wait(self._timeout)
+ if not self._is_active:
+ raise Exception('AddChannelRequest timed out')
+ finally:
+ self._control_blocks_condition.release()
+
+ # Validate AddChannelResponse
+ if response.rejected:
+ raise Exception('The server rejected AddChannelRequest')
+
+ fields = _parse_handshake_response(response.encoded_handshake)
+
+ # Should we reject when Upgrade, Connection, or Sec-WebSocket-Accept
+ # headers exist?
+
+ self._logical_channels_condition.acquire()
+ self._logical_channels[channel_id] = _LogicalChannelData()
+ self._logical_channels[channel_id].send_quota = send_quota
+ self._logical_channels_condition.release()
+
+ self._logger.debug('Logical channel %d established' % channel_id)
+
+ def _check_logical_channel_is_opened(self, channel_id):
+ if not self._is_active:
+ raise Exception('Mux client is not active')
+
+ if not channel_id in self._logical_channels:
+            raise Exception('Logical channel %d is not established.' %
+                            channel_id)
+
+ def drop_channel(self, channel_id):
+ # TODO(bashi): Implement
+ pass
+
+ def send_flow_control(self, channel_id, replenished_quota):
+ self._check_logical_channel_is_opened(channel_id)
+ flow_control = _create_flow_control(channel_id, replenished_quota)
+ payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + flow_control
+ # Replenish receive quota
+ try:
+ self._logical_channels_condition.acquire()
+ self._logical_channels[channel_id].receive_quota += (
+ replenished_quota)
+ finally:
+ self._logical_channels_condition.release()
+ self._stream.send_binary(payload)
+
+ def send_message(self, channel_id, message, end=True, binary=False):
+ self._check_logical_channel_is_opened(channel_id)
+
+ if binary:
+ first_byte = (end << 7) | client_for_testing.OPCODE_BINARY
+ else:
+ first_byte = (end << 7) | client_for_testing.OPCODE_TEXT
+ message = message.encode('utf-8')
+
+ try:
+ self._logical_channels_condition.acquire()
+ if self._logical_channels[channel_id].send_quota < len(message):
+ raise Exception('Send quota violation: %d < %d' % (
+ self._logical_channels[channel_id].send_quota,
+ len(message)))
+
+ self._logical_channels[channel_id].send_quota -= len(message)
+ finally:
+ self._logical_channels_condition.release()
+ payload = _encode_channel_id(channel_id) + chr(first_byte) + message
+ self._stream.send_binary(payload)
+
+ def assert_receive(self, channel_id, payload, binary=False):
+ self._check_logical_channel_is_opened(channel_id)
+
+ try:
+ inner_frame = self._logical_channels[channel_id].queue.get(
+ timeout=self._timeout)
+ except Queue.Empty, e:
+ raise Exception('Cannot receive message from channel id %d' %
+ channel_id)
+
+ if binary:
+ opcode = client_for_testing.OPCODE_BINARY
+ else:
+ opcode = client_for_testing.OPCODE_TEXT
+
+ if inner_frame.opcode != opcode:
+ raise Exception('Unexpected opcode received (%r != %r)' %
+                            (opcode, inner_frame.opcode))
+
+ if inner_frame.payload != payload:
+ raise Exception('Unexpected payload received')
+
+ def send_close(self, channel_id, code=None, reason=''):
+ self._check_logical_channel_is_opened(channel_id)
+
+ if code is not None:
+ body = struct.pack('!H', code) + reason.encode('utf-8')
+ else:
+ body = ''
+
+ first_byte = (1 << 7) | client_for_testing.OPCODE_CLOSE
+ payload = _encode_channel_id(channel_id) + chr(first_byte) + body
+ self._stream.send_binary(payload)
+
+ def assert_receive_close(self, channel_id):
+ self._check_logical_channel_is_opened(channel_id)
+
+ try:
+ inner_frame = self._logical_channels[channel_id].queue.get(
+ timeout=self._timeout)
+ except Queue.Empty, e:
+ raise Exception('Cannot receive message from channel id %d' %
+ channel_id)
+ if inner_frame.opcode != client_for_testing.OPCODE_CLOSE:
+ raise Exception('Didn\'t receive close frame')
+
+ def send_physical_connection_close(self, code=None, reason=''):
+ self._physical_connection_close_event = threading.Event()
+ self._stream.send_close(code, reason)
+
+ # This method can be used only after calling
+ # send_physical_connection_close().
+ def assert_physical_connection_receive_close(
+ self, code=client_for_testing.STATUS_NORMAL_CLOSURE, reason=''):
+ self._physical_connection_close_event.wait(timeout=self._timeout)
+ if (not self._physical_connection_close_event.isSet() or
+ not self._physical_connection_close_message):
+ raise Exception('Didn\'t receive closing handshake')
+
+ def close_socket(self):
+ self._is_active = False
+ self._socket.close()
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/run_all.py b/testing/web-platform/tests/tools/pywebsocket/src/test/run_all.py
new file mode 100755
index 000000000..80a5d87d8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/run_all.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Run all tests in the same directory.
+
+This suite is expected to be run under pywebsocket's src directory, i.e. the
+directory containing mod_pywebsocket, test, etc.
+
+To change the logging level, specify the --log-level option.
+    python test/run_all.py --log-level debug
+
+To pass options to the unittest module, specify them after '--'. For
+example, run the following to make the test runner verbose.
+    python test/run_all.py --log-level debug -- -v
+"""
+
+
+import logging
+import optparse
+import os
+import re
+import sys
+import unittest
+
+
+_TEST_MODULE_PATTERN = re.compile(r'^(test_.+)\.py$')
+
+
+def _list_test_modules(directory):
+ module_names = []
+ for filename in os.listdir(directory):
+ match = _TEST_MODULE_PATTERN.search(filename)
+ if match:
+ module_names.append(match.group(1))
+ return module_names
+
+
+def _suite():
+ loader = unittest.TestLoader()
+ return loader.loadTestsFromNames(
+ _list_test_modules(os.path.join(os.path.split(__file__)[0], '.')))
+
+
+if __name__ == '__main__':
+ parser = optparse.OptionParser()
+ parser.add_option('--log-level', '--log_level', type='choice',
+ dest='log_level', default='warning',
+ choices=['debug', 'info', 'warning', 'warn', 'error',
+ 'critical'])
+ options, args = parser.parse_args()
+ logging.basicConfig(
+ level=logging.getLevelName(options.log_level.upper()),
+ format='%(levelname)s %(asctime)s '
+ '%(filename)s:%(lineno)d] '
+ '%(message)s',
+ datefmt='%H:%M:%S')
+ unittest.main(defaultTest='_suite', argv=[sys.argv[0]] + args)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/set_sys_path.py b/testing/web-platform/tests/tools/pywebsocket/src/test/set_sys_path.py
new file mode 100644
index 000000000..e3c6db9ea
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/set_sys_path.py
@@ -0,0 +1,45 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Configuration for testing.
+
+Test files should import this module before mod_pywebsocket.
+"""
+
+
+import os
+import sys
+
+
+# Add the parent directory to sys.path to enable importing mod_pywebsocket.
+sys.path.insert(0, os.path.join(os.path.split(__file__)[0], '..'))
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_dispatch.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_dispatch.py
new file mode 100755
index 000000000..9ca3d4f3a
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_dispatch.py
@@ -0,0 +1,288 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for dispatch module."""
+
+
+import os
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from test import mock
+
+
+_TEST_HANDLERS_DIR = os.path.join(
+ os.path.split(__file__)[0], 'testdata', 'handlers')
+
+_TEST_HANDLERS_SUB_DIR = os.path.join(_TEST_HANDLERS_DIR, 'sub')
+
+
+class DispatcherTest(unittest.TestCase):
+ """A unittest for dispatch module."""
+
+ def test_normalize_path(self):
+ self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
+ dispatch._normalize_path('/a/b'))
+ self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
+ dispatch._normalize_path('\\a\\b'))
+ self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
+ dispatch._normalize_path('/a/c/../b'))
+ self.assertEqual(os.path.abspath('abc').replace('\\', '/'),
+ dispatch._normalize_path('abc'))
+
+ def test_converter(self):
+ converter = dispatch._create_path_to_resource_converter('/a/b')
+ # Python built by MSC inserts a drive name like 'C:\' via realpath().
+        # The converter generator expands the provided path using realpath()
+        # and uses the path, including the drive name, to verify the prefix.
+ os_root = os.path.realpath('/')
+ self.assertEqual('/h', converter(os_root + 'a/b/h_wsh.py'))
+ self.assertEqual('/c/h', converter(os_root + 'a/b/c/h_wsh.py'))
+ self.assertEqual(None, converter(os_root + 'a/b/h.py'))
+ self.assertEqual(None, converter('a/b/h_wsh.py'))
+
+ converter = dispatch._create_path_to_resource_converter('a/b')
+ self.assertEqual('/h', converter(dispatch._normalize_path(
+ 'a/b/h_wsh.py')))
+
+ converter = dispatch._create_path_to_resource_converter('/a/b///')
+ self.assertEqual('/h', converter(os_root + 'a/b/h_wsh.py'))
+ self.assertEqual('/h', converter(dispatch._normalize_path(
+ '/a/b/../b/h_wsh.py')))
+
+ converter = dispatch._create_path_to_resource_converter(
+ '/a/../a/b/../b/')
+ self.assertEqual('/h', converter(os_root + 'a/b/h_wsh.py'))
+
+ converter = dispatch._create_path_to_resource_converter(r'\a\b')
+ self.assertEqual('/h', converter(os_root + r'a\b\h_wsh.py'))
+ self.assertEqual('/h', converter(os_root + r'a/b/h_wsh.py'))
+
+ def test_enumerate_handler_file_paths(self):
+ paths = list(
+ dispatch._enumerate_handler_file_paths(_TEST_HANDLERS_DIR))
+ paths.sort()
+ self.assertEqual(8, len(paths))
+ expected_paths = [
+ os.path.join(_TEST_HANDLERS_DIR, 'abort_by_user_wsh.py'),
+ os.path.join(_TEST_HANDLERS_DIR, 'blank_wsh.py'),
+ os.path.join(_TEST_HANDLERS_DIR, 'origin_check_wsh.py'),
+ os.path.join(_TEST_HANDLERS_DIR, 'sub',
+ 'exception_in_transfer_wsh.py'),
+ os.path.join(_TEST_HANDLERS_DIR, 'sub', 'non_callable_wsh.py'),
+ os.path.join(_TEST_HANDLERS_DIR, 'sub', 'plain_wsh.py'),
+ os.path.join(_TEST_HANDLERS_DIR, 'sub',
+ 'wrong_handshake_sig_wsh.py'),
+ os.path.join(_TEST_HANDLERS_DIR, 'sub',
+ 'wrong_transfer_sig_wsh.py'),
+ ]
+ for expected, actual in zip(expected_paths, paths):
+ self.assertEqual(expected, actual)
+
+ def test_source_handler_file(self):
+ self.assertRaises(
+ dispatch.DispatchException, dispatch._source_handler_file, '')
+ self.assertRaises(
+ dispatch.DispatchException, dispatch._source_handler_file, 'def')
+ self.assertRaises(
+ dispatch.DispatchException, dispatch._source_handler_file, '1/0')
+ self.failUnless(dispatch._source_handler_file(
+ 'def web_socket_do_extra_handshake(request):pass\n'
+ 'def web_socket_transfer_data(request):pass\n'))
+
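The two functions checked here are the required entry points a *_wsh.py handler file must define for the Dispatcher to pick it up. A minimal sketch of a hypothetical echo handler (the msgutil-based body is only an illustration of the usual pywebsocket idiom, not part of this patch's test data):

    # Hypothetical echo_wsh.py illustrating the handler contract the
    # Dispatcher scans for.
    from mod_pywebsocket import msgutil


    def web_socket_do_extra_handshake(request):
        pass  # Accept the handshake unconditionally.


    def web_socket_transfer_data(request):
        message = msgutil.receive_message(request)
        msgutil.send_message(request, message)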
+ def test_source_warnings(self):
+ dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+ warnings = dispatcher.source_warnings()
+ warnings.sort()
+ expected_warnings = [
+ (os.path.realpath(os.path.join(
+ _TEST_HANDLERS_DIR, 'blank_wsh.py')) +
+ ': web_socket_do_extra_handshake is not defined.'),
+ (os.path.realpath(os.path.join(
+ _TEST_HANDLERS_DIR, 'sub', 'non_callable_wsh.py')) +
+ ': web_socket_do_extra_handshake is not callable.'),
+ (os.path.realpath(os.path.join(
+ _TEST_HANDLERS_DIR, 'sub', 'wrong_handshake_sig_wsh.py')) +
+ ': web_socket_do_extra_handshake is not defined.'),
+ (os.path.realpath(os.path.join(
+ _TEST_HANDLERS_DIR, 'sub', 'wrong_transfer_sig_wsh.py')) +
+ ': web_socket_transfer_data is not defined.'),
+ ]
+ self.assertEquals(4, len(warnings))
+ for expected, actual in zip(expected_warnings, warnings):
+ self.assertEquals(expected, actual)
+
+ def test_do_extra_handshake(self):
+ dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+ request = mock.MockRequest()
+ request.ws_resource = '/origin_check'
+ request.ws_origin = 'http://example.com'
+ dispatcher.do_extra_handshake(request) # Must not raise exception.
+
+ request.ws_origin = 'http://bad.example.com'
+ try:
+ dispatcher.do_extra_handshake(request)
+ self.fail('Could not catch HandshakeException with 403 status')
+ except handshake.HandshakeException, e:
+ self.assertEquals(403, e.status)
+ except Exception, e:
+ self.fail('Unexpected exception: %r' % e)
+
+ def test_abort_extra_handshake(self):
+ dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+ request = mock.MockRequest()
+ request.ws_resource = '/abort_by_user'
+ self.assertRaises(handshake.AbortedByUserException,
+ dispatcher.do_extra_handshake, request)
+
+ def test_transfer_data(self):
+ dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+
+ request = mock.MockRequest(connection=mock.MockConn('\xff\x00'))
+ request.ws_resource = '/origin_check'
+ request.ws_protocol = 'p1'
+ dispatcher.transfer_data(request)
+ self.assertEqual('origin_check_wsh.py is called for /origin_check, p1'
+ '\xff\x00',
+ request.connection.written_data())
+
+ request = mock.MockRequest(connection=mock.MockConn('\xff\x00'))
+ request.ws_resource = '/sub/plain'
+ request.ws_protocol = None
+ dispatcher.transfer_data(request)
+ self.assertEqual('sub/plain_wsh.py is called for /sub/plain, None'
+ '\xff\x00',
+ request.connection.written_data())
+
+ request = mock.MockRequest(connection=mock.MockConn('\xff\x00'))
+ request.ws_resource = '/sub/plain?'
+ request.ws_protocol = None
+ dispatcher.transfer_data(request)
+ self.assertEqual('sub/plain_wsh.py is called for /sub/plain?, None'
+ '\xff\x00',
+ request.connection.written_data())
+
+ request = mock.MockRequest(connection=mock.MockConn('\xff\x00'))
+ request.ws_resource = '/sub/plain?q=v'
+ request.ws_protocol = None
+ dispatcher.transfer_data(request)
+ self.assertEqual('sub/plain_wsh.py is called for /sub/plain?q=v, None'
+ '\xff\x00',
+ request.connection.written_data())
+
+ def test_transfer_data_no_handler(self):
+ dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+ for resource in ['/blank', '/sub/non_callable',
+ '/sub/no_wsh_at_the_end', '/does/not/exist']:
+ request = mock.MockRequest(connection=mock.MockConn(''))
+ request.ws_resource = resource
+ request.ws_protocol = 'p2'
+ try:
+ dispatcher.transfer_data(request)
+ self.fail()
+ except dispatch.DispatchException, e:
+ self.failUnless(str(e).find('No handler') != -1)
+ except Exception:
+ self.fail()
+
+ def test_transfer_data_handler_exception(self):
+ dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+ request = mock.MockRequest(connection=mock.MockConn(''))
+ request.ws_resource = '/sub/exception_in_transfer'
+ request.ws_protocol = 'p3'
+ try:
+ dispatcher.transfer_data(request)
+ self.fail()
+ except Exception, e:
+ self.failUnless(str(e).find('Intentional') != -1,
+ 'Unexpected exception: %s' % e)
+
+ def test_abort_transfer_data(self):
+ dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+ request = mock.MockRequest()
+ request.ws_resource = '/abort_by_user'
+ self.assertRaises(handshake.AbortedByUserException,
+ dispatcher.transfer_data, request)
+
+ def test_scan_dir(self):
+ disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+ self.assertEqual(4, len(disp._handler_suite_map))
+ self.failUnless('/origin_check' in disp._handler_suite_map)
+ self.failUnless(
+ '/sub/exception_in_transfer' in disp._handler_suite_map)
+ self.failUnless('/sub/plain' in disp._handler_suite_map)
+
+ def test_scan_sub_dir(self):
+ disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, _TEST_HANDLERS_SUB_DIR)
+ self.assertEqual(2, len(disp._handler_suite_map))
+ self.failIf('/origin_check' in disp._handler_suite_map)
+ self.failUnless(
+ '/sub/exception_in_transfer' in disp._handler_suite_map)
+ self.failUnless('/sub/plain' in disp._handler_suite_map)
+
+ def test_scan_sub_dir_as_root(self):
+ disp = dispatch.Dispatcher(_TEST_HANDLERS_SUB_DIR,
+ _TEST_HANDLERS_SUB_DIR)
+ self.assertEqual(2, len(disp._handler_suite_map))
+ self.failIf('/origin_check' in disp._handler_suite_map)
+ self.failIf('/sub/exception_in_transfer' in disp._handler_suite_map)
+ self.failIf('/sub/plain' in disp._handler_suite_map)
+ self.failUnless('/exception_in_transfer' in disp._handler_suite_map)
+ self.failUnless('/plain' in disp._handler_suite_map)
+
+ def test_scan_dir_must_under_root(self):
+ dispatch.Dispatcher('a/b', 'a/b/c') # OK
+ dispatch.Dispatcher('a/b///', 'a/b') # OK
+ self.assertRaises(dispatch.DispatchException,
+ dispatch.Dispatcher, 'a/b/c', 'a/b')
+
+ def test_resource_path_alias(self):
+ disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
+ disp.add_resource_path_alias('/', '/origin_check')
+ self.assertEqual(5, len(disp._handler_suite_map))
+ self.failUnless('/origin_check' in disp._handler_suite_map)
+ self.failUnless(
+ '/sub/exception_in_transfer' in disp._handler_suite_map)
+ self.failUnless('/sub/plain' in disp._handler_suite_map)
+ self.failUnless('/' in disp._handler_suite_map)
+ self.assertRaises(dispatch.DispatchException,
+ disp.add_resource_path_alias, '/alias', '/not-exist')
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_endtoend.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_endtoend.py
new file mode 100755
index 000000000..5e5cf6157
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_endtoend.py
@@ -0,0 +1,753 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""End-to-end tests for pywebsocket. Tests standalone.py by default. You
+can also test mod_pywebsocket hosted on an Apache server by setting
+_use_external_server to True and modifying _external_server_port to point to
+the port on which the Apache server is running.
+"""
+
+
+import logging
+import os
+import signal
+import socket
+import subprocess
+import sys
+import time
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from test import client_for_testing
+from test import mux_client_for_testing
+
+
+# Special message that tells the echo server to start the closing handshake
+_GOODBYE_MESSAGE = 'Goodbye'
+
+_SERVER_WARMUP_IN_SEC = 0.2
+
+# If you want to use an external server to run end-to-end tests, set the
+# following parameters correctly.
+_use_external_server = False
+_external_server_port = 0
+
+
+# Test body functions
+def _echo_check_procedure(client):
+ client.connect()
+
+ client.send_message('test')
+ client.assert_receive('test')
+ client.send_message('helloworld')
+ client.assert_receive('helloworld')
+
+ client.send_close()
+ client.assert_receive_close()
+
+ client.assert_connection_closed()
+
+
+def _echo_check_procedure_with_binary(client):
+ client.connect()
+
+ client.send_message('binary', binary=True)
+ client.assert_receive('binary', binary=True)
+ client.send_message('\x00\x80\xfe\xff\x00\x80', binary=True)
+ client.assert_receive('\x00\x80\xfe\xff\x00\x80', binary=True)
+
+ client.send_close()
+ client.assert_receive_close()
+
+ client.assert_connection_closed()
+
+
+def _echo_check_procedure_with_goodbye(client):
+ client.connect()
+
+ client.send_message('test')
+ client.assert_receive('test')
+
+ client.send_message(_GOODBYE_MESSAGE)
+ client.assert_receive(_GOODBYE_MESSAGE)
+
+ client.assert_receive_close()
+ client.send_close()
+
+ client.assert_connection_closed()
+
+
+def _echo_check_procedure_with_code_and_reason(client, code, reason):
+ client.connect()
+
+ client.send_close(code, reason)
+ client.assert_receive_close(code, reason)
+
+ client.assert_connection_closed()
+
+
+def _unmasked_frame_check_procedure(client):
+ client.connect()
+
+ client.send_message('test', mask=False)
+ client.assert_receive_close(client_for_testing.STATUS_PROTOCOL_ERROR, '')
+
+ client.assert_connection_closed()
+
+
+def _mux_echo_check_procedure(mux_client):
+ mux_client.connect()
+ mux_client.send_flow_control(1, 1024)
+
+ logical_channel_options = client_for_testing.ClientOptions()
+ logical_channel_options.server_host = 'localhost'
+ logical_channel_options.server_port = 80
+ logical_channel_options.origin = 'http://localhost'
+ logical_channel_options.resource = '/echo'
+ mux_client.add_channel(2, logical_channel_options)
+ mux_client.send_flow_control(2, 1024)
+
+ mux_client.send_message(2, 'test')
+ mux_client.assert_receive(2, 'test')
+
+ mux_client.add_channel(3, logical_channel_options)
+ mux_client.send_flow_control(3, 1024)
+
+ mux_client.send_message(2, 'hello')
+ mux_client.send_message(3, 'world')
+ mux_client.assert_receive(2, 'hello')
+ mux_client.assert_receive(3, 'world')
+
+ # Don't send close message on channel id 1 so that server-initiated
+ # closing handshake won't occur.
+ mux_client.send_close(2)
+ mux_client.send_close(3)
+ mux_client.assert_receive_close(2)
+ mux_client.assert_receive_close(3)
+
+ mux_client.send_physical_connection_close()
+ mux_client.assert_physical_connection_receive_close()
+
+
+class EndToEndTestBase(unittest.TestCase):
+ """Base class for end-to-end tests that launch pywebsocket standalone
+ server as a separate process, connect to it using the client_for_testing
+ module, and check if the server behaves correctly by exchanging opening
+ handshake and frames over a TCP connection.
+ """
+
+ def setUp(self):
+ self.server_stderr = None
+ self.top_dir = os.path.join(os.path.split(__file__)[0], '..')
+ os.putenv('PYTHONPATH', os.path.pathsep.join(sys.path))
+ self.standalone_command = os.path.join(
+ self.top_dir, 'mod_pywebsocket', 'standalone.py')
+ self.document_root = os.path.join(self.top_dir, 'example')
+ s = socket.socket()
+ s.bind(('localhost', 0))
+ (_, self.test_port) = s.getsockname()
+ s.close()
+
+ self._options = client_for_testing.ClientOptions()
+ self._options.server_host = 'localhost'
+ self._options.origin = 'http://localhost'
+ self._options.resource = '/echo'
+
+        # TODO(toyoshim): Eliminate launching a standalone server when using
+        # an external server.
+
+ if _use_external_server:
+ self._options.server_port = _external_server_port
+ else:
+ self._options.server_port = self.test_port
+
+ # TODO(tyoshino): Use tearDown to kill the server.
+
+ def _run_python_command(self, commandline, stdout=None, stderr=None):
+ return subprocess.Popen([sys.executable] + commandline, close_fds=True,
+ stdout=stdout, stderr=stderr)
+
+ def _run_server(self):
+ args = [self.standalone_command,
+ '-H', 'localhost',
+ '-V', 'localhost',
+ '-p', str(self.test_port),
+ '-P', str(self.test_port),
+ '-d', self.document_root]
+
+ # Inherit the level set to the root logger by test runner.
+ root_logger = logging.getLogger()
+ log_level = root_logger.getEffectiveLevel()
+ if log_level != logging.NOTSET:
+ args.append('--log-level')
+ args.append(logging.getLevelName(log_level).lower())
+
+ return self._run_python_command(args,
+ stderr=self.server_stderr)
+
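_run_server above amounts to launching the standalone server by hand; a hedged sketch of the same invocation, where port 9001, the log level, and the relative paths are illustrative assumptions and the working directory is pywebsocket's src directory:

    # Manual equivalent of _run_server.
    import subprocess
    import sys

    server = subprocess.Popen(
        [sys.executable, 'mod_pywebsocket/standalone.py',
         '-H', 'localhost', '-V', 'localhost',
         '-p', '9001', '-P', '9001',
         '-d', 'example',
         '--log-level', 'debug'],
        close_fds=True)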
+ def _kill_process(self, pid):
+ if sys.platform in ('win32', 'cygwin'):
+ subprocess.call(
+ ('taskkill.exe', '/f', '/pid', str(pid)), close_fds=True)
+ else:
+ os.kill(pid, signal.SIGKILL)
+
+
+class EndToEndHyBiTest(EndToEndTestBase):
+ def setUp(self):
+ EndToEndTestBase.setUp(self)
+
+ def _run_test_with_client_options(self, test_function, options):
+ server = self._run_server()
+ try:
+ # TODO(tyoshino): add some logic to poll the server until it
+ # becomes ready
+ time.sleep(_SERVER_WARMUP_IN_SEC)
+
+ client = client_for_testing.create_client(options)
+ try:
+ test_function(client)
+ finally:
+ client.close_socket()
+ finally:
+ self._kill_process(server.pid)
+
+ def _run_test(self, test_function):
+ self._run_test_with_client_options(test_function, self._options)
+
+ def _run_deflate_frame_test(self, test_function):
+ server = self._run_server()
+ try:
+ time.sleep(_SERVER_WARMUP_IN_SEC)
+
+ self._options.enable_deflate_frame()
+ client = client_for_testing.create_client(self._options)
+ try:
+ test_function(client)
+ finally:
+ client.close_socket()
+ finally:
+ self._kill_process(server.pid)
+
+ def _run_permessage_deflate_test(
+ self, offer, response_checker, test_function):
+ server = self._run_server()
+ try:
+ time.sleep(_SERVER_WARMUP_IN_SEC)
+
+ self._options.extensions += offer
+ self._options.check_permessage_deflate = response_checker
+ client = client_for_testing.create_client(self._options)
+
+ try:
+ client.connect()
+
+ if test_function is not None:
+ test_function(client)
+
+ client.assert_connection_closed()
+ finally:
+ client.close_socket()
+ finally:
+ self._kill_process(server.pid)
+
+ def _run_close_with_code_and_reason_test(self, test_function, code,
+ reason):
+ server = self._run_server()
+ try:
+ time.sleep(_SERVER_WARMUP_IN_SEC)
+
+ client = client_for_testing.create_client(self._options)
+ try:
+ test_function(client, code, reason)
+ finally:
+ client.close_socket()
+ finally:
+ self._kill_process(server.pid)
+
+ def _run_http_fallback_test(self, options, status):
+ server = self._run_server()
+ try:
+ time.sleep(_SERVER_WARMUP_IN_SEC)
+
+ client = client_for_testing.create_client(options)
+ try:
+ client.connect()
+ self.fail('Could not catch HttpStatusException')
+ except client_for_testing.HttpStatusException, e:
+ self.assertEqual(status, e.status)
+ except Exception, e:
+                self.fail('Caught unexpected exception: %r' % e)
+ finally:
+ client.close_socket()
+ finally:
+ self._kill_process(server.pid)
+
+ def _run_mux_test(self, test_function):
+ server = self._run_server()
+ try:
+ time.sleep(_SERVER_WARMUP_IN_SEC)
+
+ client = mux_client_for_testing.MuxClient(self._options)
+ try:
+ test_function(client)
+ finally:
+ client.close_socket()
+ finally:
+ self._kill_process(server.pid)
+
+ def test_echo(self):
+ self._run_test(_echo_check_procedure)
+
+ def test_echo_binary(self):
+ self._run_test(_echo_check_procedure_with_binary)
+
+ def test_echo_server_close(self):
+ self._run_test(_echo_check_procedure_with_goodbye)
+
+ def test_unmasked_frame(self):
+ self._run_test(_unmasked_frame_check_procedure)
+
+ def test_echo_deflate_frame(self):
+ self._run_deflate_frame_test(_echo_check_procedure)
+
+ def test_echo_deflate_frame_server_close(self):
+ self._run_deflate_frame_test(
+ _echo_check_procedure_with_goodbye)
+
+ def test_echo_permessage_deflate(self):
+ def test_function(client):
+ # From the examples in the spec.
+ compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
+ client._stream.send_data(
+ compressed_hello,
+ client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.assert_receive_binary(
+ compressed_hello,
+ opcode=client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+
+ client.send_close()
+ client.assert_receive_close()
+
+ def response_checker(parameter):
+ self.assertEquals('permessage-deflate', parameter.name())
+ self.assertEquals([], parameter.get_parameters())
+
+ self._run_permessage_deflate_test(
+ ['permessage-deflate'],
+ response_checker,
+ test_function)
+
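The '\xf2\x48\xcd\xc9\xc9\x07\x00' payload used in these permessage-deflate tests is the compressed form of 'Hello' taken from the extension spec's examples. A sketch of deriving such a payload with zlib (raw deflate plus a sync flush, dropping the trailing 0x00 0x00 0xff 0xff); whether zlib's default settings reproduce the exact spec bytes is an assumption here:

    # Derive a permessage-deflate payload for 'Hello' with raw deflate.
    import zlib

    compressor = zlib.compressobj(
        zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
    data = compressor.compress('Hello') + compressor.flush(zlib.Z_SYNC_FLUSH)
    assert data.endswith('\x00\x00\xff\xff')
    payload = data[:-4]
    print repr(payload)  # expected to show '\xf2H\xcd\xc9\xc9\x07\x00'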
+ def test_echo_permessage_deflate_two_frames(self):
+ def test_function(client):
+ # From the examples in the spec.
+ client._stream.send_data(
+ '\xf2\x48\xcd',
+ client_for_testing.OPCODE_TEXT,
+ end=False,
+ rsv1=1)
+ client._stream.send_data(
+ '\xc9\xc9\x07\x00',
+ client_for_testing.OPCODE_TEXT)
+ client._stream.assert_receive_binary(
+ '\xf2\x48\xcd\xc9\xc9\x07\x00',
+ opcode=client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+
+ client.send_close()
+ client.assert_receive_close()
+
+ def response_checker(parameter):
+ self.assertEquals('permessage-deflate', parameter.name())
+ self.assertEquals([], parameter.get_parameters())
+
+ self._run_permessage_deflate_test(
+ ['permessage-deflate'],
+ response_checker,
+ test_function)
+
+ def test_echo_permessage_deflate_two_messages(self):
+ def test_function(client):
+ # From the examples in the spec.
+ client._stream.send_data(
+ '\xf2\x48\xcd\xc9\xc9\x07\x00',
+ client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.send_data(
+ '\xf2\x00\x11\x00\x00',
+ client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.assert_receive_binary(
+ '\xf2\x48\xcd\xc9\xc9\x07\x00',
+ opcode=client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.assert_receive_binary(
+ '\xf2\x00\x11\x00\x00',
+ opcode=client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+
+ client.send_close()
+ client.assert_receive_close()
+
+ def response_checker(parameter):
+ self.assertEquals('permessage-deflate', parameter.name())
+ self.assertEquals([], parameter.get_parameters())
+
+ self._run_permessage_deflate_test(
+ ['permessage-deflate'],
+ response_checker,
+ test_function)
+
+ def test_echo_permessage_deflate_two_msgs_server_no_context_takeover(self):
+ def test_function(client):
+ # From the examples in the spec.
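+            # The second message ('\xf2\x00\x11\x00\x00') is 'Hello'
+            # compressed using the context carried over from the first one.
+            # With server_no_context_takeover the server resets its context
+            # per message, so both echoes are expected back in the full,
+            # context-free form.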
+ client._stream.send_data(
+ '\xf2\x48\xcd\xc9\xc9\x07\x00',
+ client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.send_data(
+ '\xf2\x00\x11\x00\x00',
+ client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.assert_receive_binary(
+ '\xf2\x48\xcd\xc9\xc9\x07\x00',
+ opcode=client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.assert_receive_binary(
+ '\xf2\x48\xcd\xc9\xc9\x07\x00',
+ opcode=client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+
+ client.send_close()
+ client.assert_receive_close()
+
+ def response_checker(parameter):
+ self.assertEquals('permessage-deflate', parameter.name())
+ self.assertEquals([('server_no_context_takeover', None)],
+ parameter.get_parameters())
+
+ self._run_permessage_deflate_test(
+ ['permessage-deflate; server_no_context_takeover'],
+ response_checker,
+ test_function)
+
+ def test_echo_permessage_deflate_preference(self):
+ def test_function(client):
+ # From the examples in the spec.
+ compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
+ client._stream.send_data(
+ compressed_hello,
+ client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.assert_receive_binary(
+ compressed_hello,
+ opcode=client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+
+ client.send_close()
+ client.assert_receive_close()
+
+ def response_checker(parameter):
+ self.assertEquals('permessage-deflate', parameter.name())
+ self.assertEquals([], parameter.get_parameters())
+
+ self._run_permessage_deflate_test(
+ ['permessage-deflate', 'deflate-frame'],
+ response_checker,
+ test_function)
+
+ def test_echo_permessage_deflate_with_parameters(self):
+ def test_function(client):
+ # From the examples in the spec.
+ compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
+ client._stream.send_data(
+ compressed_hello,
+ client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+ client._stream.assert_receive_binary(
+ compressed_hello,
+ opcode=client_for_testing.OPCODE_TEXT,
+ rsv1=1)
+
+ client.send_close()
+ client.assert_receive_close()
+
+ def response_checker(parameter):
+ self.assertEquals('permessage-deflate', parameter.name())
+ self.assertEquals([('server_max_window_bits', '10'),
+ ('server_no_context_takeover', None)],
+ parameter.get_parameters())
+
+ self._run_permessage_deflate_test(
+ ['permessage-deflate; server_max_window_bits=10; '
+ 'server_no_context_takeover'],
+ response_checker,
+ test_function)
+
+ def test_echo_permessage_deflate_with_bad_server_max_window_bits(self):
+ def test_function(client):
+ client.send_close()
+ client.assert_receive_close()
+
+ def response_checker(parameter):
+ raise Exception('Unexpected acceptance of permessage-deflate')
+
+ self._run_permessage_deflate_test(
+ ['permessage-deflate; server_max_window_bits=3000000'],
+ response_checker,
+ test_function)
+
+ def test_echo_permessage_deflate_with_undefined_parameter(self):
+ def test_function(client):
+ client.send_close()
+ client.assert_receive_close()
+
+ def response_checker(parameter):
+ raise Exception('Unexpected acceptance of permessage-deflate')
+
+ self._run_permessage_deflate_test(
+ ['permessage-deflate; foo=bar'],
+ response_checker,
+ test_function)
+
+ def test_echo_close_with_code_and_reason(self):
+ self._options.resource = '/close'
+ self._run_close_with_code_and_reason_test(
+ _echo_check_procedure_with_code_and_reason, 3333, 'sunsunsunsun')
+
+ def test_echo_close_with_empty_body(self):
+ self._options.resource = '/close'
+ self._run_close_with_code_and_reason_test(
+ _echo_check_procedure_with_code_and_reason, None, '')
+
+ def test_mux_echo(self):
+ self._run_mux_test(_mux_echo_check_procedure)
+
+ def test_close_on_protocol_error(self):
+ """Tests that the server sends a close frame with protocol error status
+ code when the client sends data with some protocol error.
+ """
+
+ def test_function(client):
+ client.connect()
+
+ # Intermediate frame without any preceding start of fragmentation
+ # frame.
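+            # 0x80 0x80: FIN=1 with the continuation opcode (0x0), then a
+            # masked, zero-length payload header.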
+ client.send_frame_of_arbitrary_bytes('\x80\x80', '')
+ client.assert_receive_close(
+ client_for_testing.STATUS_PROTOCOL_ERROR)
+
+ self._run_test(test_function)
+
+ def test_close_on_unsupported_frame(self):
+ """Tests that the server sends a close frame with unsupported operation
+ status code when the client sends data asking some operation that is
+ not supported by the server.
+ """
+
+ def test_function(client):
+ client.connect()
+
+ # Text frame with RSV3 bit raised.
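+            # 0x91 0x80: FIN=1, RSV3=1, opcode 0x1 (text), then a masked,
+            # zero-length payload header.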
+ client.send_frame_of_arbitrary_bytes('\x91\x80', '')
+ client.assert_receive_close(
+ client_for_testing.STATUS_UNSUPPORTED_DATA)
+
+ self._run_test(test_function)
+
+ def test_close_on_invalid_frame(self):
+ """Tests that the server sends a close frame with invalid frame payload
+ data status code when the client sends an invalid frame like containing
+ invalid UTF-8 character.
+ """
+
+ def test_function(client):
+ client.connect()
+
+ # Text frame with invalid UTF-8 string.
+ client.send_message('\x80', raw=True)
+ client.assert_receive_close(
+ client_for_testing.STATUS_INVALID_FRAME_PAYLOAD_DATA)
+
+ self._run_test(test_function)
+
+ def test_close_on_internal_endpoint_error(self):
+ """Tests that the server sends a close frame with internal endpoint
+ error status code when the handler does bad operation.
+ """
+
+ self._options.resource = '/internal_error'
+
+ def test_function(client):
+ client.connect()
+ client.assert_receive_close(
+ client_for_testing.STATUS_INTERNAL_ENDPOINT_ERROR)
+
+ self._run_test(test_function)
+
+    # TODO(toyoshim): Add tests to verify handling of invalid absolute URIs,
+    # e.g. host mismatch, port mismatch, and an invalid port description (':'
+    # without a port number).
+
+ def test_absolute_uri(self):
+ """Tests absolute uri request."""
+
+ options = self._options
+ options.resource = 'ws://localhost:%d/echo' % options.server_port
+ self._run_test_with_client_options(_echo_check_procedure, options)
+
+ def test_origin_check(self):
+ """Tests http fallback on origin check fail."""
+
+ options = self._options
+ options.resource = '/origin_check'
+        # The server logs a warning for the HTTP 403 fallback. The warning is
+        # confusing here, so pipe stderr to discard it.
+ self.server_stderr = subprocess.PIPE
+ self._run_http_fallback_test(options, 403)
+
+ def test_version_check(self):
+ """Tests http fallback on version check fail."""
+
+ options = self._options
+ options.version = 99
+ self._run_http_fallback_test(options, 400)
+
+
+class EndToEndHyBi00Test(EndToEndTestBase):
+ def setUp(self):
+ EndToEndTestBase.setUp(self)
+
+ def _run_test(self, test_function):
+ server = self._run_server()
+ try:
+ time.sleep(_SERVER_WARMUP_IN_SEC)
+
+ client = client_for_testing.create_client_hybi00(self._options)
+ try:
+ test_function(client)
+ finally:
+ client.close_socket()
+ finally:
+ self._kill_process(server.pid)
+
+ def test_echo(self):
+ self._run_test(_echo_check_procedure)
+
+ def test_echo_server_close(self):
+ self._run_test(_echo_check_procedure_with_goodbye)
+
+
+class EndToEndTestWithEchoClient(EndToEndTestBase):
+ def setUp(self):
+ EndToEndTestBase.setUp(self)
+
+ def _check_example_echo_client_result(
+ self, expected, stdoutdata, stderrdata):
+ actual = stdoutdata.decode("utf-8")
+ if actual != expected:
+ raise Exception('Unexpected result on example echo client: '
+ '%r (expected) vs %r (actual)' %
+ (expected, actual))
+ if stderrdata is not None:
+ raise Exception('Unexpected error message on example echo '
+ 'client: %r' % stderrdata)
+
+ def test_example_echo_client(self):
+ """Tests that the echo_client.py example can talk with the server."""
+
+ server = self._run_server()
+ try:
+ time.sleep(_SERVER_WARMUP_IN_SEC)
+
+ client_command = os.path.join(
+ self.top_dir, 'example', 'echo_client.py')
+
+ # Expected output for the default messages.
+ default_expectation = ('Send: Hello\n' 'Recv: Hello\n'
+ u'Send: \u65e5\u672c\n' u'Recv: \u65e5\u672c\n'
+ 'Send close\n' 'Recv ack\n')
+
+ args = [client_command,
+ '-p', str(self._options.server_port)]
+ client = self._run_python_command(args, stdout=subprocess.PIPE)
+ stdoutdata, stderrdata = client.communicate()
+ self._check_example_echo_client_result(
+ default_expectation, stdoutdata, stderrdata)
+
+ # Process a big message for which extended payload length is used.
+ # To handle extended payload length, ws_version attribute will be
+ # accessed. This test checks that ws_version is correctly set.
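+            # A 1024-byte payload is longer than 125 bytes, so the frame must
+            # use the 16-bit extended payload length field.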
+ big_message = 'a' * 1024
+ args = [client_command,
+ '-p', str(self._options.server_port),
+ '-m', big_message]
+ client = self._run_python_command(args, stdout=subprocess.PIPE)
+ stdoutdata, stderrdata = client.communicate()
+ expected = ('Send: %s\nRecv: %s\nSend close\nRecv ack\n' %
+ (big_message, big_message))
+ self._check_example_echo_client_result(
+ expected, stdoutdata, stderrdata)
+
+ # Test the permessage-deflate extension.
+ args = [client_command,
+ '-p', str(self._options.server_port),
+ '--use_permessage_deflate']
+ client = self._run_python_command(args, stdout=subprocess.PIPE)
+ stdoutdata, stderrdata = client.communicate()
+ self._check_example_echo_client_result(
+ default_expectation, stdoutdata, stderrdata)
+ finally:
+ self._kill_process(server.pid)
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_extensions.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_extensions.py
new file mode 100755
index 000000000..6c8b1262d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_extensions.py
@@ -0,0 +1,360 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for extensions module."""
+
+
+import unittest
+import zlib
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket import common
+from mod_pywebsocket import extensions
+
+
+class ExtensionsTest(unittest.TestCase):
+ """A unittest for non-class methods in extensions.py"""
+
+ def test_parse_window_bits(self):
+ self.assertRaises(ValueError, extensions._parse_window_bits, None)
+ self.assertRaises(ValueError, extensions._parse_window_bits, 'foobar')
+ self.assertRaises(ValueError, extensions._parse_window_bits, ' 8 ')
+ self.assertRaises(ValueError, extensions._parse_window_bits, 'a8a')
+ self.assertRaises(ValueError, extensions._parse_window_bits, '00000')
+ self.assertRaises(ValueError, extensions._parse_window_bits, '00008')
+ self.assertRaises(ValueError, extensions._parse_window_bits, '0x8')
+
+ self.assertRaises(ValueError, extensions._parse_window_bits, '9.5')
+ self.assertRaises(ValueError, extensions._parse_window_bits, '8.0')
+
+        self.assertEqual(8, extensions._parse_window_bits('8'))
+        self.assertEqual(15, extensions._parse_window_bits('15'))
+
+ self.assertRaises(ValueError, extensions._parse_window_bits, '-8')
+ self.assertRaises(ValueError, extensions._parse_window_bits, '0')
+ self.assertRaises(ValueError, extensions._parse_window_bits, '7')
+
+ self.assertRaises(ValueError, extensions._parse_window_bits, '16')
+ self.assertRaises(
+ ValueError, extensions._parse_window_bits, '10000000')
+
+
+class CompressionMethodParameterParserTest(unittest.TestCase):
+ """A unittest for _parse_compression_method which parses the compression
+ method description used by perframe-compression and permessage-compression
+ extension in their "method" extension parameter.
+ """
+
+ def test_parse_method_simple(self):
+ method_list = extensions._parse_compression_method('foo')
+ self.assertEqual(1, len(method_list))
+ method = method_list[0]
+ self.assertEqual('foo', method.name())
+ self.assertEqual(0, len(method.get_parameters()))
+
+ def test_parse_method_with_parameter(self):
+ method_list = extensions._parse_compression_method('foo; x; y=10')
+ self.assertEqual(1, len(method_list))
+ method = method_list[0]
+ self.assertEqual('foo', method.name())
+ self.assertEqual(2, len(method.get_parameters()))
+ self.assertTrue(method.has_parameter('x'))
+ self.assertEqual(None, method.get_parameter_value('x'))
+ self.assertTrue(method.has_parameter('y'))
+ self.assertEqual('10', method.get_parameter_value('y'))
+
+ def test_parse_method_with_quoted_parameter(self):
+ method_list = extensions._parse_compression_method(
+ 'foo; x="Hello World"; y=10')
+ self.assertEqual(1, len(method_list))
+ method = method_list[0]
+ self.assertEqual('foo', method.name())
+ self.assertEqual(2, len(method.get_parameters()))
+ self.assertTrue(method.has_parameter('x'))
+ self.assertEqual('Hello World', method.get_parameter_value('x'))
+ self.assertTrue(method.has_parameter('y'))
+ self.assertEqual('10', method.get_parameter_value('y'))
+
+ def test_parse_method_multiple(self):
+ method_list = extensions._parse_compression_method('foo, bar')
+ self.assertEqual(2, len(method_list))
+ self.assertEqual('foo', method_list[0].name())
+ self.assertEqual(0, len(method_list[0].get_parameters()))
+ self.assertEqual('bar', method_list[1].name())
+ self.assertEqual(0, len(method_list[1].get_parameters()))
+
+ def test_parse_method_multiple_methods_with_quoted_parameter(self):
+ method_list = extensions._parse_compression_method(
+ 'foo; x="Hello World", bar; y=10')
+ self.assertEqual(2, len(method_list))
+ self.assertEqual('foo', method_list[0].name())
+ self.assertEqual(1, len(method_list[0].get_parameters()))
+ self.assertTrue(method_list[0].has_parameter('x'))
+ self.assertEqual('Hello World',
+ method_list[0].get_parameter_value('x'))
+ self.assertEqual('bar', method_list[1].name())
+ self.assertEqual(1, len(method_list[1].get_parameters()))
+ self.assertTrue(method_list[1].has_parameter('y'))
+ self.assertEqual('10', method_list[1].get_parameter_value('y'))
+
+ def test_create_method_desc_simple(self):
+ params = common.ExtensionParameter('foo')
+ desc = extensions._create_accepted_method_desc('foo',
+ params.get_parameters())
+ self.assertEqual('foo', desc)
+
+ def test_create_method_desc_with_parameters(self):
+ params = common.ExtensionParameter('foo')
+ params.add_parameter('x', 'Hello, World')
+ params.add_parameter('y', '10')
+ desc = extensions._create_accepted_method_desc('foo',
+ params.get_parameters())
+ self.assertEqual('foo; x="Hello, World"; y=10', desc)
+
+
+class DeflateFrameExtensionProcessorParsingTest(unittest.TestCase):
+ """A unittest for checking that DeflateFrameExtensionProcessor parses given
+ extension parameter correctly.
+ """
+
+ def test_registry(self):
+ processor = extensions.get_extension_processor(
+ common.ExtensionParameter('deflate-frame'))
+ self.assertIsInstance(processor,
+ extensions.DeflateFrameExtensionProcessor)
+
+ processor = extensions.get_extension_processor(
+ common.ExtensionParameter('x-webkit-deflate-frame'))
+ self.assertIsInstance(processor,
+ extensions.DeflateFrameExtensionProcessor)
+
+ def test_minimal_offer(self):
+ processor = extensions.DeflateFrameExtensionProcessor(
+ common.ExtensionParameter('perframe-deflate'))
+
+ response = processor.get_extension_response()
+ self.assertEqual('perframe-deflate', response.name())
+ self.assertEqual(0, len(response.get_parameters()))
+
+ self.assertEqual(zlib.MAX_WBITS,
+ processor._rfc1979_deflater._window_bits)
+ self.assertFalse(processor._rfc1979_deflater._no_context_takeover)
+
+ def test_offer_with_max_window_bits(self):
+ parameter = common.ExtensionParameter('perframe-deflate')
+ parameter.add_parameter('max_window_bits', '10')
+ processor = extensions.DeflateFrameExtensionProcessor(parameter)
+
+ response = processor.get_extension_response()
+ self.assertEqual('perframe-deflate', response.name())
+ self.assertEqual(0, len(response.get_parameters()))
+
+ self.assertEqual(10, processor._rfc1979_deflater._window_bits)
+
+ def test_offer_with_out_of_range_max_window_bits(self):
+ parameter = common.ExtensionParameter('perframe-deflate')
+ parameter.add_parameter('max_window_bits', '0')
+ processor = extensions.DeflateFrameExtensionProcessor(parameter)
+
+ self.assertIsNone(processor.get_extension_response())
+
+ def test_offer_with_max_window_bits_without_value(self):
+ parameter = common.ExtensionParameter('perframe-deflate')
+ parameter.add_parameter('max_window_bits', None)
+ processor = extensions.DeflateFrameExtensionProcessor(parameter)
+
+ self.assertIsNone(processor.get_extension_response())
+
+ def test_offer_with_no_context_takeover(self):
+ parameter = common.ExtensionParameter('perframe-deflate')
+ parameter.add_parameter('no_context_takeover', None)
+ processor = extensions.DeflateFrameExtensionProcessor(parameter)
+
+ response = processor.get_extension_response()
+ self.assertEqual('perframe-deflate', response.name())
+ self.assertEqual(0, len(response.get_parameters()))
+
+ self.assertTrue(processor._rfc1979_deflater._no_context_takeover)
+
+ def test_offer_with_no_context_takeover_with_value(self):
+ parameter = common.ExtensionParameter('perframe-deflate')
+ parameter.add_parameter('no_context_takeover', 'foobar')
+ processor = extensions.DeflateFrameExtensionProcessor(parameter)
+
+ self.assertIsNone(processor.get_extension_response())
+
+ def test_offer_with_unknown_parameter(self):
+ parameter = common.ExtensionParameter('perframe-deflate')
+ parameter.add_parameter('foo', 'bar')
+ processor = extensions.DeflateFrameExtensionProcessor(parameter)
+
+ response = processor.get_extension_response()
+ self.assertEqual('perframe-deflate', response.name())
+ self.assertEqual(0, len(response.get_parameters()))
+
+
+class PerMessageDeflateExtensionProcessorParsingTest(unittest.TestCase):
+ """A unittest for checking that PerMessageDeflateExtensionProcessor parses
+ given extension parameter correctly.
+ """
+
+ def test_registry(self):
+ processor = extensions.get_extension_processor(
+ common.ExtensionParameter('permessage-deflate'))
+ self.assertIsInstance(processor,
+ extensions.PerMessageDeflateExtensionProcessor)
+
+ def test_minimal_offer(self):
+ processor = extensions.PerMessageDeflateExtensionProcessor(
+ common.ExtensionParameter('permessage-deflate'))
+
+ response = processor.get_extension_response()
+ self.assertEqual('permessage-deflate', response.name())
+ self.assertEqual(0, len(response.get_parameters()))
+
+ self.assertEqual(zlib.MAX_WBITS,
+ processor._rfc1979_deflater._window_bits)
+ self.assertFalse(processor._rfc1979_deflater._no_context_takeover)
+
+ def test_offer_with_max_window_bits(self):
+ parameter = common.ExtensionParameter('permessage-deflate')
+ parameter.add_parameter('server_max_window_bits', '10')
+ processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
+
+ response = processor.get_extension_response()
+ self.assertEqual('permessage-deflate', response.name())
+ self.assertEqual([('server_max_window_bits', '10')],
+ response.get_parameters())
+
+ self.assertEqual(10, processor._rfc1979_deflater._window_bits)
+
+ def test_offer_with_out_of_range_max_window_bits(self):
+ parameter = common.ExtensionParameter('permessage-deflate')
+ parameter.add_parameter('server_max_window_bits', '0')
+ processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
+
+ self.assertIsNone(processor.get_extension_response())
+
+ def test_offer_with_max_window_bits_without_value(self):
+ parameter = common.ExtensionParameter('permessage-deflate')
+ parameter.add_parameter('server_max_window_bits', None)
+ processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
+
+ self.assertIsNone(processor.get_extension_response())
+
+ def test_offer_with_no_context_takeover(self):
+ parameter = common.ExtensionParameter('permessage-deflate')
+ parameter.add_parameter('server_no_context_takeover', None)
+ processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
+
+ response = processor.get_extension_response()
+ self.assertEqual('permessage-deflate', response.name())
+ self.assertEqual([('server_no_context_takeover', None)],
+ response.get_parameters())
+
+ self.assertTrue(processor._rfc1979_deflater._no_context_takeover)
+
+ def test_offer_with_no_context_takeover_with_value(self):
+ parameter = common.ExtensionParameter('permessage-deflate')
+ parameter.add_parameter('server_no_context_takeover', 'foobar')
+ processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
+
+ self.assertIsNone(processor.get_extension_response())
+
+ def test_offer_with_unknown_parameter(self):
+ parameter = common.ExtensionParameter('permessage-deflate')
+ parameter.add_parameter('foo', 'bar')
+ processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
+
+ self.assertIsNone(processor.get_extension_response())
+
+
+class PerMessageDeflateExtensionProcessorBuildingTest(unittest.TestCase):
+ """A unittest for checking that PerMessageDeflateExtensionProcessor builds
+ a response based on specified options correctly.
+ """
+
+ def test_response_with_max_window_bits(self):
+ parameter = common.ExtensionParameter('permessage-deflate')
+ parameter.add_parameter('client_max_window_bits', None)
+ processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
+ processor.set_client_max_window_bits(10)
+
+ response = processor.get_extension_response()
+ self.assertEqual('permessage-deflate', response.name())
+ self.assertEqual([('client_max_window_bits', '10')],
+ response.get_parameters())
+
+ def test_response_with_max_window_bits_without_client_permission(self):
+ processor = extensions.PerMessageDeflateExtensionProcessor(
+ common.ExtensionParameter('permessage-deflate'))
+ processor.set_client_max_window_bits(10)
+
+ response = processor.get_extension_response()
+ self.assertIsNone(response)
+
+ def test_response_with_true_for_no_context_takeover(self):
+ processor = extensions.PerMessageDeflateExtensionProcessor(
+ common.ExtensionParameter('permessage-deflate'))
+
+ processor.set_client_no_context_takeover(True)
+
+ response = processor.get_extension_response()
+ self.assertEqual('permessage-deflate', response.name())
+ self.assertEqual([('client_no_context_takeover', None)],
+ response.get_parameters())
+
+ def test_response_with_false_for_no_context_takeover(self):
+ processor = extensions.PerMessageDeflateExtensionProcessor(
+ common.ExtensionParameter('permessage-deflate'))
+
+ processor.set_client_no_context_takeover(False)
+
+ response = processor.get_extension_response()
+ self.assertEqual('permessage-deflate', response.name())
+ self.assertEqual(0, len(response.get_parameters()))
+
+
+class PerMessageCompressExtensionProcessorTest(unittest.TestCase):
+ def test_registry(self):
+ processor = extensions.get_extension_processor(
+ common.ExtensionParameter('permessage-compress'))
+ self.assertIsInstance(processor,
+ extensions.PerMessageCompressExtensionProcessor)
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake.py
new file mode 100755
index 000000000..aa78ac05e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for handshake._base module."""
+
+
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket.common import ExtensionParameter
+from mod_pywebsocket.common import ExtensionParsingException
+from mod_pywebsocket.common import format_extensions
+from mod_pywebsocket.common import parse_extensions
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import validate_subprotocol
+
+
+class ValidateSubprotocolTest(unittest.TestCase):
+ """A unittest for validate_subprotocol method."""
+
+ def test_validate_subprotocol(self):
+ # Should succeed.
+ validate_subprotocol('sample')
+ validate_subprotocol('Sample')
+ validate_subprotocol('sample\x7eprotocol')
+
+ # Should fail.
+ self.assertRaises(HandshakeException,
+ validate_subprotocol,
+ '')
+ self.assertRaises(HandshakeException,
+ validate_subprotocol,
+ 'sample\x09protocol')
+ self.assertRaises(HandshakeException,
+ validate_subprotocol,
+ 'sample\x19protocol')
+ self.assertRaises(HandshakeException,
+ validate_subprotocol,
+ 'sample\x20protocol')
+ self.assertRaises(HandshakeException,
+ validate_subprotocol,
+ 'sample\x7fprotocol')
+ self.assertRaises(HandshakeException,
+ validate_subprotocol,
+ # "Japan" in Japanese
+ u'\u65e5\u672c')
+
+
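+# Each entry below pairs a Sec-WebSocket-Extensions header value with the
+# corresponding extension definitions: a list of (extension name,
+# [(parameter name, parameter value)]) tuples.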
+_TEST_TOKEN_EXTENSION_DATA = [
+ ('foo', [('foo', [])]),
+ ('foo; bar', [('foo', [('bar', None)])]),
+ ('foo; bar=baz', [('foo', [('bar', 'baz')])]),
+ ('foo; bar=baz; car=cdr', [('foo', [('bar', 'baz'), ('car', 'cdr')])]),
+ ('foo; bar=baz, car; cdr',
+ [('foo', [('bar', 'baz')]), ('car', [('cdr', None)])]),
+ ('a, b, c, d',
+ [('a', []), ('b', []), ('c', []), ('d', [])]),
+ ]
+
+
+_TEST_QUOTED_EXTENSION_DATA = [
+ ('foo; bar=""', [('foo', [('bar', '')])]),
+ ('foo; bar=" baz "', [('foo', [('bar', ' baz ')])]),
+ ('foo; bar=",baz;"', [('foo', [('bar', ',baz;')])]),
+ ('foo; bar="\\\r\\\nbaz"', [('foo', [('bar', '\r\nbaz')])]),
+ ('foo; bar="\\"baz"', [('foo', [('bar', '"baz')])]),
+ ('foo; bar="\xbbbaz"', [('foo', [('bar', '\xbbbaz')])]),
+ ]
+
+
+_TEST_REDUNDANT_TOKEN_EXTENSION_DATA = [
+ ('foo \t ', [('foo', [])]),
+ ('foo; \r\n bar', [('foo', [('bar', None)])]),
+ ('foo; bar=\r\n \r\n baz', [('foo', [('bar', 'baz')])]),
+ ('foo ;bar = baz ', [('foo', [('bar', 'baz')])]),
+ ('foo,bar,,baz', [('foo', []), ('bar', []), ('baz', [])]),
+ ]
+
+
+_TEST_REDUNDANT_QUOTED_EXTENSION_DATA = [
+ ('foo; bar="\r\n \r\n baz"', [('foo', [('bar', ' baz')])]),
+ ]
+
+
+class ExtensionsParserTest(unittest.TestCase):
+
+ def _verify_extension_list(self, expected_list, actual_list):
+ """Verifies that ExtensionParameter objects in actual_list have the
+ same members as extension definitions in expected_list. Extension
+ definition used in this test is a pair of an extension name and a
+ parameter dictionary.
+ """
+
+ self.assertEqual(len(expected_list), len(actual_list))
+ for expected, actual in zip(expected_list, actual_list):
+ (name, parameters) = expected
+ self.assertEqual(name, actual._name)
+ self.assertEqual(parameters, actual._parameters)
+
+ def test_parse(self):
+ for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
+ self._verify_extension_list(
+ definition, parse_extensions(formatted_string))
+
+ def test_parse_quoted_data(self):
+ for formatted_string, definition in _TEST_QUOTED_EXTENSION_DATA:
+ self._verify_extension_list(
+ definition, parse_extensions(formatted_string))
+
+ def test_parse_redundant_data(self):
+ for (formatted_string,
+ definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
+ self._verify_extension_list(
+ definition, parse_extensions(formatted_string))
+
+ def test_parse_redundant_quoted_data(self):
+ for (formatted_string,
+ definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
+ self._verify_extension_list(
+ definition, parse_extensions(formatted_string))
+
+ def test_parse_bad_data(self):
+ _TEST_BAD_EXTENSION_DATA = [
+ ('foo; ; '),
+ ('foo; a a'),
+ ('foo foo'),
+ (',,,'),
+ ('foo; bar='),
+ ('foo; bar="hoge'),
+ ('foo; bar="a\r"'),
+ ('foo; bar="\\\xff"'),
+ ('foo; bar=\ra'),
+ ]
+
+ for formatted_string in _TEST_BAD_EXTENSION_DATA:
+ self.assertRaises(
+ ExtensionParsingException, parse_extensions, formatted_string)
+
+
+class FormatExtensionsTest(unittest.TestCase):
+
+ def test_format_extensions(self):
+ for formatted_string, definitions in _TEST_TOKEN_EXTENSION_DATA:
+ extensions = []
+ for definition in definitions:
+ (name, parameters) = definition
+ extension = ExtensionParameter(name)
+ extension._parameters = parameters
+ extensions.append(extension)
+ self.assertEqual(
+ formatted_string, format_extensions(extensions))
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake_hybi.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake_hybi.py
new file mode 100755
index 000000000..6c8713823
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake_hybi.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for handshake module."""
+
+
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+from mod_pywebsocket import common
+from mod_pywebsocket.handshake._base import AbortedByUserException
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import VersionException
+from mod_pywebsocket.handshake.hybi import Handshaker
+
+import mock
+
+
+class RequestDefinition(object):
+ """A class for holding data for constructing opening handshake strings for
+ testing the opening handshake processor.
+ """
+
+ def __init__(self, method, uri, headers):
+ self.method = method
+ self.uri = uri
+ self.headers = headers
+
+
+def _create_good_request_def():
+ return RequestDefinition(
+ 'GET', '/demo',
+ {'Host': 'server.example.com',
+ 'Upgrade': 'websocket',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': '13',
+ 'Origin': 'http://example.com'})
+
+
+def _create_request(request_def):
+ conn = mock.MockConn('')
+ return mock.MockRequest(
+ method=request_def.method,
+ uri=request_def.uri,
+ headers_in=request_def.headers,
+ connection=conn)
+
+
+def _create_handshaker(request):
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ return handshaker
+
+
+class SubprotocolChoosingDispatcher(object):
+ """A dispatcher for testing. This dispatcher sets the i-th subprotocol
+ of requested ones to ws_protocol where i is given on construction as index
+ argument. If index is negative, default_value will be set to ws_protocol.
+ """
+
+ def __init__(self, index, default_value=None):
+ self.index = index
+ self.default_value = default_value
+
+ def do_extra_handshake(self, conn_context):
+ if self.index >= 0:
+ conn_context.ws_protocol = conn_context.ws_requested_protocols[
+ self.index]
+ else:
+ conn_context.ws_protocol = self.default_value
+
+ def transfer_data(self, conn_context):
+ pass
+
+
+class HandshakeAbortedException(Exception):
+ pass
+
+
+class AbortingDispatcher(object):
+ """A dispatcher for testing. This dispatcher raises an exception in
+ do_extra_handshake to reject the request.
+ """
+
+ def do_extra_handshake(self, conn_context):
+ raise HandshakeAbortedException('An exception to reject the request')
+
+ def transfer_data(self, conn_context):
+ pass
+
+
+class AbortedByUserDispatcher(object):
+ """A dispatcher for testing. This dispatcher raises an
+ AbortedByUserException in do_extra_handshake to reject the request.
+ """
+
+ def do_extra_handshake(self, conn_context):
+ raise AbortedByUserException('An AbortedByUserException to reject the '
+ 'request')
+
+ def transfer_data(self, conn_context):
+ pass
+
+
+_EXPECTED_RESPONSE = (
+ 'HTTP/1.1 101 Switching Protocols\r\n'
+ 'Upgrade: websocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n\r\n')
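+
+# The Sec-WebSocket-Accept value above is the RFC 6455 example for the sample
+# key 'dGhlIHNhbXBsZSBub25jZQ=='; roughly:
+#   import base64, hashlib
+#   base64.b64encode(hashlib.sha1(
+#       'dGhlIHNhbXBsZSBub25jZQ==258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+#   ).digest())  # -> 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='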
+
+
+class HandshakerTest(unittest.TestCase):
+ """A unittest for draft-ietf-hybi-thewebsocketprotocol-06 and later
+ handshake processor.
+ """
+
+ def test_do_handshake(self):
+ request = _create_request(_create_good_request_def())
+ dispatcher = mock.MockDispatcher()
+ handshaker = Handshaker(request, dispatcher)
+ handshaker.do_handshake()
+
+ self.assertTrue(dispatcher.do_extra_handshake_called)
+
+ self.assertEqual(
+ _EXPECTED_RESPONSE, request.connection.written_data())
+ self.assertEqual('/demo', request.ws_resource)
+ self.assertEqual('http://example.com', request.ws_origin)
+ self.assertEqual(None, request.ws_protocol)
+ self.assertEqual(None, request.ws_extensions)
+ self.assertEqual(common.VERSION_HYBI_LATEST, request.ws_version)
+
+ def test_do_handshake_with_extra_headers(self):
+ request_def = _create_good_request_def()
+ # Add headers not related to WebSocket opening handshake.
+ request_def.headers['FooKey'] = 'BarValue'
+ request_def.headers['EmptyKey'] = ''
+
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ self.assertEqual(
+ _EXPECTED_RESPONSE, request.connection.written_data())
+
+ def test_do_handshake_with_capitalized_value(self):
+ request_def = _create_good_request_def()
+ request_def.headers['upgrade'] = 'WEBSOCKET'
+
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ self.assertEqual(
+ _EXPECTED_RESPONSE, request.connection.written_data())
+
+ request_def = _create_good_request_def()
+ request_def.headers['Connection'] = 'UPGRADE'
+
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ self.assertEqual(
+ _EXPECTED_RESPONSE, request.connection.written_data())
+
+ def test_do_handshake_with_multiple_connection_values(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Connection'] = 'Upgrade, keep-alive, , '
+
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ self.assertEqual(
+ _EXPECTED_RESPONSE, request.connection.written_data())
+
+ def test_aborting_handshake(self):
+ handshaker = Handshaker(
+ _create_request(_create_good_request_def()),
+ AbortingDispatcher())
+ # do_extra_handshake raises an exception. Check that it's not caught by
+ # do_handshake.
+ self.assertRaises(HandshakeAbortedException, handshaker.do_handshake)
+
+ def test_do_handshake_with_protocol(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat'
+
+ request = _create_request(request_def)
+ handshaker = Handshaker(request, SubprotocolChoosingDispatcher(0))
+ handshaker.do_handshake()
+
+ EXPECTED_RESPONSE = (
+ 'HTTP/1.1 101 Switching Protocols\r\n'
+ 'Upgrade: websocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n'
+ 'Sec-WebSocket-Protocol: chat\r\n\r\n')
+
+ self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data())
+ self.assertEqual('chat', request.ws_protocol)
+
+ def test_do_handshake_protocol_not_in_request_but_in_response(self):
+ request_def = _create_good_request_def()
+ request = _create_request(request_def)
+ handshaker = Handshaker(
+ request, SubprotocolChoosingDispatcher(-1, 'foobar'))
+        # No subprotocol was requested, but the dispatcher sets ws_protocol.
+        # HandshakeException must be raised.
+ self.assertRaises(HandshakeException, handshaker.do_handshake)
+
+ def test_do_handshake_with_protocol_no_protocol_selection(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat'
+
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ # ws_protocol is not set. HandshakeException must be raised.
+ self.assertRaises(HandshakeException, handshaker.do_handshake)
+
+ def test_do_handshake_with_extensions(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Extensions'] = (
+ 'permessage-compress; method=deflate, unknown')
+
+ EXPECTED_RESPONSE = (
+ 'HTTP/1.1 101 Switching Protocols\r\n'
+ 'Upgrade: websocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n'
+ 'Sec-WebSocket-Extensions: permessage-compress; method=deflate\r\n'
+ '\r\n')
+
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data())
+ self.assertEqual(1, len(request.ws_extensions))
+ extension = request.ws_extensions[0]
+ self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
+ extension.name())
+ self.assertEqual(['method'], extension.get_parameter_names())
+ self.assertEqual('deflate', extension.get_parameter_value('method'))
+ self.assertEqual(1, len(request.ws_extension_processors))
+ self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
+ request.ws_extension_processors[0].name())
+
+ def test_do_handshake_with_permessage_compress(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Extensions'] = (
+ 'permessage-compress; method=deflate')
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ self.assertEqual(1, len(request.ws_extensions))
+ self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
+ request.ws_extensions[0].name())
+ self.assertEqual(1, len(request.ws_extension_processors))
+ self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
+ request.ws_extension_processors[0].name())
+
+ def test_do_handshake_with_quoted_extensions(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Extensions'] = (
+ 'permessage-compress; method=deflate, , '
+ 'unknown; e = "mc^2"; ma="\r\n \\\rf "; pv=nrt')
+
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ self.assertEqual(2, len(request.ws_requested_extensions))
+ first_extension = request.ws_requested_extensions[0]
+ self.assertEqual('permessage-compress', first_extension.name())
+ self.assertEqual(['method'], first_extension.get_parameter_names())
+ self.assertEqual('deflate',
+ first_extension.get_parameter_value('method'))
+ second_extension = request.ws_requested_extensions[1]
+ self.assertEqual('unknown', second_extension.name())
+ self.assertEqual(
+ ['e', 'ma', 'pv'], second_extension.get_parameter_names())
+ self.assertEqual('mc^2', second_extension.get_parameter_value('e'))
+ self.assertEqual(' \rf ', second_extension.get_parameter_value('ma'))
+ self.assertEqual('nrt', second_extension.get_parameter_value('pv'))
+
+ def test_do_handshake_with_optional_headers(self):
+ request_def = _create_good_request_def()
+ request_def.headers['EmptyValue'] = ''
+ request_def.headers['AKey'] = 'AValue'
+
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ self.assertEqual(
+ 'AValue', request.headers_in['AKey'])
+ self.assertEqual(
+ '', request.headers_in['EmptyValue'])
+
+ def test_abort_extra_handshake(self):
+ handshaker = Handshaker(
+ _create_request(_create_good_request_def()),
+ AbortedByUserDispatcher())
+ # do_extra_handshake raises an AbortedByUserException. Check that it's
+ # not caught by do_handshake.
+ self.assertRaises(AbortedByUserException, handshaker.do_handshake)
+
+ def test_do_handshake_with_mux_and_deflate_frame(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % (
+ common.MUX_EXTENSION,
+ common.DEFLATE_FRAME_EXTENSION))
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ # mux should be rejected.
+ self.assertEqual(1, len(request.ws_extensions))
+ self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
+ request.ws_extensions[0].name())
+ self.assertEqual(2, len(request.ws_extension_processors))
+ self.assertEqual(common.MUX_EXTENSION,
+ request.ws_extension_processors[0].name())
+ self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
+ request.ws_extension_processors[1].name())
+ self.assertFalse(hasattr(request, 'mux_processor'))
+
+ def test_do_handshake_with_deflate_frame_and_mux(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % (
+ common.DEFLATE_FRAME_EXTENSION,
+ common.MUX_EXTENSION))
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ # mux should be rejected.
+ self.assertEqual(1, len(request.ws_extensions))
+ first_extension = request.ws_extensions[0]
+ self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
+ first_extension.name())
+ self.assertEqual(2, len(request.ws_extension_processors))
+ self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
+ request.ws_extension_processors[0].name())
+ self.assertEqual(common.MUX_EXTENSION,
+ request.ws_extension_processors[1].name())
+ self.assertFalse(hasattr(request, 'mux'))
+
+ def test_do_handshake_with_permessage_compress_and_mux(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Extensions'] = (
+ '%s; method=deflate, %s' % (
+ common.PERMESSAGE_COMPRESSION_EXTENSION,
+ common.MUX_EXTENSION))
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+
+ self.assertEqual(1, len(request.ws_extensions))
+ self.assertEqual(common.MUX_EXTENSION,
+ request.ws_extensions[0].name())
+ self.assertEqual(2, len(request.ws_extension_processors))
+ self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
+ request.ws_extension_processors[0].name())
+ self.assertEqual(common.MUX_EXTENSION,
+ request.ws_extension_processors[1].name())
+ self.assertTrue(hasattr(request, 'mux_processor'))
+ self.assertTrue(request.mux_processor.is_active())
+ mux_extensions = request.mux_processor.extensions()
+ self.assertEqual(1, len(mux_extensions))
+ self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
+ mux_extensions[0].name())
+
+ def test_do_handshake_with_mux_and_permessage_compress(self):
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Extensions'] = (
+ '%s, %s; method=deflate' % (
+ common.MUX_EXTENSION,
+ common.PERMESSAGE_COMPRESSION_EXTENSION))
+ request = _create_request(request_def)
+ handshaker = _create_handshaker(request)
+ handshaker.do_handshake()
+ # mux should be rejected.
+ self.assertEqual(1, len(request.ws_extensions))
+ first_extension = request.ws_extensions[0]
+ self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
+ first_extension.name())
+ self.assertEqual(2, len(request.ws_extension_processors))
+ self.assertEqual(common.MUX_EXTENSION,
+ request.ws_extension_processors[0].name())
+ self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
+ request.ws_extension_processors[1].name())
+ self.assertFalse(hasattr(request, 'mux_processor'))
+
+ def test_bad_requests(self):
+ bad_cases = [
+ ('HTTP request',
+ RequestDefinition(
+ 'GET', '/demo',
+ {'Host': 'www.google.com',
+ 'User-Agent':
+ 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;'
+ ' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
+ ' GTB6 GTBA',
+ 'Accept':
+ 'text/html,application/xhtml+xml,application/xml;q=0.9,'
+ '*/*;q=0.8',
+ 'Accept-Language': 'en-us,en;q=0.5',
+ 'Accept-Encoding': 'gzip,deflate',
+ 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
+ 'Keep-Alive': '300',
+ 'Connection': 'keep-alive'}), None, True)]
+
+ request_def = _create_good_request_def()
+ request_def.method = 'POST'
+ bad_cases.append(('Wrong method', request_def, None, True))
+
+ request_def = _create_good_request_def()
+ del request_def.headers['Host']
+ bad_cases.append(('Missing Host', request_def, None, True))
+
+ request_def = _create_good_request_def()
+ del request_def.headers['Upgrade']
+ bad_cases.append(('Missing Upgrade', request_def, None, True))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Upgrade'] = 'nonwebsocket'
+ bad_cases.append(('Wrong Upgrade', request_def, None, True))
+
+ request_def = _create_good_request_def()
+ del request_def.headers['Connection']
+ bad_cases.append(('Missing Connection', request_def, None, True))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Connection'] = 'Downgrade'
+ bad_cases.append(('Wrong Connection', request_def, None, True))
+
+ request_def = _create_good_request_def()
+ del request_def.headers['Sec-WebSocket-Key']
+ bad_cases.append(('Missing Sec-WebSocket-Key', request_def, 400, True))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Key'] = (
+ 'dGhlIHNhbXBsZSBub25jZQ==garbage')
+ bad_cases.append(('Wrong Sec-WebSocket-Key (with garbage on the tail)',
+ request_def, 400, True))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Key'] = 'YQ==' # BASE64 of 'a'
+ bad_cases.append(
+ ('Wrong Sec-WebSocket-Key (decoded value is not 16 octets long)',
+ request_def, 400, True))
+
+ request_def = _create_good_request_def()
+ # The last character right before == must be any of A, Q, w and g.
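+        # (With '==' padding, only the top two bits of the final base64
+        # character are used, so its remaining four padding bits must be zero.)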
+ request_def.headers['Sec-WebSocket-Key'] = (
+ 'AQIDBAUGBwgJCgsMDQ4PEC==')
+ bad_cases.append(
+ ('Wrong Sec-WebSocket-Key (padding bits are not zero)',
+ request_def, 400, True))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Key'] = (
+ 'dGhlIHNhbXBsZSBub25jZQ==,dGhlIHNhbXBsZSBub25jZQ==')
+ bad_cases.append(
+ ('Wrong Sec-WebSocket-Key (multiple values)',
+ request_def, 400, True))
+
+ request_def = _create_good_request_def()
+ del request_def.headers['Sec-WebSocket-Version']
+ bad_cases.append(('Missing Sec-WebSocket-Version', request_def, None,
+ True))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Version'] = '3'
+ bad_cases.append(('Wrong Sec-WebSocket-Version', request_def, None,
+ False))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Version'] = '13, 13'
+ bad_cases.append(('Wrong Sec-WebSocket-Version (multiple values)',
+ request_def, 400, True))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Protocol'] = 'illegal\x09protocol'
+ bad_cases.append(('Illegal Sec-WebSocket-Protocol',
+ request_def, 400, True))
+
+ request_def = _create_good_request_def()
+ request_def.headers['Sec-WebSocket-Protocol'] = ''
+ bad_cases.append(('Empty Sec-WebSocket-Protocol',
+ request_def, 400, True))
+
+ for (case_name, request_def, expected_status,
+ expect_handshake_exception) in bad_cases:
+ request = _create_request(request_def)
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ try:
+ handshaker.do_handshake()
+ self.fail('No exception thrown for \'%s\' case' % case_name)
+ except HandshakeException, e:
+ self.assertTrue(expect_handshake_exception)
+ self.assertEqual(expected_status, e.status)
+ except VersionException, e:
+ self.assertFalse(expect_handshake_exception)
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake_hybi00.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake_hybi00.py
new file mode 100755
index 000000000..73f9f27ca
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_handshake_hybi00.py
@@ -0,0 +1,516 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for handshake.hybi00 module."""
+
+
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake.hybi00 import Handshaker
+from mod_pywebsocket.handshake.hybi00 import _validate_subprotocol
+from test import mock
+
+
+_TEST_KEY1 = '4 @1 46546xW%0l 1 5'
+_TEST_KEY2 = '12998 5 Y3 1 .P00'
+_TEST_KEY3 = '^n:ds[4U'
+_TEST_CHALLENGE_RESPONSE = '8jKS\'y:G*Co,Wxa-'
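+# The response above matches the draft's example: take the digits of key1 as
+# an integer divided by the number of spaces in key1, likewise for key2, pack
+# each as a big-endian 32-bit integer, append the 8-byte key3, and MD5 the
+# resulting 16 bytes.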
+
+
+_GOOD_REQUEST = (
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3)
+
+_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES = (
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'UPGRADE',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'WEBSOCKET',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3)
+
+_GOOD_REQUEST_CASE_MIXED_HEADER_NAMES = (
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'hOsT': 'example.com',
+ 'cOnNeCtIoN': 'Upgrade',
+ 'sEc-wEbsOcKeT-kEy2': _TEST_KEY2,
+ 'sEc-wEbsOcKeT-pRoToCoL': 'sample',
+ 'uPgRaDe': 'WebSocket',
+ 'sEc-wEbsOcKeT-kEy1': _TEST_KEY1,
+ 'oRiGiN': 'http://example.com',
+ },
+ _TEST_KEY3)
+
+_GOOD_RESPONSE_DEFAULT_PORT = (
+ 'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
+ 'Upgrade: WebSocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Location: ws://example.com/demo\r\n'
+ 'Sec-WebSocket-Origin: http://example.com\r\n'
+ 'Sec-WebSocket-Protocol: sample\r\n'
+ '\r\n' +
+ _TEST_CHALLENGE_RESPONSE)
+
+_GOOD_RESPONSE_SECURE = (
+ 'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
+ 'Upgrade: WebSocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Location: wss://example.com/demo\r\n'
+ 'Sec-WebSocket-Origin: http://example.com\r\n'
+ 'Sec-WebSocket-Protocol: sample\r\n'
+ '\r\n' +
+ _TEST_CHALLENGE_RESPONSE)
+
+_GOOD_REQUEST_NONDEFAULT_PORT = (
+ 8081,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com:8081',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3)
+
+_GOOD_RESPONSE_NONDEFAULT_PORT = (
+ 'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
+ 'Upgrade: WebSocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Location: ws://example.com:8081/demo\r\n'
+ 'Sec-WebSocket-Origin: http://example.com\r\n'
+ 'Sec-WebSocket-Protocol: sample\r\n'
+ '\r\n' +
+ _TEST_CHALLENGE_RESPONSE)
+
+_GOOD_RESPONSE_SECURE_NONDEF = (
+ 'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
+ 'Upgrade: WebSocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Location: wss://example.com:8081/demo\r\n'
+ 'Sec-WebSocket-Origin: http://example.com\r\n'
+ 'Sec-WebSocket-Protocol: sample\r\n'
+ '\r\n' +
+ _TEST_CHALLENGE_RESPONSE)
+
+_GOOD_REQUEST_NO_PROTOCOL = (
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3)
+
+_GOOD_RESPONSE_NO_PROTOCOL = (
+ 'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
+ 'Upgrade: WebSocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Location: ws://example.com/demo\r\n'
+ 'Sec-WebSocket-Origin: http://example.com\r\n'
+ '\r\n' +
+ _TEST_CHALLENGE_RESPONSE)
+
+_GOOD_REQUEST_WITH_OPTIONAL_HEADERS = (
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'EmptyValue': '',
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'AKey': 'AValue',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3)
+
+# TODO(tyoshino): Include \r \n in key3, challenge response.
+
+_GOOD_REQUEST_WITH_NONPRINTABLE_KEY = (
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': 'y R2 48 Q1O4 e|BV3 i5 1 u- 65',
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': '36 7 74 i 92 2\'m 9 0G',
+ 'Origin': 'http://example.com',
+ },
+ ''.join(map(chr, [0x01, 0xd1, 0xdd, 0x3b, 0xd1, 0x56, 0x63, 0xff])))
+
+_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY = (
+ 'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
+ 'Upgrade: WebSocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Location: ws://example.com/demo\r\n'
+ 'Sec-WebSocket-Origin: http://example.com\r\n'
+ 'Sec-WebSocket-Protocol: sample\r\n'
+ '\r\n' +
+ ''.join(map(chr, [0x0b, 0x99, 0xfa, 0x55, 0xbd, 0x01, 0x23, 0x7b,
+ 0x45, 0xa2, 0xf1, 0xd0, 0x87, 0x8a, 0xee, 0xeb])))
+
+_GOOD_REQUEST_WITH_QUERY_PART = (
+ 80,
+ 'GET',
+ '/demo?e=mc2',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3)
+
+_GOOD_RESPONSE_WITH_QUERY_PART = (
+ 'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
+ 'Upgrade: WebSocket\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Sec-WebSocket-Location: ws://example.com/demo?e=mc2\r\n'
+ 'Sec-WebSocket-Origin: http://example.com\r\n'
+ 'Sec-WebSocket-Protocol: sample\r\n'
+ '\r\n' +
+ _TEST_CHALLENGE_RESPONSE)
+
+_BAD_REQUESTS = (
+ ( # HTTP request
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'www.google.com',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;'
+ ' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
+ ' GTB6 GTBA',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
+ '*/*;q=0.8',
+ 'Accept-Language': 'en-us,en;q=0.5',
+ 'Accept-Encoding': 'gzip,deflate',
+ 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
+ 'Keep-Alive': '300',
+ 'Connection': 'keep-alive',
+ }),
+ ( # Wrong method
+ 80,
+ 'POST',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3),
+ ( # Missing Upgrade
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3),
+ ( # Wrong Upgrade
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'NonWebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3),
+ ( # Empty WebSocket-Protocol
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': '',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3),
+ ( # Wrong port number format
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com:0x50',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3),
+ ( # Header/connection port mismatch
+ 8080,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'sample',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3),
+ ( # Illegal WebSocket-Protocol
+ 80,
+ 'GET',
+ '/demo',
+ {
+ 'Host': 'example.com',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key2': _TEST_KEY2,
+ 'Sec-WebSocket-Protocol': 'illegal\x09protocol',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Key1': _TEST_KEY1,
+ 'Origin': 'http://example.com',
+ },
+ _TEST_KEY3),
+)
+
+
+def _create_request(request_def):
+ data = ''
+ if len(request_def) > 4:
+ data = request_def[4]
+ conn = mock.MockConn(data)
+ conn.local_addr = ('0.0.0.0', request_def[0])
+ return mock.MockRequest(
+ method=request_def[1],
+ uri=request_def[2],
+ headers_in=request_def[3],
+ connection=conn)
+
+
+def _create_get_memorized_lines(lines):
+ """Creates a function that returns the given string."""
+
+ def get_memorized_lines():
+ return lines
+ return get_memorized_lines
+
+
+def _create_requests_with_lines(request_lines_set):
+ requests = []
+ for lines in request_lines_set:
+ request = _create_request(_GOOD_REQUEST)
+ request.connection.get_memorized_lines = _create_get_memorized_lines(
+ lines)
+ requests.append(request)
+ return requests
+
+
+class HyBi00HandshakerTest(unittest.TestCase):
+
+ def test_good_request_default_port(self):
+ request = _create_request(_GOOD_REQUEST)
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
+ request.connection.written_data())
+ self.assertEqual('/demo', request.ws_resource)
+ self.assertEqual('http://example.com', request.ws_origin)
+ self.assertEqual('ws://example.com/demo', request.ws_location)
+ self.assertEqual('sample', request.ws_protocol)
+
+ def test_good_request_capitalized_header_values(self):
+ request = _create_request(_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES)
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
+ request.connection.written_data())
+
+ def test_good_request_case_mixed_header_names(self):
+ request = _create_request(_GOOD_REQUEST_CASE_MIXED_HEADER_NAMES)
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
+ request.connection.written_data())
+
+ def test_good_request_secure_default_port(self):
+ request = _create_request(_GOOD_REQUEST)
+ request.connection.local_addr = ('0.0.0.0', 443)
+ request.is_https_ = True
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_SECURE,
+ request.connection.written_data())
+ self.assertEqual('sample', request.ws_protocol)
+
+ def test_good_request_nondefault_port(self):
+ request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
+ handshaker = Handshaker(request,
+ mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_NONDEFAULT_PORT,
+ request.connection.written_data())
+ self.assertEqual('sample', request.ws_protocol)
+
+ def test_good_request_secure_non_default_port(self):
+ request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
+ request.is_https_ = True
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_SECURE_NONDEF,
+ request.connection.written_data())
+ self.assertEqual('sample', request.ws_protocol)
+
+ def test_good_request_default_no_protocol(self):
+ request = _create_request(_GOOD_REQUEST_NO_PROTOCOL)
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_NO_PROTOCOL,
+ request.connection.written_data())
+ self.assertEqual(None, request.ws_protocol)
+
+ def test_good_request_optional_headers(self):
+ request = _create_request(_GOOD_REQUEST_WITH_OPTIONAL_HEADERS)
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual('AValue',
+ request.headers_in['AKey'])
+ self.assertEqual('',
+ request.headers_in['EmptyValue'])
+
+ def test_good_request_with_nonprintable_key(self):
+ request = _create_request(_GOOD_REQUEST_WITH_NONPRINTABLE_KEY)
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY,
+ request.connection.written_data())
+ self.assertEqual('sample', request.ws_protocol)
+
+ def test_good_request_with_query_part(self):
+ request = _create_request(_GOOD_REQUEST_WITH_QUERY_PART)
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ handshaker.do_handshake()
+ self.assertEqual(_GOOD_RESPONSE_WITH_QUERY_PART,
+ request.connection.written_data())
+ self.assertEqual('ws://example.com/demo?e=mc2', request.ws_location)
+
+ def test_bad_requests(self):
+ for request in map(_create_request, _BAD_REQUESTS):
+ handshaker = Handshaker(request, mock.MockDispatcher())
+ self.assertRaises(HandshakeException, handshaker.do_handshake)
+
+
+class HyBi00ValidateSubprotocolTest(unittest.TestCase):
+ def test_validate_subprotocol(self):
+ # These should succeed.
+ _validate_subprotocol('sample')
+ _validate_subprotocol('Sample')
+ _validate_subprotocol('sample\x7eprotocol')
+ _validate_subprotocol('sample\x20protocol')
+
+ # These should fail.
+ self.assertRaises(HandshakeException,
+ _validate_subprotocol,
+ '')
+ self.assertRaises(HandshakeException,
+ _validate_subprotocol,
+ 'sample\x19protocol')
+ self.assertRaises(HandshakeException,
+ _validate_subprotocol,
+ 'sample\x7fprotocol')
+ self.assertRaises(HandshakeException,
+ _validate_subprotocol,
+ # "Japan" in Japanese
+ u'\u65e5\u672c')
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
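For context on the fixtures in the file above: _TEST_KEY1, _TEST_KEY2, _TEST_KEY3 and
_TEST_CHALLENGE_RESPONSE match the example handshake from the HyBi-00 (draft-76)
specification. The following is a minimal sketch, assuming the draft-76 rules, of the
challenge computation that the Handshaker under test is expected to perform; the real
implementation lives in mod_pywebsocket.handshake.hybi00, and the helper names below are
made up purely for illustration.

    import re
    import struct
    from hashlib import md5

    def _compute_hybi00_challenge_response(key1, key2, key3):
        def key_to_number(key):
            # Concatenate the digits in the key and divide by the number
            # of spaces in the key (the draft-76 "key number" rule).
            number = int(''.join(re.findall(r'\d', key)))
            return number // key.count(' ')

        # The two key numbers packed as 32-bit big-endian integers, followed
        # by the 8 raw bytes of key3, form a 16-byte challenge; its MD5
        # digest is the 16-byte challenge response.
        challenge = struct.pack('!II', key_to_number(key1), key_to_number(key2))
        return md5(challenge + key3).digest()

Applied to the test keys, this should yield _TEST_CHALLENGE_RESPONSE, which is what the
good-request tests check indirectly through the response data written after the 101
status line and headers.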
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_http_header_util.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_http_header_util.py
new file mode 100755
index 000000000..436dc57c3
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_http_header_util.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for http_header_util module."""
+
+
+import unittest
+
+from mod_pywebsocket import http_header_util
+
+
+class UnitTest(unittest.TestCase):
+ """A unittest for http_header_util module."""
+
+ def test_parse_relative_uri(self):
+ host, port, resource = http_header_util.parse_uri('/ws/test')
+ self.assertEqual(None, host)
+ self.assertEqual(None, port)
+ self.assertEqual('/ws/test', resource)
+
+ def test_parse_absolute_uri(self):
+ host, port, resource = http_header_util.parse_uri(
+ 'ws://localhost:10080/ws/test')
+ self.assertEqual('localhost', host)
+ self.assertEqual(10080, port)
+ self.assertEqual('/ws/test', resource)
+
+ host, port, resource = http_header_util.parse_uri(
+ 'ws://example.com/ws/test')
+ self.assertEqual('example.com', host)
+ self.assertEqual(80, port)
+ self.assertEqual('/ws/test', resource)
+
+ host, port, resource = http_header_util.parse_uri(
+ 'wss://example.com/')
+ self.assertEqual('example.com', host)
+ self.assertEqual(443, port)
+ self.assertEqual('/', resource)
+
+ host, port, resource = http_header_util.parse_uri(
+ 'ws://example.com:8080')
+ self.assertEqual('example.com', host)
+ self.assertEqual(8080, port)
+ self.assertEqual('/', resource)
+
+ def test_parse_invalid_uri(self):
+ host, port, resource = http_header_util.parse_uri('ws:///')
+ self.assertEqual(None, resource)
+
+ host, port, resource = http_header_util.parse_uri('ws://localhost:')
+ self.assertEqual(None, resource)
+
+ host, port, resource = http_header_util.parse_uri('ws://localhost:/ws')
+ self.assertEqual(None, resource)
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_memorizingfile.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_memorizingfile.py
new file mode 100755
index 000000000..8f1b8eef4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_memorizingfile.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for memorizingfile module."""
+
+
+import StringIO
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket import memorizingfile
+
+
+class UtilTest(unittest.TestCase):
+ """A unittest for memorizingfile module."""
+
+ def check(self, memorizing_file, num_read, expected_list):
+ for unused in range(num_read):
+ memorizing_file.readline()
+ actual_list = memorizing_file.get_memorized_lines()
+ self.assertEqual(len(expected_list), len(actual_list))
+ for expected, actual in zip(expected_list, actual_list):
+ self.assertEqual(expected, actual)
+
+ def check_with_size(self, memorizing_file, read_size, expected_list):
+ read_list = []
+ read_line = ''
+ while True:
+ line = memorizing_file.readline(read_size)
+ line_length = len(line)
+ self.assertTrue(line_length <= read_size)
+ if line_length == 0:
+ if read_line != '':
+ read_list.append(read_line)
+ break
+ read_line += line
+ if line[line_length - 1] == '\n':
+ read_list.append(read_line)
+ read_line = ''
+ actual_list = memorizing_file.get_memorized_lines()
+ self.assertEqual(len(expected_list), len(actual_list))
+ self.assertEqual(len(expected_list), len(read_list))
+ for expected, actual, read in zip(expected_list, actual_list,
+ read_list):
+ self.assertEqual(expected, actual)
+ self.assertEqual(expected, read)
+
+ def test_get_memorized_lines(self):
+ memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
+ 'Hello\nWorld\nWelcome'))
+ self.check(memorizing_file, 3, ['Hello\n', 'World\n', 'Welcome'])
+
+ def test_get_memorized_lines_limit_memorized_lines(self):
+ memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
+ 'Hello\nWorld\nWelcome'), 2)
+ self.check(memorizing_file, 3, ['Hello\n', 'World\n'])
+
+ def test_get_memorized_lines_empty_file(self):
+ memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
+ ''))
+ self.check(memorizing_file, 10, [])
+
+ def test_get_memorized_lines_with_size(self):
+ for size in range(1, 10):
+ memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
+ 'Hello\nWorld\nWelcome'))
+ self.check_with_size(memorizing_file, size,
+ ['Hello\n', 'World\n', 'Welcome'])
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
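To make the interface exercised by UtilTest above concrete, here is a small usage sketch
restricted to the calls the tests rely on (the constructor with an optional line limit,
readline, and get_memorized_lines); the HTTP-style input string is only an illustration.

    import StringIO

    from mod_pywebsocket import memorizingfile

    # Wrap any readable file-like object. An optional second argument caps
    # how many lines are memorized (cf. the limit test above).
    wrapped = memorizingfile.MemorizingFile(StringIO.StringIO(
        'GET /demo HTTP/1.1\r\nHost: example.com\r\n\r\n'))

    wrapped.readline()                    # 'GET /demo HTTP/1.1\r\n'
    wrapped.readline()                    # 'Host: example.com\r\n'
    print wrapped.get_memorized_lines()   # the two lines read so far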
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_mock.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_mock.py
new file mode 100755
index 000000000..7dc23a73d
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_mock.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for mock module."""
+
+
+import Queue
+import threading
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from test import mock
+
+
+class MockConnTest(unittest.TestCase):
+ """A unittest for MockConn class."""
+
+ def setUp(self):
+ self._conn = mock.MockConn('ABC\r\nDEFG\r\n\r\nHIJK')
+
+ def test_readline(self):
+ self.assertEqual('ABC\r\n', self._conn.readline())
+ self.assertEqual('DEFG\r\n', self._conn.readline())
+ self.assertEqual('\r\n', self._conn.readline())
+ self.assertEqual('HIJK', self._conn.readline())
+ self.assertEqual('', self._conn.readline())
+
+ def test_read(self):
+ self.assertEqual('ABC\r\nD', self._conn.read(6))
+ self.assertEqual('EFG\r\n\r\nHI', self._conn.read(9))
+ self.assertEqual('JK', self._conn.read(10))
+ self.assertEqual('', self._conn.read(10))
+
+ def test_read_and_readline(self):
+ self.assertEqual('ABC\r\nD', self._conn.read(6))
+ self.assertEqual('EFG\r\n', self._conn.readline())
+ self.assertEqual('\r\nHIJK', self._conn.read(9))
+ self.assertEqual('', self._conn.readline())
+
+ def test_write(self):
+ self._conn.write('Hello\r\n')
+ self._conn.write('World\r\n')
+ self.assertEqual('Hello\r\nWorld\r\n', self._conn.written_data())
+
+
+class MockBlockingConnTest(unittest.TestCase):
+ """A unittest for MockBlockingConn class."""
+
+ def test_read(self):
+ """Tests that data put to MockBlockingConn by put_bytes method can be
+ read from it.
+ """
+
+ class LineReader(threading.Thread):
+ """A test class that launches a thread, calls readline on the
+ specified conn repeatedly and puts the read data into the specified
+ queue.
+ """
+
+ def __init__(self, conn, queue):
+ threading.Thread.__init__(self)
+ self._queue = queue
+ self._conn = conn
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ while True:
+ data = self._conn.readline()
+ self._queue.put(data)
+
+ conn = mock.MockBlockingConn()
+ queue = Queue.Queue()
+ reader = LineReader(conn, queue)
+ self.assertTrue(queue.empty())
+ conn.put_bytes('Foo bar\r\n')
+ read = queue.get()
+ self.assertEqual('Foo bar\r\n', read)
+
+
+class MockTableTest(unittest.TestCase):
+ """A unittest for MockTable class."""
+
+ def test_create_from_dict(self):
+ table = mock.MockTable({'Key': 'Value'})
+ self.assertEqual('Value', table.get('KEY'))
+ self.assertEqual('Value', table['key'])
+
+ def test_create_from_list(self):
+ table = mock.MockTable([('Key', 'Value')])
+ self.assertEqual('Value', table.get('KEY'))
+ self.assertEqual('Value', table['key'])
+
+ def test_create_from_tuple(self):
+ table = mock.MockTable((('Key', 'Value'),))
+ self.assertEqual('Value', table.get('KEY'))
+ self.assertEqual('Value', table['key'])
+
+ def test_set_and_get(self):
+ table = mock.MockTable()
+ self.assertEqual(None, table.get('Key'))
+ table['Key'] = 'Value'
+ self.assertEqual('Value', table.get('Key'))
+ self.assertEqual('Value', table.get('key'))
+ self.assertEqual('Value', table.get('KEY'))
+ self.assertEqual('Value', table['Key'])
+ self.assertEqual('Value', table['key'])
+ self.assertEqual('Value', table['KEY'])
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
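The test_msgutil.py tests below build expected WebSocket frames by hand ('\x81\x05Hello',
'\x81\x7e\x00\x7e' + payload, and so on). As a reading aid, here is a minimal sketch, not
part of pywebsocket, of how those server-to-client text-frame headers are laid out.

    import struct

    def _build_text_frame(payload, fin=True):
        # First byte: FIN flag (0x80) plus the text opcode (0x1).
        first_byte = (0x80 if fin else 0x00) | 0x01
        length = len(payload)
        if length <= 125:
            header = struct.pack('!BB', first_byte, length)
        elif length < (1 << 16):
            # 126 escapes to a 16-bit extended length.
            header = struct.pack('!BBH', first_byte, 126, length)
        else:
            # 127 escapes to a 64-bit extended length.
            header = struct.pack('!BBQ', first_byte, 127, length)
        # Server-to-client frames are not masked; client-to-server frames
        # additionally set the mask bit in the length byte and carry a
        # masking key, which is what _mask_hybi in the tests emulates.
        return header + payload

For example, _build_text_frame('Hello') gives '\x81\x05Hello' as in test_send_message,
and a 65536-byte payload gives the '\x81\x7f\x00\x00\x00\x00\x00\x01\x00\x00' header
checked in test_send_large_message.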
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_msgutil.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_msgutil.py
new file mode 100755
index 000000000..5fedcf92f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_msgutil.py
@@ -0,0 +1,1356 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for msgutil module."""
+
+
+import array
+import Queue
+import random
+import struct
+import unittest
+import zlib
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import DeflateFrameExtensionProcessor
+from mod_pywebsocket.extensions import PerMessageCompressExtensionProcessor
+from mod_pywebsocket.extensions import PerMessageDeflateExtensionProcessor
+from mod_pywebsocket import msgutil
+from mod_pywebsocket.stream import InvalidUTF8Exception
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+from test import mock
+
+
+# We use one fixed nonce for testing instead of a cryptographically secure PRNG.
+_MASKING_NONCE = 'ABCD'
+
+
+def _mask_hybi(frame):
+ frame_key = map(ord, _MASKING_NONCE)
+ frame_key_len = len(frame_key)
+ result = array.array('B')
+ result.fromstring(frame)
+ count = 0
+ for i in xrange(len(result)):
+ result[i] ^= frame_key[count]
+ count = (count + 1) % frame_key_len
+ return _MASKING_NONCE + result.tostring()
+
+
+def _install_extension_processor(processor, request, stream_options):
+ response = processor.get_extension_response()
+ if response is not None:
+ processor.setup_stream_options(stream_options)
+ request.ws_extension_processors.append(processor)
+
+
+def _create_request_from_rawdata(
+ read_data,
+ deflate_frame_request=None,
+ permessage_compression_request=None,
+ permessage_deflate_request=None):
+ req = mock.MockRequest(connection=mock.MockConn(''.join(read_data)))
+ req.ws_version = common.VERSION_HYBI_LATEST
+ req.ws_extension_processors = []
+
+ processor = None
+ if deflate_frame_request is not None:
+ processor = DeflateFrameExtensionProcessor(deflate_frame_request)
+ elif permessage_compression_request is not None:
+ processor = PerMessageCompressExtensionProcessor(
+ permessage_compression_request)
+ elif permessage_deflate_request is not None:
+ processor = PerMessageDeflateExtensionProcessor(
+ permessage_deflate_request)
+
+ stream_options = StreamOptions()
+ if processor is not None:
+ _install_extension_processor(processor, req, stream_options)
+ req.ws_stream = Stream(req, stream_options)
+
+ return req
+
+
+def _create_request(*frames):
+ """Creates MockRequest using data given as frames.
+
+ frames will be returned on calling request.connection.read() where request
+ is MockRequest returned by this function.
+ """
+
+ read_data = []
+ for (header, body) in frames:
+ read_data.append(header + _mask_hybi(body))
+
+ return _create_request_from_rawdata(read_data)
+
+
+def _create_blocking_request():
+ """Creates MockRequest.
+
+ Data written to a MockRequest can be read out by calling
+ request.connection.written_data().
+ """
+
+ req = mock.MockRequest(connection=mock.MockBlockingConn())
+ req.ws_version = common.VERSION_HYBI_LATEST
+ stream_options = StreamOptions()
+ req.ws_stream = Stream(req, stream_options)
+ return req
+
+
+def _create_request_hixie75(read_data=''):
+ req = mock.MockRequest(connection=mock.MockConn(read_data))
+ req.ws_stream = StreamHixie75(req)
+ return req
+
+
+def _create_blocking_request_hixie75():
+ req = mock.MockRequest(connection=mock.MockBlockingConn())
+ req.ws_stream = StreamHixie75(req)
+ return req
+
+
+class BasicMessageTest(unittest.TestCase):
+ """Basic tests for Stream."""
+
+ def test_send_message(self):
+ request = _create_request()
+ msgutil.send_message(request, 'Hello')
+ self.assertEqual('\x81\x05Hello', request.connection.written_data())
+
+ payload = 'a' * 125
+ request = _create_request()
+ msgutil.send_message(request, payload)
+ self.assertEqual('\x81\x7d' + payload,
+ request.connection.written_data())
+
+ def test_send_medium_message(self):
+ payload = 'a' * 126
+ request = _create_request()
+ msgutil.send_message(request, payload)
+ self.assertEqual('\x81\x7e\x00\x7e' + payload,
+ request.connection.written_data())
+
+ payload = 'a' * ((1 << 16) - 1)
+ request = _create_request()
+ msgutil.send_message(request, payload)
+ self.assertEqual('\x81\x7e\xff\xff' + payload,
+ request.connection.written_data())
+
+ def test_send_large_message(self):
+ payload = 'a' * (1 << 16)
+ request = _create_request()
+ msgutil.send_message(request, payload)
+ self.assertEqual('\x81\x7f\x00\x00\x00\x00\x00\x01\x00\x00' + payload,
+ request.connection.written_data())
+
+ def test_send_message_unicode(self):
+ request = _create_request()
+ msgutil.send_message(request, u'\u65e5')
+ # U+65e5 is encoded as e6,97,a5 in UTF-8
+ self.assertEqual('\x81\x03\xe6\x97\xa5',
+ request.connection.written_data())
+
+ def test_send_message_fragments(self):
+ request = _create_request()
+ msgutil.send_message(request, 'Hello', False)
+ msgutil.send_message(request, ' ', False)
+ msgutil.send_message(request, 'World', False)
+ msgutil.send_message(request, '!', True)
+ self.assertEqual('\x01\x05Hello\x00\x01 \x00\x05World\x80\x01!',
+ request.connection.written_data())
+
+ def test_send_fragments_immediate_zero_termination(self):
+ request = _create_request()
+ msgutil.send_message(request, 'Hello World!', False)
+ msgutil.send_message(request, '', True)
+ self.assertEqual('\x01\x0cHello World!\x80\x00',
+ request.connection.written_data())
+
+ def test_receive_message(self):
+ request = _create_request(
+ ('\x81\x85', 'Hello'), ('\x81\x86', 'World!'))
+ self.assertEqual('Hello', msgutil.receive_message(request))
+ self.assertEqual('World!', msgutil.receive_message(request))
+
+ payload = 'a' * 125
+ request = _create_request(('\x81\xfd', payload))
+ self.assertEqual(payload, msgutil.receive_message(request))
+
+ def test_receive_medium_message(self):
+ payload = 'a' * 126
+ request = _create_request(('\x81\xfe\x00\x7e', payload))
+ self.assertEqual(payload, msgutil.receive_message(request))
+
+ payload = 'a' * ((1 << 16) - 1)
+ request = _create_request(('\x81\xfe\xff\xff', payload))
+ self.assertEqual(payload, msgutil.receive_message(request))
+
+ def test_receive_large_message(self):
+ payload = 'a' * (1 << 16)
+ request = _create_request(
+ ('\x81\xff\x00\x00\x00\x00\x00\x01\x00\x00', payload))
+ self.assertEqual(payload, msgutil.receive_message(request))
+
+ def test_receive_length_not_encoded_using_minimal_number_of_bytes(self):
+ # A warning should be logged when the payload length field doesn't use
+ # the minimal number of bytes, but processing should continue.
+
+ payload = 'a'
+ # 1 byte can be represented without extended payload length field.
+ request = _create_request(
+ ('\x81\xff\x00\x00\x00\x00\x00\x00\x00\x01', payload))
+ self.assertEqual(payload, msgutil.receive_message(request))
+
+ def test_receive_message_unicode(self):
+ request = _create_request(('\x81\x83', '\xe6\x9c\xac'))
+ # U+672c is encoded as e6,9c,ac in UTF-8
+ self.assertEqual(u'\u672c', msgutil.receive_message(request))
+
+ def test_receive_message_erroneous_unicode(self):
+ # \x80 and \x81 are invalid as UTF-8.
+ request = _create_request(('\x81\x82', '\x80\x81'))
+ # Invalid characters should raise InvalidUTF8Exception
+ self.assertRaises(InvalidUTF8Exception,
+ msgutil.receive_message,
+ request)
+
+ def test_receive_fragments(self):
+ request = _create_request(
+ ('\x01\x85', 'Hello'),
+ ('\x00\x81', ' '),
+ ('\x00\x85', 'World'),
+ ('\x80\x81', '!'))
+ self.assertEqual('Hello World!', msgutil.receive_message(request))
+
+ def test_receive_fragments_unicode(self):
+ # UTF-8 encodes U+6f22 into e6bca2 and U+5b57 into e5ad97.
+ request = _create_request(
+ ('\x01\x82', '\xe6\xbc'),
+ ('\x00\x82', '\xa2\xe5'),
+ ('\x80\x82', '\xad\x97'))
+ self.assertEqual(u'\u6f22\u5b57', msgutil.receive_message(request))
+
+ def test_receive_fragments_immediate_zero_termination(self):
+ request = _create_request(
+ ('\x01\x8c', 'Hello World!'), ('\x80\x80', ''))
+ self.assertEqual('Hello World!', msgutil.receive_message(request))
+
+ def test_receive_fragments_duplicate_start(self):
+ request = _create_request(
+ ('\x01\x85', 'Hello'), ('\x01\x85', 'World'))
+ self.assertRaises(msgutil.InvalidFrameException,
+ msgutil.receive_message,
+ request)
+
+ def test_receive_fragments_intermediate_but_not_started(self):
+ request = _create_request(('\x00\x85', 'Hello'))
+ self.assertRaises(msgutil.InvalidFrameException,
+ msgutil.receive_message,
+ request)
+
+ def test_receive_fragments_end_but_not_started(self):
+ request = _create_request(('\x80\x85', 'Hello'))
+ self.assertRaises(msgutil.InvalidFrameException,
+ msgutil.receive_message,
+ request)
+
+ def test_receive_message_discard(self):
+ request = _create_request(
+ ('\x8f\x86', 'IGNORE'), ('\x81\x85', 'Hello'),
+ ('\x8f\x89', 'DISREGARD'), ('\x81\x86', 'World!'))
+ self.assertRaises(msgutil.UnsupportedFrameException,
+ msgutil.receive_message, request)
+ self.assertEqual('Hello', msgutil.receive_message(request))
+ self.assertRaises(msgutil.UnsupportedFrameException,
+ msgutil.receive_message, request)
+ self.assertEqual('World!', msgutil.receive_message(request))
+
+ def test_receive_close(self):
+ request = _create_request(
+ ('\x88\x8a', struct.pack('!H', 1000) + 'Good bye'))
+ self.assertEqual(None, msgutil.receive_message(request))
+ self.assertEqual(1000, request.ws_close_code)
+ self.assertEqual('Good bye', request.ws_close_reason)
+
+ def test_send_longest_close(self):
+ reason = 'a' * 123
+ request = _create_request(
+ ('\x88\xfd',
+ struct.pack('!H', common.STATUS_NORMAL_CLOSURE) + reason))
+ request.ws_stream.close_connection(common.STATUS_NORMAL_CLOSURE,
+ reason)
+ self.assertEqual(request.ws_close_code, common.STATUS_NORMAL_CLOSURE)
+ self.assertEqual(request.ws_close_reason, reason)
+
+ def test_send_close_too_long(self):
+ request = _create_request()
+ self.assertRaises(msgutil.BadOperationException,
+ Stream.close_connection,
+ request.ws_stream,
+ common.STATUS_NORMAL_CLOSURE,
+ 'a' * 124)
+
+ def test_send_close_inconsistent_code_and_reason(self):
+ request = _create_request()
+ # reason parameter must not be specified when code is None.
+ self.assertRaises(msgutil.BadOperationException,
+ Stream.close_connection,
+ request.ws_stream,
+ None,
+ 'a')
+
+ def test_send_ping(self):
+ request = _create_request()
+ msgutil.send_ping(request, 'Hello World!')
+ self.assertEqual('\x89\x0cHello World!',
+ request.connection.written_data())
+
+ def test_send_longest_ping(self):
+ request = _create_request()
+ msgutil.send_ping(request, 'a' * 125)
+ self.assertEqual('\x89\x7d' + 'a' * 125,
+ request.connection.written_data())
+
+ def test_send_ping_too_long(self):
+ request = _create_request()
+ self.assertRaises(msgutil.BadOperationException,
+ msgutil.send_ping,
+ request,
+ 'a' * 126)
+
+ def test_receive_ping(self):
+ """Tests receiving a ping control frame."""
+
+ def handler(request, message):
+ request.called = True
+
+ # Stream automatically responds to a ping with a pong without any
+ # action by the application layer.
+ request = _create_request(
+ ('\x89\x85', 'Hello'), ('\x81\x85', 'World'))
+ self.assertEqual('World', msgutil.receive_message(request))
+ self.assertEqual('\x8a\x05Hello',
+ request.connection.written_data())
+
+ request = _create_request(
+ ('\x89\x85', 'Hello'), ('\x81\x85', 'World'))
+ request.on_ping_handler = handler
+ self.assertEqual('World', msgutil.receive_message(request))
+ self.assertTrue(request.called)
+
+ def test_receive_longest_ping(self):
+ request = _create_request(
+ ('\x89\xfd', 'a' * 125), ('\x81\x85', 'World'))
+ self.assertEqual('World', msgutil.receive_message(request))
+ self.assertEqual('\x8a\x7d' + 'a' * 125,
+ request.connection.written_data())
+
+ def test_receive_ping_too_long(self):
+ request = _create_request(('\x89\xfe\x00\x7e', 'a' * 126))
+ self.assertRaises(msgutil.InvalidFrameException,
+ msgutil.receive_message,
+ request)
+
+ def test_receive_pong(self):
+ """Tests receiving a pong control frame."""
+
+ def handler(request, message):
+ request.called = True
+
+ request = _create_request(
+ ('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
+ request.on_pong_handler = handler
+ msgutil.send_ping(request, 'Hello')
+ self.assertEqual('\x89\x05Hello',
+ request.connection.written_data())
+ # A valid pong is received, but receive_message won't return for it.
+ self.assertEqual('World', msgutil.receive_message(request))
+ # Check that nothing was written after receive_message call.
+ self.assertEqual('\x89\x05Hello',
+ request.connection.written_data())
+
+ self.assertTrue(request.called)
+
+ def test_receive_unsolicited_pong(self):
+ # An unsolicited pong has been allowed since HyBi 07.
+ request = _create_request(
+ ('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
+ msgutil.receive_message(request)
+
+ request = _create_request(
+ ('\x8a\x85', 'Hello'), ('\x81\x85', 'World'))
+ msgutil.send_ping(request, 'Jumbo')
+ # The pong body doesn't match the ping body; it is still accepted.
+ msgutil.receive_message(request)
+
+ def test_ping_cannot_be_fragmented(self):
+ request = _create_request(('\x09\x85', 'Hello'))
+ self.assertRaises(msgutil.InvalidFrameException,
+ msgutil.receive_message,
+ request)
+
+ def test_ping_with_too_long_payload(self):
+ request = _create_request(('\x89\xfe\x01\x00', 'a' * 256))
+ self.assertRaises(msgutil.InvalidFrameException,
+ msgutil.receive_message,
+ request)
+
+
+class DeflateFrameTest(unittest.TestCase):
+ """Tests for checking deflate-frame extension."""
+
+ def test_send_message(self):
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', deflate_frame_request=extension)
+ msgutil.send_message(request, 'Hello')
+ msgutil.send_message(request, 'World')
+
+ expected = ''
+
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ expected += '\xc1%c' % len(compressed_hello)
+ expected += compressed_hello
+
+ compressed_world = compress.compress('World')
+ compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_world = compressed_world[:-4]
+ expected += '\xc1%c' % len(compressed_world)
+ expected += compressed_world
+
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_send_message_bfinal(self):
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', deflate_frame_request=extension)
+ self.assertEqual(1, len(request.ws_extension_processors))
+ deflate_frame_processor = request.ws_extension_processors[0]
+ deflate_frame_processor.set_bfinal(True)
+ msgutil.send_message(request, 'Hello')
+ msgutil.send_message(request, 'World')
+
+ expected = ''
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_FINISH)
+ compressed_hello = compressed_hello + chr(0)
+ expected += '\xc1%c' % len(compressed_hello)
+ expected += compressed_hello
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_world = compress.compress('World')
+ compressed_world += compress.flush(zlib.Z_FINISH)
+ compressed_world = compressed_world + chr(0)
+ expected += '\xc1%c' % len(compressed_world)
+ expected += compressed_world
+
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_send_message_comp_bit(self):
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', deflate_frame_request=extension)
+ self.assertEqual(1, len(request.ws_extension_processors))
+ deflate_frame_processor = request.ws_extension_processors[0]
+ msgutil.send_message(request, 'Hello')
+ deflate_frame_processor.disable_outgoing_compression()
+ msgutil.send_message(request, 'Hello')
+ deflate_frame_processor.enable_outgoing_compression()
+ msgutil.send_message(request, 'Hello')
+
+ expected = ''
+
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ expected += '\xc1%c' % len(compressed_hello)
+ expected += compressed_hello
+
+ expected += '\x81\x05Hello'
+
+ compressed_2nd_hello = compress.compress('Hello')
+ compressed_2nd_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_2nd_hello = compressed_2nd_hello[:-4]
+ expected += '\xc1%c' % len(compressed_2nd_hello)
+ expected += compressed_2nd_hello
+
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_send_message_no_context_takeover_parameter(self):
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ extension.add_parameter('no_context_takeover', None)
+ request = _create_request_from_rawdata(
+ '', deflate_frame_request=extension)
+ for i in xrange(3):
+ msgutil.send_message(request, 'Hello')
+
+ compressed_message = compress.compress('Hello')
+ compressed_message += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_message = compressed_message[:-4]
+ expected = '\xc1%c' % len(compressed_message)
+ expected += compressed_message
+
+ self.assertEqual(
+ expected + expected + expected, request.connection.written_data())
+
+ def test_bad_request_parameters(self):
+ """Tests that if there's anything wrong with deflate-frame extension
+ request, deflate-frame is rejected.
+ """
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ # max_window_bits less than 8 is illegal.
+ extension.add_parameter('max_window_bits', '7')
+ processor = DeflateFrameExtensionProcessor(extension)
+ self.assertEqual(None, processor.get_extension_response())
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ # max_window_bits greater than 15 is illegal.
+ extension.add_parameter('max_window_bits', '16')
+ processor = DeflateFrameExtensionProcessor(extension)
+ self.assertEqual(None, processor.get_extension_response())
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ # Non integer max_window_bits is illegal.
+ extension.add_parameter('max_window_bits', 'foobar')
+ processor = DeflateFrameExtensionProcessor(extension)
+ self.assertEqual(None, processor.get_extension_response())
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ # no_context_takeover must not have any value.
+ extension.add_parameter('no_context_takeover', 'foobar')
+ processor = DeflateFrameExtensionProcessor(extension)
+ self.assertEqual(None, processor.get_extension_response())
+
+ def test_response_parameters(self):
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ processor = DeflateFrameExtensionProcessor(extension)
+ processor.set_response_window_bits(8)
+ response = processor.get_extension_response()
+ self.assertTrue(response.has_parameter('max_window_bits'))
+ self.assertEqual('8', response.get_parameter_value('max_window_bits'))
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ processor = DeflateFrameExtensionProcessor(extension)
+ processor.set_response_no_context_takeover(True)
+ response = processor.get_extension_response()
+ self.assertTrue(response.has_parameter('no_context_takeover'))
+ self.assertTrue(
+ response.get_parameter_value('no_context_takeover') is None)
+
+ def test_receive_message(self):
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ data = ''
+
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ data += '\xc1%c' % (len(compressed_hello) | 0x80)
+ data += _mask_hybi(compressed_hello)
+
+ compressed_websocket = compress.compress('WebSocket')
+ compressed_websocket += compress.flush(zlib.Z_FINISH)
+ compressed_websocket += '\x00'
+ data += '\xc1%c' % (len(compressed_websocket) | 0x80)
+ data += _mask_hybi(compressed_websocket)
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ compressed_world = compress.compress('World')
+ compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_world = compressed_world[:-4]
+ data += '\xc1%c' % (len(compressed_world) | 0x80)
+ data += _mask_hybi(compressed_world)
+
+ # Close frame
+ data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ request = _create_request_from_rawdata(
+ data, deflate_frame_request=extension)
+ self.assertEqual('Hello', msgutil.receive_message(request))
+ self.assertEqual('WebSocket', msgutil.receive_message(request))
+ self.assertEqual('World', msgutil.receive_message(request))
+
+ self.assertEqual(None, msgutil.receive_message(request))
+
+ def test_receive_message_client_using_smaller_window(self):
+ """Test that frames coming from a client which is using smaller window
+ size that the server are correctly received.
+ """
+
+ # Using the smallest window bits of 8 for generating input frames.
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -8)
+
+ data = ''
+
+ # Use a frame whose content, before compression, is bigger than the
+ # client's DEFLATE window size. The content mainly consists of 'a',
+ # but a run of 'b' is put at the head and tail so that the head is
+ # back-referenced if the window size is big, and not if it is small.
+ payload = 'b' * 64 + 'a' * 1024 + 'b' * 64
+ compressed_hello = compress.compress(payload)
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ data += '\xc1%c' % (len(compressed_hello) | 0x80)
+ data += _mask_hybi(compressed_hello)
+
+ # Close frame
+ data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ request = _create_request_from_rawdata(
+ data, deflate_frame_request=extension)
+ self.assertEqual(payload, msgutil.receive_message(request))
+
+ self.assertEqual(None, msgutil.receive_message(request))
+
+ def test_receive_message_comp_bit(self):
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ data = ''
+
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ data += '\xc1%c' % (len(compressed_hello) | 0x80)
+ data += _mask_hybi(compressed_hello)
+
+ data += '\x81\x85' + _mask_hybi('Hello')
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ compressed_2nd_hello = compress.compress('Hello')
+ compressed_2nd_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_2nd_hello = compressed_2nd_hello[:-4]
+ data += '\xc1%c' % (len(compressed_2nd_hello) | 0x80)
+ data += _mask_hybi(compressed_2nd_hello)
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ request = _create_request_from_rawdata(
+ data, deflate_frame_request=extension)
+ for i in xrange(3):
+ self.assertEqual('Hello', msgutil.receive_message(request))
+
+ def test_receive_message_various_btype(self):
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ data = ''
+
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ data += '\xc1%c' % (len(compressed_hello) | 0x80)
+ data += _mask_hybi(compressed_hello)
+
+ compressed_websocket = compress.compress('WebSocket')
+ compressed_websocket += compress.flush(zlib.Z_FINISH)
+ compressed_websocket += '\x00'
+ data += '\xc1%c' % (len(compressed_websocket) | 0x80)
+ data += _mask_hybi(compressed_websocket)
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ compressed_world = compress.compress('World')
+ compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_world = compressed_world[:-4]
+ data += '\xc1%c' % (len(compressed_world) | 0x80)
+ data += _mask_hybi(compressed_world)
+
+ # Close frame
+ data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
+
+ extension = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+ request = _create_request_from_rawdata(
+ data, deflate_frame_request=extension)
+ self.assertEqual('Hello', msgutil.receive_message(request))
+ self.assertEqual('WebSocket', msgutil.receive_message(request))
+ self.assertEqual('World', msgutil.receive_message(request))
+
+ self.assertEqual(None, msgutil.receive_message(request))
+
+
+class PerMessageDeflateTest(unittest.TestCase):
+ """Tests for permessage-deflate extension."""
+
+ def test_send_message(self):
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+ msgutil.send_message(request, 'Hello')
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ expected = '\xc1%c' % len(compressed_hello)
+ expected += compressed_hello
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_send_empty_message(self):
+ """Test that an empty message is compressed correctly."""
+
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+
+ msgutil.send_message(request, '')
+
+ # Payload in binary: 0b00000010 0b00000000
+ # From LSB,
+ # - 1 bit of BFINAL (0)
+ # - 2 bits of BTYPE (01 that means fixed Huffman)
+ # - 7 bits of the first code (0000000 that is the code for the
+ # end-of-block)
+ # - 1 bit of BFINAL (0)
+ # - 2 bits of BTYPE (no compression)
+ # - 3 bits of padding
+ self.assertEqual('\xc1\x02\x02\x00',
+ request.connection.written_data())
+
+ def test_send_message_with_null_character(self):
+ """Test that a simple payload (one null) is framed correctly."""
+
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+
+ msgutil.send_message(request, '\x00')
+
+ # Payload in binary: 0b01100010 0b00000000 0b00000000
+ # From LSB,
+ # - 1 bit of BFINAL (0)
+ # - 2 bits of BTYPE (01 that means fixed Huffman)
+ # - 8 bits of the first code (00110000 that is the code for the literal
+ # alphabet 0x00)
+ # - 7 bits of the second code (0000000 that is the code for the
+ # end-of-block)
+ # - 1 bit of BFINAL (0)
+ # - 2 bits of BTYPE (no compression)
+ # - 2 bits of padding
+ self.assertEqual('\xc1\x03\x62\x00\x00',
+ request.connection.written_data())
+
+ def test_send_two_messages(self):
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+ msgutil.send_message(request, 'Hello')
+ msgutil.send_message(request, 'World')
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ expected = ''
+
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ expected += '\xc1%c' % len(compressed_hello)
+ expected += compressed_hello
+
+ compressed_world = compress.compress('World')
+ compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_world = compressed_world[:-4]
+ expected += '\xc1%c' % len(compressed_world)
+ expected += compressed_world
+
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_send_message_fragmented(self):
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+ msgutil.send_message(request, 'Hello', end=False)
+ msgutil.send_message(request, 'Goodbye', end=False)
+ msgutil.send_message(request, 'World')
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ expected = '\x41%c' % len(compressed_hello)
+ expected += compressed_hello
+ compressed_goodbye = compress.compress('Goodbye')
+ compressed_goodbye += compress.flush(zlib.Z_SYNC_FLUSH)
+ expected += '\x00%c' % len(compressed_goodbye)
+ expected += compressed_goodbye
+ compressed_world = compress.compress('World')
+ compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_world = compressed_world[:-4]
+ expected += '\x80%c' % len(compressed_world)
+ expected += compressed_world
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_send_message_fragmented_empty_first_frame(self):
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+ msgutil.send_message(request, '', end=False)
+ msgutil.send_message(request, 'Hello')
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ expected = '\x41%c' % len(compressed_hello)
+ expected += compressed_hello
+ compressed_empty = compress.compress('Hello')
+ compressed_empty += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_empty = compressed_empty[:-4]
+ expected += '\x80%c' % len(compressed_empty)
+ expected += compressed_empty
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_send_message_fragmented_empty_last_frame(self):
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+ msgutil.send_message(request, 'Hello', end=False)
+ msgutil.send_message(request, '')
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ expected = '\x41%c' % len(compressed_hello)
+ expected += compressed_hello
+ compressed_empty = compress.compress('')
+ compressed_empty += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_empty = compressed_empty[:-4]
+ expected += '\x80%c' % len(compressed_empty)
+ expected += compressed_empty
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_send_message_using_small_window(self):
+ common_part = 'abcdefghijklmnopqrstuvwxyz'
+ test_message = common_part + '-' * 30000 + common_part
+
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ extension.add_parameter('server_max_window_bits', '8')
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+ msgutil.send_message(request, test_message)
+
+ expected_websocket_header_size = 2
+ expected_websocket_payload_size = 91
+
+ actual_frame = request.connection.written_data()
+ self.assertEqual(expected_websocket_header_size +
+ expected_websocket_payload_size,
+ len(actual_frame))
+ actual_header = actual_frame[0:expected_websocket_header_size]
+ actual_payload = actual_frame[expected_websocket_header_size:]
+
+ self.assertEqual(
+ '\xc1%c' % expected_websocket_payload_size, actual_header)
+ decompress = zlib.decompressobj(-8)
+ decompressed_message = decompress.decompress(
+ actual_payload + '\x00\x00\xff\xff')
+ decompressed_message += decompress.flush()
+ self.assertEqual(test_message, decompressed_message)
+ self.assertEqual(0, len(decompress.unused_data))
+ self.assertEqual(0, len(decompress.unconsumed_tail))
+
+ def test_send_message_no_context_takeover_parameter(self):
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ extension.add_parameter('server_no_context_takeover', None)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+ for i in xrange(3):
+ msgutil.send_message(request, 'Hello', end=False)
+ msgutil.send_message(request, 'Hello', end=True)
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ first_hello = compress.compress('Hello')
+ first_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ expected = '\x41%c' % len(first_hello)
+ expected += first_hello
+ second_hello = compress.compress('Hello')
+ second_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ second_hello = second_hello[:-4]
+ expected += '\x80%c' % len(second_hello)
+ expected += second_hello
+
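+ # With server_no_context_takeover the compression context is expected to
+ # be reset for every message, so each of the three identical two-fragment
+ # messages should compress to exactly the same bytes; hence the tripled
+ # expectation below.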
+ self.assertEqual(
+ expected + expected + expected,
+ request.connection.written_data())
+
+ def test_send_message_fragmented_bfinal(self):
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ '', permessage_deflate_request=extension)
+ self.assertEqual(1, len(request.ws_extension_processors))
+ request.ws_extension_processors[0].set_bfinal(True)
+ msgutil.send_message(request, 'Hello', end=False)
+ msgutil.send_message(request, 'World', end=True)
+
+ expected = ''
+
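+ # In bfinal mode each compressed chunk is expected to be terminated with
+ # Z_FINISH (a BFINAL deflate block) plus a trailing 0x00 octet, with a
+ # fresh compressor used for the next chunk; the expected frames below
+ # mirror that.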
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_FINISH)
+ compressed_hello = compressed_hello + chr(0)
+ expected += '\x41%c' % len(compressed_hello)
+ expected += compressed_hello
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_world = compress.compress('World')
+ compressed_world += compress.flush(zlib.Z_FINISH)
+ compressed_world = compressed_world + chr(0)
+ expected += '\x80%c' % len(compressed_world)
+ expected += compressed_world
+
+ self.assertEqual(expected, request.connection.written_data())
+
+ def test_receive_message_deflate(self):
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
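+ # Client-to-server frames must be masked, hence the 0x80 (mask) bit in
+ # the length octet and the _mask_hybi() call on the payload.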
+ data = '\xc1%c' % (len(compressed_hello) | 0x80)
+ data += _mask_hybi(compressed_hello)
+
+ # Close frame
+ data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
+
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ data, permessage_deflate_request=extension)
+ self.assertEqual('Hello', msgutil.receive_message(request))
+
+ self.assertEqual(None, msgutil.receive_message(request))
+
+ def test_receive_message_random_section(self):
+ """Test that a compressed message fragmented into lots of chunks is
+ correctly received.
+ """
+
+ random.seed(a=0)
+ payload = ''.join(
+ [chr(random.randint(0, 255)) for i in xrange(1000)])
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_payload = compress.compress(payload)
+ compressed_payload += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_payload = compressed_payload[:-4]
+
+ # Fragment the compressed payload into lots of frames.
+ bytes_chunked = 0
+ data = ''
+ frame_count = 0
+
+ chunk_sizes = []
+
+ while bytes_chunked < len(compressed_payload):
+ # Make sure that
+ # - each chunk is at most 125 bytes long so that the 1-octet length
+ # header format can be used for all frames.
+ # - at least 10 chunks are created.
+ chunk_size = random.randint(
+ 1, min(125,
+ len(compressed_payload) / 10,
+ len(compressed_payload) - bytes_chunked))
+ chunk_sizes.append(chunk_size)
+ chunk = compressed_payload[
+ bytes_chunked:bytes_chunked + chunk_size]
+ bytes_chunked += chunk_size
+
+ first_octet = 0x00
+ if len(data) == 0:
+ first_octet = first_octet | 0x42
+ if bytes_chunked == len(compressed_payload):
+ first_octet = first_octet | 0x80
+
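+ # 0x42 marks the first frame (RSV1 for compression plus the binary
+ # opcode); 0x80 adds FIN on the last frame; frames in between keep a
+ # zero first octet, i.e. non-final continuation frames. The 0x80 on the
+ # length octet below is the client mask bit.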
+ data += '%c%c' % (first_octet, chunk_size | 0x80)
+ data += _mask_hybi(chunk)
+
+ frame_count += 1
+
+ print "Chunk sizes: %r" % chunk_sizes
+ self.assertTrue(len(chunk_sizes) > 10)
+
+ # Close frame
+ data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
+
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ data, permessage_deflate_request=extension)
+ self.assertEqual(payload, msgutil.receive_message(request))
+
+ self.assertEqual(None, msgutil.receive_message(request))
+
+ def test_receive_two_messages(self):
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ data = ''
+
+ compressed_hello = compress.compress('HelloWebSocket')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ split_position = len(compressed_hello) / 2
+ data += '\x41%c' % (split_position | 0x80)
+ data += _mask_hybi(compressed_hello[:split_position])
+
+ data += '\x80%c' % ((len(compressed_hello) - split_position) | 0x80)
+ data += _mask_hybi(compressed_hello[split_position:])
+
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+
+ compressed_world = compress.compress('World')
+ compressed_world += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_world = compressed_world[:-4]
+ data += '\xc1%c' % (len(compressed_world) | 0x80)
+ data += _mask_hybi(compressed_world)
+
+ # Close frame
+ data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
+
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ data, permessage_deflate_request=extension)
+ self.assertEqual('HelloWebSocket', msgutil.receive_message(request))
+ self.assertEqual('World', msgutil.receive_message(request))
+
+ self.assertEqual(None, msgutil.receive_message(request))
+
+ def test_receive_message_mixed_btype(self):
+ """Test that a message compressed using lots of DEFLATE blocks with
+ various flush modes is correctly received.
+ """
+
+ random.seed(a=0)
+ payload = ''.join(
+ [chr(random.randint(0, 255)) for i in xrange(1000)])
+
+ compress = None
+
+ # Fragment the compressed payload into lots of frames.
+ bytes_chunked = 0
+ compressed_payload = ''
+
+ chunk_sizes = []
+ methods = []
+ sync_used = False
+ finish_used = False
+
+ while bytes_chunked < len(payload):
+ # Make sure at least 10 chunks are created.
+ chunk_size = random.randint(
+ 1, min(100, len(payload) - bytes_chunked))
+ chunk_sizes.append(chunk_size)
+ chunk = payload[bytes_chunked:bytes_chunked + chunk_size]
+
+ bytes_chunked += chunk_size
+
+ if compress is None:
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION,
+ zlib.DEFLATED,
+ -zlib.MAX_WBITS)
+
+ if bytes_chunked == len(payload):
+ compressed_payload += compress.compress(chunk)
+ compressed_payload += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_payload = compressed_payload[:-4]
+ else:
+ method = random.randint(0, 1)
+ methods.append(method)
+ if method == 0:
+ compressed_payload += compress.compress(chunk)
+ compressed_payload += compress.flush(zlib.Z_SYNC_FLUSH)
+ sync_used = True
+ else:
+ compressed_payload += compress.compress(chunk)
+ compressed_payload += compress.flush(zlib.Z_FINISH)
+ compress = None
+ finish_used = True
+
+ print "Chunk sizes: %r" % chunk_sizes
+ self.assertTrue(len(chunk_sizes) > 10)
+ print "Methods: %r" % methods
+ self.assertTrue(sync_used)
+ self.assertTrue(finish_used)
+
+ self.assertTrue(125 < len(compressed_payload))
+ self.assertTrue(len(compressed_payload) < 65536)
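+ # '\xc2' is a final compressed binary frame, and '\xfe' in the second
+ # octet means "masked, 16-bit extended length", so the compressed
+ # payload has to fall between 126 and 65535 bytes, which the two
+ # assertions above guarantee.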
+ data = '\xc2\xfe' + struct.pack('!H', len(compressed_payload))
+ data += _mask_hybi(compressed_payload)
+
+ # Close frame
+ data += '\x88\x8a' + _mask_hybi(struct.pack('!H', 1000) + 'Good bye')
+
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_DEFLATE_EXTENSION)
+ request = _create_request_from_rawdata(
+ data, permessage_deflate_request=extension)
+ self.assertEqual(payload, msgutil.receive_message(request))
+
+ self.assertEqual(None, msgutil.receive_message(request))
+
+
+class PerMessageCompressTest(unittest.TestCase):
+ """Tests for checking permessage-compression extension."""
+
+ def test_deflate_response_parameters(self):
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_COMPRESSION_EXTENSION)
+ extension.add_parameter('method', 'deflate')
+ processor = PerMessageCompressExtensionProcessor(extension)
+ response = processor.get_extension_response()
+ self.assertEqual('deflate',
+ response.get_parameter_value('method'))
+
+ extension = common.ExtensionParameter(
+ common.PERMESSAGE_COMPRESSION_EXTENSION)
+ extension.add_parameter('method', 'deflate')
+ processor = PerMessageCompressExtensionProcessor(extension)
+
+ def _compression_processor_hook(compression_processor):
+ compression_processor.set_client_max_window_bits(8)
+ compression_processor.set_client_no_context_takeover(True)
+ processor.set_compression_processor_hook(
+ _compression_processor_hook)
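+ # The hook is assumed to run while the response is being built, letting
+ # the test inject client_max_window_bits and client_no_context_takeover
+ # into the negotiated 'method' value checked below.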
+ response = processor.get_extension_response()
+ self.assertEqual(
+ 'deflate; client_max_window_bits=8; client_no_context_takeover',
+ response.get_parameter_value('method'))
+
+
+class MessageTestHixie75(unittest.TestCase):
+ """Tests for draft-hixie-thewebsocketprotocol-76 stream class."""
+
+ def test_send_message(self):
+ request = _create_request_hixie75()
+ msgutil.send_message(request, 'Hello')
+ self.assertEqual('\x00Hello\xff', request.connection.written_data())
+
+ def test_send_message_unicode(self):
+ request = _create_request_hixie75()
+ msgutil.send_message(request, u'\u65e5')
+ # U+65e5 is encoded as e6,97,a5 in UTF-8
+ self.assertEqual('\x00\xe6\x97\xa5\xff',
+ request.connection.written_data())
+
+ def test_receive_message(self):
+ request = _create_request_hixie75('\x00Hello\xff\x00World!\xff')
+ self.assertEqual('Hello', msgutil.receive_message(request))
+ self.assertEqual('World!', msgutil.receive_message(request))
+
+ def test_receive_message_unicode(self):
+ request = _create_request_hixie75('\x00\xe6\x9c\xac\xff')
+ # U+672c is encoded as e6,9c,ac in UTF-8
+ self.assertEqual(u'\u672c', msgutil.receive_message(request))
+
+ def test_receive_message_erroneous_unicode(self):
+ # \x80 and \x81 are invalid as UTF-8.
+ request = _create_request_hixie75('\x00\x80\x81\xff')
+ # Invalid characters should be replaced with
+ # U+fffd REPLACEMENT CHARACTER
+ self.assertEqual(u'\ufffd\ufffd', msgutil.receive_message(request))
+
+ def test_receive_message_discard(self):
+ request = _create_request_hixie75('\x80\x06IGNORE\x00Hello\xff'
+ '\x01DISREGARD\xff\x00World!\xff')
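+ # '\x80\x06' announces a 6-byte length-delimited frame and
+ # '\x01...\xff' a sentinel-delimited frame of an unknown type; both are
+ # expected to be discarded, so only the \x00...\xff text frames come
+ # through.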
+ self.assertEqual('Hello', msgutil.receive_message(request))
+ self.assertEqual('World!', msgutil.receive_message(request))
+
+
+class MessageReceiverTest(unittest.TestCase):
+ """Tests the Stream class using MessageReceiver."""
+
+ def test_queue(self):
+ request = _create_blocking_request()
+ receiver = msgutil.MessageReceiver(request)
+
+ self.assertEqual(None, receiver.receive_nowait())
+
+ request.connection.put_bytes('\x81\x86' + _mask_hybi('Hello!'))
+ self.assertEqual('Hello!', receiver.receive())
+
+ def test_onmessage(self):
+ onmessage_queue = Queue.Queue()
+
+ def onmessage_handler(message):
+ onmessage_queue.put(message)
+
+ request = _create_blocking_request()
+ receiver = msgutil.MessageReceiver(request, onmessage_handler)
+
+ request.connection.put_bytes('\x81\x86' + _mask_hybi('Hello!'))
+ self.assertEqual('Hello!', onmessage_queue.get())
+
+
+class MessageReceiverHixie75Test(unittest.TestCase):
+ """Tests the StreamHixie75 class using MessageReceiver."""
+
+ def test_queue(self):
+ request = _create_blocking_request_hixie75()
+ receiver = msgutil.MessageReceiver(request)
+
+ self.assertEqual(None, receiver.receive_nowait())
+
+ request.connection.put_bytes('\x00Hello!\xff')
+ self.assertEqual('Hello!', receiver.receive())
+
+ def test_onmessage(self):
+ onmessage_queue = Queue.Queue()
+
+ def onmessage_handler(message):
+ onmessage_queue.put(message)
+
+ request = _create_blocking_request_hixie75()
+ receiver = msgutil.MessageReceiver(request, onmessage_handler)
+
+ request.connection.put_bytes('\x00Hello!\xff')
+ self.assertEqual('Hello!', onmessage_queue.get())
+
+
+class MessageSenderTest(unittest.TestCase):
+ """Tests the Stream class using MessageSender."""
+
+ def test_send(self):
+ request = _create_blocking_request()
+ sender = msgutil.MessageSender(request)
+
+ sender.send('World')
+ self.assertEqual('\x81\x05World', request.connection.written_data())
+
+ def test_send_nowait(self):
+ # Use a queue to check the bytes written by MessageSender.
+ # request.connection.written_data() cannot be used here because
+ # MessageSender runs in a separate thread.
+ send_queue = Queue.Queue()
+
+ def write(bytes):
+ send_queue.put(bytes)
+
+ request = _create_blocking_request()
+ request.connection.write = write
+
+ sender = msgutil.MessageSender(request)
+
+ sender.send_nowait('Hello')
+ sender.send_nowait('World')
+ self.assertEqual('\x81\x05Hello', send_queue.get())
+ self.assertEqual('\x81\x05World', send_queue.get())
+
+
+class MessageSenderHixie75Test(unittest.TestCase):
+ """Tests the StreamHixie75 class using MessageSender."""
+
+ def test_send(self):
+ request = _create_blocking_request_hixie75()
+ sender = msgutil.MessageSender(request)
+
+ sender.send('World')
+ self.assertEqual('\x00World\xff', request.connection.written_data())
+
+ def test_send_nowait(self):
+ # Use a queue to check the bytes written by MessageSender.
+ # request.connection.written_data() cannot be used here because
+ # MessageSender runs in a separate thread.
+ send_queue = Queue.Queue()
+
+ def write(bytes):
+ send_queue.put(bytes)
+
+ request = _create_blocking_request_hixie75()
+ request.connection.write = write
+
+ sender = msgutil.MessageSender(request)
+
+ sender.send_nowait('Hello')
+ sender.send_nowait('World')
+ self.assertEqual('\x00Hello\xff', send_queue.get())
+ self.assertEqual('\x00World\xff', send_queue.get())
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_mux.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_mux.py
new file mode 100644
index 000000000..d4598944e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_mux.py
@@ -0,0 +1,2089 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for mux module."""
+
+import Queue
+import copy
+import logging
+import optparse
+import struct
+import sys
+import unittest
+import time
+import zlib
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket import common
+from mod_pywebsocket import mux
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_close_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+from mod_pywebsocket._stream_hybi import parse_frame
+from mod_pywebsocket.extensions import MuxExtensionProcessor
+
+
+import mock
+
+
+_TEST_HEADERS = {'Host': 'server.example.com',
+ 'Upgrade': 'websocket',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': '13',
+ 'Origin': 'http://example.com'}
+
+
+class _OutgoingChannelData(object):
+ def __init__(self):
+ self.messages = []
+ self.control_messages = []
+
+ self.builder = mux._InnerMessageBuilder()
+
+
+class _MockMuxConnection(mock.MockBlockingConn):
+ """Mock class of mod_python connection for mux."""
+
+ def __init__(self):
+ mock.MockBlockingConn.__init__(self)
+ self._control_blocks = []
+ self._channel_data = {}
+
+ self._current_opcode = None
+ self._pending_fragments = []
+
+ self.server_close_code = None
+
+ def write(self, data):
+ """Override MockBlockingConn.write."""
+
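+ # Each call is assumed to carry exactly one encapsulated (physical)
+ # frame; the frame is parsed, fragments are reassembled, and the inner
+ # message is routed either to the control block list or to the
+ # per-channel bookkeeping below.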
+ self._current_data = data
+ self._position = 0
+
+ def _receive_bytes(length):
+ if self._position + length > len(self._current_data):
+ raise ConnectionTerminatedException(
+ 'Failed to receive %d bytes from encapsulated '
+ 'frame' % length)
+ data = self._current_data[self._position:self._position+length]
+ self._position += length
+ return data
+
+ # Parse physical frames and assemble a message if the message is
+ # fragmented.
+ opcode, payload, fin, rsv1, rsv2, rsv3 = (
+ parse_frame(_receive_bytes, unmask_receive=False))
+
+ self._pending_fragments.append(payload)
+
+ if self._current_opcode is None:
+ if opcode == common.OPCODE_CONTINUATION:
+ raise Exception('Sending invalid continuation opcode')
+ self._current_opcode = opcode
+ else:
+ if opcode != common.OPCODE_CONTINUATION:
+ raise Exception('Sending invalid opcode %d' % opcode)
+ if not fin:
+ return
+
+ inner_frame_data = ''.join(self._pending_fragments)
+ self._pending_fragments = []
+ self._current_opcode = None
+
+ # Handle a control message on the physical channel.
+ # TODO(bashi): Support other opcodes if needed.
+ if opcode == common.OPCODE_CLOSE:
+ if len(payload) >= 2:
+ self.server_close_code = struct.unpack('!H', payload[:2])[0]
+ close_body = create_closing_handshake_body(
+ common.STATUS_NORMAL_CLOSURE, '')
+ close_frame = create_close_frame(close_body, mask=True)
+ self.put_bytes(close_frame)
+ return
+
+ # Parse the payload of the message on physical channel.
+ parser = mux._MuxFramePayloadParser(inner_frame_data)
+ channel_id = parser.read_channel_id()
+ if channel_id == mux._CONTROL_CHANNEL_ID:
+ self._control_blocks.extend(list(parser.read_control_blocks()))
+ return
+
+ if not channel_id in self._channel_data:
+ self._channel_data[channel_id] = _OutgoingChannelData()
+ channel_data = self._channel_data[channel_id]
+
+ # Parse logical frames and assemble an inner (logical) message.
+ (inner_fin, inner_rsv1, inner_rsv2, inner_rsv3, inner_opcode,
+ inner_payload) = parser.read_inner_frame()
+ inner_frame = Frame(inner_fin, inner_rsv1, inner_rsv2, inner_rsv3,
+ inner_opcode, inner_payload)
+ message = channel_data.builder.build(inner_frame)
+ if message is None:
+ return
+
+ if (message.opcode == common.OPCODE_TEXT or
+ message.opcode == common.OPCODE_BINARY):
+ channel_data.messages.append(message.payload)
+
+ self.on_data_message(message.payload)
+ else:
+ channel_data.control_messages.append(
+ {'opcode': message.opcode,
+ 'message': message.payload})
+
+ def on_data_message(self, message):
+ pass
+
+ def get_written_control_blocks(self):
+ return self._control_blocks
+
+ def get_written_messages(self, channel_id):
+ return self._channel_data[channel_id].messages
+
+ def get_written_control_messages(self, channel_id):
+ return self._channel_data[channel_id].control_messages
+
+
+class _FailOnWriteConnection(_MockMuxConnection):
+ """Specicialized version of _MockMuxConnection. Its write() method raises
+ an exception for testing when a data message is written.
+ """
+
+ def on_data_message(self, message):
+ """Override to raise an exception."""
+
+ raise Exception('Intentional failure')
+
+
+class _ChannelEvent(object):
+ """A structure that records channel events."""
+
+ def __init__(self):
+ self.request = None
+ self.messages = []
+ self.exception = None
+ self.client_initiated_closing = False
+
+
+class _MuxMockDispatcher(object):
+ """Mock class of dispatch.Dispatcher for mux."""
+
+ def __init__(self):
+ self.channel_events = {}
+
+ def do_extra_handshake(self, request):
+ if request.ws_requested_protocols is not None:
+ request.ws_protocol = request.ws_requested_protocols[0]
+
+ def _do_echo(self, request, channel_events):
+ while True:
+ message = request.ws_stream.receive_message()
+ if message == None:
+ channel_events.client_initiated_closing = True
+ return
+ if message == 'Goodbye':
+ return
+ channel_events.messages.append(message)
+ # echo back
+ request.ws_stream.send_message(message)
+
+ def _do_ping(self, request, channel_events):
+ request.ws_stream.send_ping('Ping!')
+
+ def _do_ping_while_hello_world(self, request, channel_events):
+ request.ws_stream.send_message('Hello ', end=False)
+ request.ws_stream.send_ping('Ping!')
+ request.ws_stream.send_message('World!', end=True)
+
+ def _do_two_ping_while_hello_world(self, request, channel_events):
+ request.ws_stream.send_message('Hello ', end=False)
+ request.ws_stream.send_ping('Ping!')
+ request.ws_stream.send_ping('Pong!')
+ request.ws_stream.send_message('World!', end=True)
+
+ def transfer_data(self, request):
+ self.channel_events[request.channel_id] = _ChannelEvent()
+ self.channel_events[request.channel_id].request = request
+
+ try:
+ # Note: more handlers will be added.
+ if request.uri.endswith('echo'):
+ self._do_echo(request,
+ self.channel_events[request.channel_id])
+ elif request.uri.endswith('ping'):
+ self._do_ping(request,
+ self.channel_events[request.channel_id])
+ elif request.uri.endswith('two_ping_while_hello_world'):
+ self._do_two_ping_while_hello_world(
+ request, self.channel_events[request.channel_id])
+ elif request.uri.endswith('ping_while_hello_world'):
+ self._do_ping_while_hello_world(
+ request, self.channel_events[request.channel_id])
+ else:
+ raise ValueError('Cannot handle path %r' % request.uri)
+ if not request.server_terminated:
+ request.ws_stream.close_connection()
+ except ConnectionTerminatedException, e:
+ self.channel_events[request.channel_id].exception = e
+ except Exception, e:
+ self.channel_events[request.channel_id].exception = e
+ raise
+
+
+def _create_mock_request(connection=None, logical_channel_extensions=None):
+ if connection is None:
+ connection = _MockMuxConnection()
+
+ request = mock.MockRequest(uri='/echo',
+ headers_in=_TEST_HEADERS,
+ connection=connection)
+ request.ws_stream = Stream(request, options=StreamOptions())
+ request.mux_processor = MuxExtensionProcessor(
+ common.ExtensionParameter(common.MUX_EXTENSION))
+ if logical_channel_extensions is not None:
+ request.mux_processor.set_extensions(logical_channel_extensions)
+ request.mux_processor.set_quota(8 * 1024)
+ return request
+
+
+def _create_add_channel_request_frame(channel_id, encoding, encoded_handshake):
+ # Allow invalid encoding for testing.
+ first_byte = ((mux._MUX_OPCODE_ADD_CHANNEL_REQUEST << 5) | encoding)
+ payload = (chr(first_byte) +
+ mux._encode_channel_id(channel_id) +
+ mux._encode_number(len(encoded_handshake)) +
+ encoded_handshake)
+ return create_binary_frame(
+ (mux._encode_channel_id(mux._CONTROL_CHANNEL_ID) + payload), mask=True)
+
+
+def _create_drop_channel_frame(channel_id, code=None, message=''):
+ payload = mux._create_drop_channel(channel_id, code, message)
+ return create_binary_frame(
+ (mux._encode_channel_id(mux._CONTROL_CHANNEL_ID) + payload), mask=True)
+
+
+def _create_flow_control_frame(channel_id, replenished_quota):
+ payload = mux._create_flow_control(channel_id, replenished_quota)
+ return create_binary_frame(
+ (mux._encode_channel_id(mux._CONTROL_CHANNEL_ID) + payload), mask=True)
+
+
+def _create_logical_frame(channel_id, message, opcode=common.OPCODE_BINARY,
+ fin=True, rsv1=False, rsv2=False, rsv3=False,
+ mask=True):
+ bits = chr((fin << 7) | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4) | opcode)
+ payload = mux._encode_channel_id(channel_id) + bits + message
+ return create_binary_frame(payload, mask=mask)
+
+
+def _create_request_header(path='/echo', extensions=None):
+ headers = (
+ 'GET %s HTTP/1.1\r\n'
+ 'Host: server.example.com\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Origin: http://example.com\r\n') % path
+ if extensions:
+ headers += '%s: %s' % (
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER, extensions)
+ return headers
+
+
+class MuxTest(unittest.TestCase):
+ """A unittest for mux module."""
+
+ def test_channel_id_decode(self):
+ data = '\x00\x01\xbf\xff\xdf\xff\xff\xff\xff\xff\xff'
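+ # Channel ids use a variable-length encoding: the count of leading 1
+ # bits in the first octet selects a 1, 2, 3 or 4 octet form carrying 7,
+ # 14, 21 or 29 bits of id, as the decoded values below demonstrate.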
+ parser = mux._MuxFramePayloadParser(data)
+ channel_id = parser.read_channel_id()
+ self.assertEqual(0, channel_id)
+ channel_id = parser.read_channel_id()
+ self.assertEqual(1, channel_id)
+ channel_id = parser.read_channel_id()
+ self.assertEqual(2 ** 14 - 1, channel_id)
+ channel_id = parser.read_channel_id()
+ self.assertEqual(2 ** 21 - 1, channel_id)
+ channel_id = parser.read_channel_id()
+ self.assertEqual(2 ** 29 - 1, channel_id)
+ self.assertEqual(len(data), parser._read_position)
+
+ def test_channel_id_encode(self):
+ encoded = mux._encode_channel_id(0)
+ self.assertEqual('\x00', encoded)
+ encoded = mux._encode_channel_id(2 ** 14 - 1)
+ self.assertEqual('\xbf\xff', encoded)
+ encoded = mux._encode_channel_id(2 ** 14)
+ self.assertEqual('\xc0@\x00', encoded)
+ encoded = mux._encode_channel_id(2 ** 21 - 1)
+ self.assertEqual('\xdf\xff\xff', encoded)
+ encoded = mux._encode_channel_id(2 ** 21)
+ self.assertEqual('\xe0 \x00\x00', encoded)
+ encoded = mux._encode_channel_id(2 ** 29 - 1)
+ self.assertEqual('\xff\xff\xff\xff', encoded)
+ # channel_id is too large
+ self.assertRaises(ValueError,
+ mux._encode_channel_id,
+ 2 ** 29)
+
+ def test_read_multiple_control_blocks(self):
+ # Use AddChannelRequest because it can contain an arbitrary length of data.
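+ # Handshake sizes reuse the WebSocket length encoding: one octet up to
+ # 0x7d, '\x7e' plus 16 bits, or '\x7f' plus 64 bits.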
+ data = ('\x00\x01\x01a'
+ '\x00\x02\x7d%s'
+ '\x00\x03\x7e\xff\xff%s'
+ '\x00\x04\x7f\x00\x00\x00\x00\x00\x01\x00\x00%s') % (
+ 'a' * 0x7d, 'b' * 0xffff, 'c' * 0x10000)
+ parser = mux._MuxFramePayloadParser(data)
+ blocks = list(parser.read_control_blocks())
+ self.assertEqual(4, len(blocks))
+
+ self.assertEqual(mux._MUX_OPCODE_ADD_CHANNEL_REQUEST, blocks[0].opcode)
+ self.assertEqual(1, blocks[0].channel_id)
+ self.assertEqual(1, len(blocks[0].encoded_handshake))
+
+ self.assertEqual(mux._MUX_OPCODE_ADD_CHANNEL_REQUEST, blocks[1].opcode)
+ self.assertEqual(2, blocks[1].channel_id)
+ self.assertEqual(0x7d, len(blocks[1].encoded_handshake))
+
+ self.assertEqual(mux._MUX_OPCODE_ADD_CHANNEL_REQUEST, blocks[2].opcode)
+ self.assertEqual(3, blocks[2].channel_id)
+ self.assertEqual(0xffff, len(blocks[2].encoded_handshake))
+
+ self.assertEqual(mux._MUX_OPCODE_ADD_CHANNEL_REQUEST, blocks[3].opcode)
+ self.assertEqual(4, blocks[3].channel_id)
+ self.assertEqual(0x10000, len(blocks[3].encoded_handshake))
+
+ self.assertEqual(len(data), parser._read_position)
+
+ def test_read_add_channel_request(self):
+ data = '\x00\x01\x01a'
+ parser = mux._MuxFramePayloadParser(data)
+ blocks = list(parser.read_control_blocks())
+ self.assertEqual(mux._MUX_OPCODE_ADD_CHANNEL_REQUEST, blocks[0].opcode)
+ self.assertEqual(1, blocks[0].channel_id)
+ self.assertEqual(1, len(blocks[0].encoded_handshake))
+
+ def test_read_drop_channel(self):
+ data = '\x60\x01\x00'
+ parser = mux._MuxFramePayloadParser(data)
+ blocks = list(parser.read_control_blocks())
+ self.assertEqual(1, len(blocks))
+ self.assertEqual(1, blocks[0].channel_id)
+ self.assertEqual(mux._MUX_OPCODE_DROP_CHANNEL, blocks[0].opcode)
+ self.assertEqual(None, blocks[0].drop_code)
+ self.assertEqual(0, len(blocks[0].drop_message))
+
+ data = '\x60\x02\x09\x03\xe8Success'
+ parser = mux._MuxFramePayloadParser(data)
+ blocks = list(parser.read_control_blocks())
+ self.assertEqual(1, len(blocks))
+ self.assertEqual(2, blocks[0].channel_id)
+ self.assertEqual(mux._MUX_OPCODE_DROP_CHANNEL, blocks[0].opcode)
+ self.assertEqual(1000, blocks[0].drop_code)
+ self.assertEqual('Success', blocks[0].drop_message)
+
+ # Reason is too short.
+ data = '\x60\x01\x01\x00'
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(mux.PhysicalConnectionError,
+ lambda: list(parser.read_control_blocks()))
+
+ def test_read_flow_control(self):
+ data = '\x40\x01\x02'
+ parser = mux._MuxFramePayloadParser(data)
+ blocks = list(parser.read_control_blocks())
+ self.assertEqual(1, len(blocks))
+ self.assertEqual(1, blocks[0].channel_id)
+ self.assertEqual(mux._MUX_OPCODE_FLOW_CONTROL, blocks[0].opcode)
+ self.assertEqual(2, blocks[0].send_quota)
+
+ def test_read_new_channel_slot(self):
+ data = '\x80\x01\x02\x02\x03'
+ parser = mux._MuxFramePayloadParser(data)
+ # TODO(bashi): Implement
+ self.assertRaises(mux.PhysicalConnectionError,
+ lambda: list(parser.read_control_blocks()))
+
+ def test_read_invalid_number_field_in_control_block(self):
+ # No number field.
+ data = ''
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(ValueError, parser._read_number)
+
+ # The last two bytes are missing.
+ data = '\x7e'
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(ValueError, parser._read_number)
+
+ # Missing the last one byte.
+ data = '\x7f\x00\x00\x00\x00\x00\x01\x00'
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(ValueError, parser._read_number)
+
+ # The length of number field is too large.
+ data = '\x7f\xff\xff\xff\xff\xff\xff\xff\xff'
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(ValueError, parser._read_number)
+
+ # The msb of the first byte is set.
+ data = '\x80'
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(ValueError, parser._read_number)
+
+ # Using 3 bytes encoding for 125.
+ data = '\x7e\x00\x7d'
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(ValueError, parser._read_number)
+
+ # Using 9 bytes encoding for 0xffff
+ data = '\x7f\x00\x00\x00\x00\x00\x00\xff\xff'
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(ValueError, parser._read_number)
+
+ def test_read_invalid_size_and_contents(self):
+ # Only contain number field.
+ data = '\x01'
+ parser = mux._MuxFramePayloadParser(data)
+ self.assertRaises(mux.PhysicalConnectionError,
+ parser._read_size_and_contents)
+
+ def test_create_add_channel_response(self):
+ data = mux._create_add_channel_response(channel_id=1,
+ encoded_handshake='FooBar',
+ encoding=0,
+ rejected=False)
+ self.assertEqual('\x20\x01\x06FooBar', data)
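+ # 0x20 is the AddChannelResponse opcode shifted into the top three bits
+ # with the rejected flag and encoding both zero; 0x31 below additionally
+ # sets the rejected flag (0x10) and encoding=1.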
+
+ data = mux._create_add_channel_response(channel_id=2,
+ encoded_handshake='Hello',
+ encoding=1,
+ rejected=True)
+ self.assertEqual('\x31\x02\x05Hello', data)
+
+ def test_create_drop_channel(self):
+ data = mux._create_drop_channel(channel_id=1)
+ self.assertEqual('\x60\x01\x00', data)
+
+ data = mux._create_drop_channel(channel_id=1,
+ code=2000,
+ message='error')
+ self.assertEqual('\x60\x01\x07\x07\xd0error', data)
+
+ # reason must be empty if code is None
+ self.assertRaises(ValueError,
+ mux._create_drop_channel,
+ 1, None, 'FooBar')
+
+ def test_parse_request_text(self):
+ request_text = _create_request_header()
+ command, path, version, headers = mux._parse_request_text(request_text)
+ self.assertEqual('GET', command)
+ self.assertEqual('/echo', path)
+ self.assertEqual('HTTP/1.1', version)
+ self.assertEqual(3, len(headers))
+ self.assertEqual('server.example.com', headers['Host'])
+ self.assertEqual('http://example.com', headers['Origin'])
+
+
+class MuxHandlerTest(unittest.TestCase):
+
+ def test_add_channel(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=3, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=3,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Hello'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=3, message='World'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=3, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual([], dispatcher.channel_events[1].messages)
+ self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
+ self.assertEqual(['World'], dispatcher.channel_events[3].messages)
+ # Channel 2
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(1, len(messages))
+ self.assertEqual('Hello', messages[0])
+ # Channel 3
+ messages = request.connection.get_written_messages(3)
+ self.assertEqual(1, len(messages))
+ self.assertEqual('World', messages[0])
+ control_blocks = request.connection.get_written_control_blocks()
+ # There should be 9 control blocks:
+ # - 1 NewChannelSlot
+ # - 2 AddChannelResponses for channel id 2 and 3
+ # - 6 FlowControls for channel id 1 (initialize), 'Hello', 'World',
+ # and 3 'Goodbye's
+ self.assertEqual(9, len(control_blocks))
+
+ def test_physical_connection_write_failure(self):
+ # Use _FailOnWriteConnection.
+ request = _create_mock_request(connection=_FailOnWriteConnection())
+
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+ # Let the worker echo back 'Hello'. It causes _FailOnWriteConnection
+ # raising an exception.
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Hello'))
+
+ # Let the worker exit. This will be unnecessary once
+ # _LogicalConnection.write() is changed to throw an exception when it
+ # is woken up by on_writer_done.
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ # All threads should be done.
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ def test_send_blocked(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ # On receiving this 'Hello', the server tries to echo back 'Hello',
+ # but it will be blocked since there's no send quota available for
+ # channel 2.
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Hello'))
+
+ # Wait until the worker is blocked due to send quota shortage.
+ time.sleep(1)
+
+ # Close channel 2. The worker should be notified of the end of the
+ # writer thread and stop waiting for send quota to be replenished.
+ drop_channel = _create_drop_channel_frame(channel_id=2)
+
+ request.connection.put_bytes(drop_channel)
+
+ # Make sure the channel 1 is also closed.
+ drop_channel = _create_drop_channel_frame(channel_id=1)
+ request.connection.put_bytes(drop_channel)
+
+ # All threads should be done.
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ def test_add_channel_delta_encoding(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ delta = 'GET /echo HTTP/1.1\r\n\r\n'
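+ # encoding=1 requests delta encoding: the handshake below is applied on
+ # top of the physical connection's opening handshake, so only changed
+ # headers need to be spelled out.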
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=1, encoded_handshake=delta)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Hello'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(1, len(messages))
+ self.assertEqual('Hello', messages[0])
+
+ def test_add_channel_delta_encoding_override(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ # Override Sec-WebSocket-Protocol.
+ delta = ('GET /echo HTTP/1.1\r\n'
+ 'Sec-WebSocket-Protocol: x-foo\r\n'
+ '\r\n')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=1, encoded_handshake=delta)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Hello'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(1, len(messages))
+ self.assertEqual('Hello', messages[0])
+ self.assertEqual('x-foo',
+ dispatcher.channel_events[2].request.ws_protocol)
+
+ def test_add_channel_delta_after_identity(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+ # Sec-WebSocket-Protocol is different from client's opening handshake
+ # of the physical connection.
+ # TODO(bashi): Remove Upgrade, Connection, Sec-WebSocket-Key and
+ # Sec-WebSocket-Version.
+ encoded_handshake = (
+ 'GET /echo HTTP/1.1\r\n'
+ 'Host: server.example.com\r\n'
+ 'Sec-WebSocket-Protocol: x-foo\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Origin: http://example.com\r\n'
+ '\r\n')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ delta = 'GET /echo HTTP/1.1\r\n\r\n'
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=3, encoding=1, encoded_handshake=delta)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=3,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Hello'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=3, message='World'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=3, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual([], dispatcher.channel_events[1].messages)
+ self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
+ self.assertEqual(['World'], dispatcher.channel_events[3].messages)
+ # Channel 2
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(1, len(messages))
+ self.assertEqual('Hello', messages[0])
+ # Channel 3
+ messages = request.connection.get_written_messages(3)
+ self.assertEqual(1, len(messages))
+ self.assertEqual('World', messages[0])
+ # Handshake base should be updated.
+ self.assertEqual(
+ 'x-foo',
+ mux_handler._handshake_base._headers['Sec-WebSocket-Protocol'])
+
+ def test_add_channel_delta_remove_header(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+ # Override handshake delta base.
+ encoded_handshake = (
+ 'GET /echo HTTP/1.1\r\n'
+ 'Host: server.example.com\r\n'
+ 'Sec-WebSocket-Protocol: x-foo\r\n'
+ 'Connection: Upgrade\r\n'
+ 'Origin: http://example.com\r\n'
+ '\r\n')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ # Remove Sec-WebSocket-Protocol header.
+ delta = ('GET /echo HTTP/1.1\r\n'
+ 'Sec-WebSocket-Protocol:'
+ '\r\n')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=3, encoding=1, encoded_handshake=delta)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=3,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Hello'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=3, message='World'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=3, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual([], dispatcher.channel_events[1].messages)
+ self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
+ self.assertEqual(['World'], dispatcher.channel_events[3].messages)
+ # Channel 2
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(1, len(messages))
+ self.assertEqual('Hello', messages[0])
+ # Channel 3
+ messages = request.connection.get_written_messages(3)
+ self.assertEqual(1, len(messages))
+ self.assertEqual('World', messages[0])
+ self.assertEqual(
+ 'x-foo',
+ dispatcher.channel_events[2].request.ws_protocol)
+ self.assertEqual(
+ None,
+ dispatcher.channel_events[3].request.ws_protocol)
+
+ def test_add_channel_delta_encoding_permessage_compress(self):
+ # Enable permessage compress extension on the implicitly opened channel.
+ extensions = common.parse_extensions(
+ '%s; method=deflate' % common.PERMESSAGE_COMPRESSION_EXTENSION)
+ request = _create_mock_request(
+ logical_channel_extensions=extensions)
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ delta = 'GET /echo HTTP/1.1\r\n\r\n'
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=1, encoded_handshake=delta)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=20)
+ request.connection.put_bytes(flow_control)
+
+ # Send compressed 'Hello' on logical channel 1 and 2.
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message=compressed_hello,
+ rsv1=True))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message=compressed_hello,
+ rsv1=True))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual(['Hello'], dispatcher.channel_events[1].messages)
+ self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
+ # Written 'Hello's should be compressed.
+ messages = request.connection.get_written_messages(1)
+ self.assertEqual(1, len(messages))
+ self.assertEqual(compressed_hello, messages[0])
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(1, len(messages))
+ self.assertEqual(compressed_hello, messages[0])
+
+ def test_add_channel_delta_encoding_remove_extensions(self):
+ # Enable permessage compress extension on the implicitly opened channel.
+ extensions = common.parse_extensions(
+ '%s; method=deflate' % common.PERMESSAGE_COMPRESSION_EXTENSION)
+ request = _create_mock_request(
+ logical_channel_extensions=extensions)
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ # Remove permessage compress extension.
+ delta = ('GET /echo HTTP/1.1\r\n'
+ 'Sec-WebSocket-Extensions:\r\n'
+ '\r\n')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=1, encoded_handshake=delta)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=20)
+ request.connection.put_bytes(flow_control)
+
+ # Send compressed message on logical channel 2. The message should
+ # be rejected (since rsv1 is set).
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('Hello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message=compressed_hello,
+ rsv1=True))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_NORMAL_CLOSURE, drop_channel.drop_code)
+ self.assertEqual(2, drop_channel.channel_id)
+ # UnsupportedFrameException should be raised on logical channel 2.
+ self.assertTrue(isinstance(dispatcher.channel_events[2].exception,
+ UnsupportedFrameException))
+
+ def test_add_channel_invalid_encoding(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=3,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_UNKNOWN_REQUEST_ENCODING,
+ drop_channel.drop_code)
+ self.assertEqual(common.STATUS_INTERNAL_ENDPOINT_ERROR,
+ request.connection.server_close_code)
+
+ def test_add_channel_incomplete_handshake(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ incomplete_encoded_handshake = 'GET /echo HTTP/1.1'
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=incomplete_encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertTrue(1 in dispatcher.channel_events)
+ self.assertTrue(2 not in dispatcher.channel_events)
+
+ def test_add_channel_duplicate_channel_id(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_CHANNEL_ALREADY_EXISTS,
+ drop_channel.drop_code)
+ self.assertEqual(common.STATUS_INTERNAL_ENDPOINT_ERROR,
+ request.connection.server_close_code)
+
+ def test_receive_drop_channel(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ drop_channel = _create_drop_channel_frame(channel_id=2)
+ request.connection.put_bytes(drop_channel)
+
+ # Terminate implicitly opened channel.
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ exception = dispatcher.channel_events[2].exception
+ self.assertTrue(exception.__class__ == ConnectionTerminatedException)
+
+ def test_receive_ping_frame(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=13)
+ request.connection.put_bytes(flow_control)
+
+ ping_frame = _create_logical_frame(channel_id=2,
+ message='Hello World!',
+ opcode=common.OPCODE_PING)
+ request.connection.put_bytes(ping_frame)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PONG, messages[0]['opcode'])
+ self.assertEqual('Hello World!', messages[0]['message'])
+
+ def test_receive_fragmented_ping(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=13)
+ request.connection.put_bytes(flow_control)
+
+ # Send a ping with message 'Hello World!' in two fragmented frames.
+ ping_frame1 = _create_logical_frame(channel_id=2,
+ message='Hello ',
+ fin=False,
+ opcode=common.OPCODE_PING)
+ request.connection.put_bytes(ping_frame1)
+ ping_frame2 = _create_logical_frame(channel_id=2,
+ message='World!',
+ fin=True,
+ opcode=common.OPCODE_CONTINUATION)
+ request.connection.put_bytes(ping_frame2)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PONG, messages[0]['opcode'])
+ self.assertEqual('Hello World!', messages[0]['message'])
+
+ def test_receive_fragmented_ping_while_receiving_fragmented_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=19)
+ request.connection.put_bytes(flow_control)
+
+ # Send a fragmented frame of message 'Hello '.
+ hello = _create_logical_frame(channel_id=2,
+ message='Hello ',
+ fin=False)
+ request.connection.put_bytes(hello)
+
+ # Before sending the last fragmented frame of the message, send a
+ # fragmented ping.
+ ping1 = _create_logical_frame(channel_id=2,
+ message='Pi',
+ fin=False,
+ opcode=common.OPCODE_PING)
+ request.connection.put_bytes(ping1)
+ ping2 = _create_logical_frame(channel_id=2,
+ message='ng!',
+ fin=True,
+ opcode=common.OPCODE_CONTINUATION)
+ request.connection.put_bytes(ping2)
+
+ # Send the last fragmented frame of the message.
+ world = _create_logical_frame(channel_id=2,
+ message='World!',
+ fin=True,
+ opcode=common.OPCODE_CONTINUATION)
+ request.connection.put_bytes(world)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(['Hello World!'], messages)
+ control_messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PONG, control_messages[0]['opcode'])
+ self.assertEqual('Ping!', control_messages[0]['message'])
+
+ def test_receive_two_ping_while_receiving_fragmented_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=25)
+ request.connection.put_bytes(flow_control)
+
+ # Send a fragmented frame of message 'Hello '.
+ hello = _create_logical_frame(channel_id=2,
+ message='Hello ',
+ fin=False)
+ request.connection.put_bytes(hello)
+
+ # Before sending the last fragmented frame of the message, send a
+ # fragmented ping and a non-fragmented ping.
+ ping1 = _create_logical_frame(channel_id=2,
+ message='Pi',
+ fin=False,
+ opcode=common.OPCODE_PING)
+ request.connection.put_bytes(ping1)
+ ping2 = _create_logical_frame(channel_id=2,
+ message='ng!',
+ fin=True,
+ opcode=common.OPCODE_CONTINUATION)
+ request.connection.put_bytes(ping2)
+ ping3 = _create_logical_frame(channel_id=2,
+ message='Pong!',
+ fin=True,
+ opcode=common.OPCODE_PING)
+ request.connection.put_bytes(ping3)
+
+ # Send the last fragmented frame of the message.
+ world = _create_logical_frame(channel_id=2,
+ message='World!',
+ fin=True,
+ opcode=common.OPCODE_CONTINUATION)
+ request.connection.put_bytes(world)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(['Hello World!'], messages)
+ control_messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PONG, control_messages[0]['opcode'])
+ self.assertEqual('Ping!', control_messages[0]['message'])
+ self.assertEqual(common.OPCODE_PONG, control_messages[1]['opcode'])
+ self.assertEqual('Pong!', control_messages[1]['message'])
+
+ def test_receive_message_while_receiving_fragmented_ping(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=19)
+ request.connection.put_bytes(flow_control)
+
+ # Send a fragmented ping.
+ ping1 = _create_logical_frame(channel_id=2,
+ message='Pi',
+ fin=False,
+ opcode=common.OPCODE_PING)
+ request.connection.put_bytes(ping1)
+
+        # Before sending the last fragment of the ping, send a message.
+        # Logical channel 2 should be dropped.
+ message = _create_logical_frame(channel_id=2,
+ message='Hello world!',
+ fin=True)
+ request.connection.put_bytes(message)
+
+        # Send the last fragment of the ping.
+ ping2 = _create_logical_frame(channel_id=2,
+ message='ng!',
+ fin=True,
+ opcode=common.OPCODE_CONTINUATION)
+ request.connection.put_bytes(ping2)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(2, drop_channel.channel_id)
+ # No message should be sent on channel 2.
+ self.assertRaises(KeyError,
+ request.connection.get_written_messages,
+ 2)
+ self.assertRaises(KeyError,
+ request.connection.get_written_control_messages,
+ 2)
+
+ def test_send_ping(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/ping')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PING, messages[0]['opcode'])
+ self.assertEqual('Ping!', messages[0]['message'])
+
+ def test_send_fragmented_ping(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/ping')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+        # Replenish 3 bytes. This isn't enough to send the whole ping frame
+        # because the frame carries a 5-byte message ('Ping!'), so the frame
+        # should be fragmented.
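+        # In total the ping costs len('Ping!') + 1 = 6 bytes of quota (the
+        # +1 being the per-message extra cost), replenished as 3 bytes here
+        # and 3 more below.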
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=3)
+ request.connection.put_bytes(flow_control)
+
+ # Wait until the worker is blocked due to send quota shortage.
+ time.sleep(1)
+
+ # Replenish remaining 2 + 1 bytes (including extra cost).
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=3)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PING, messages[0]['opcode'])
+ self.assertEqual('Ping!', messages[0]['message'])
+
+ def test_send_fragmented_ping_while_sending_fragmented_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(
+ path='/ping_while_hello_world')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ # Application will send:
+ # - text message 'Hello ' with fin=0
+ # - ping with 'Ping!' message
+ # - text message 'World!' with fin=1
+ # Replenish (6 + 1) + (2 + 1) bytes so that the ping will be
+ # fragmented on the logical channel.
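+        # (The +1 terms appear to be the per-message extra cost, charged
+        # with the first fragment of each message; the continuations
+        # replenished below only need their payload bytes.)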
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=10)
+ request.connection.put_bytes(flow_control)
+
+ time.sleep(1)
+
+ # Replenish remaining 3 + 6 bytes.
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=9)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(['Hello World!'], messages)
+ control_messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PING, control_messages[0]['opcode'])
+ self.assertEqual('Ping!', control_messages[0]['message'])
+
+ def test_send_fragmented_two_ping_while_sending_fragmented_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(
+ path='/two_ping_while_hello_world')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ # Application will send:
+ # - text message 'Hello ' with fin=0
+ # - ping with 'Ping!' message
+ # - ping with 'Pong!' message
+ # - text message 'World!' with fin=1
+ # Replenish (6 + 1) + (2 + 1) bytes so that the first ping will be
+ # fragmented on the logical channel.
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=10)
+ request.connection.put_bytes(flow_control)
+
+ time.sleep(1)
+
+ # Replenish remaining 3 + (5 + 1) + 6 bytes. The second ping won't
+ # be fragmented on the logical channel.
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=15)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(['Hello World!'], messages)
+ control_messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PING, control_messages[0]['opcode'])
+ self.assertEqual('Ping!', control_messages[0]['message'])
+ self.assertEqual(common.OPCODE_PING, control_messages[1]['opcode'])
+ self.assertEqual('Pong!', control_messages[1]['message'])
+
+ def test_send_drop_channel(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+        # DropChannel for channel id 1 with no reason.
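+        # Byte layout, as these tests encode it: 0x00 is the control
+        # channel id, 0x60 carries _MUX_OPCODE_DROP_CHANNEL in its top
+        # three bits (3 << 5), 0x01 is the dropped channel id, and the
+        # final 0x00 is the empty reason size.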
+ frame = create_binary_frame('\x00\x60\x01\x00', mask=True)
+ request.connection.put_bytes(frame)
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_ACKNOWLEDGED,
+ drop_channel.drop_code)
+ self.assertEqual(1, drop_channel.channel_id)
+
+ def test_two_flow_control(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ # Replenish 5 bytes.
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=5)
+ request.connection.put_bytes(flow_control)
+
+        # Send 10 bytes. The server will try to echo back 10 bytes.
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='HelloWorld'))
+
+ # Replenish 5 + 1 (per-message extra cost) bytes.
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(['HelloWorld'], messages)
+ received_flow_controls = [
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_FLOW_CONTROL and b.channel_id == 2]
+ # Replenishment for 'HelloWorld' + 1
+ self.assertEqual(11, received_flow_controls[0].send_quota)
+ # Replenishment for 'Goodbye' + 1
+ self.assertEqual(8, received_flow_controls[1].send_quota)
+
+ def test_no_send_quota_on_server(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='HelloWorld'))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ # Just wait for 1 sec so that the server attempts to echo back
+ # 'HelloWorld'.
+ self.assertFalse(mux_handler.wait_until_done(timeout=1))
+
+ # No message should be sent on channel 2.
+ self.assertRaises(KeyError,
+ request.connection.get_written_messages,
+ 2)
+
+ def test_no_send_quota_on_server_for_permessage_extra_cost(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Hello'))
+ # Replenish only len('World') bytes.
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=5)
+ request.connection.put_bytes(flow_control)
+        # The server should not be able to echo this message back; the
+        # per-message extra cost is not covered.
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='World'))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ # Just wait for 1 sec so that the server attempts to echo back
+ # 'World'.
+ self.assertFalse(mux_handler.wait_until_done(timeout=1))
+
+ # Only one message should be sent on channel 2.
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(['Hello'], messages)
+
+ def test_quota_violation_by_client(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS, 0)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='HelloWorld'))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ control_blocks = request.connection.get_written_control_blocks()
+ self.assertEqual(5, len(control_blocks))
+ drop_channel = next(
+ b for b in control_blocks
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_SEND_QUOTA_VIOLATION,
+ drop_channel.drop_code)
+
+ def test_consume_quota_empty_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ # Client has 1 byte quota.
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS, 1)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=2)
+ request.connection.put_bytes(flow_control)
+        # Send an empty message. Pywebsocket always replenishes 1 byte of
+        # quota for an empty message.
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message=''))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ # This message violates quota on channel id 2.
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual(1, len(dispatcher.channel_events[2].messages))
+ self.assertEqual('', dispatcher.channel_events[2].messages[0])
+
+ received_flow_controls = [
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_FLOW_CONTROL and b.channel_id == 2]
+ self.assertEqual(1, len(received_flow_controls))
+ self.assertEqual(1, received_flow_controls[0].send_quota)
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(2, drop_channel.channel_id)
+ self.assertEqual(mux._DROP_CODE_SEND_QUOTA_VIOLATION,
+ drop_channel.drop_code)
+
+ def test_consume_quota_fragmented_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ # Client has len('Hello') + len('Goodbye') + 2 bytes quota.
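+        # (5 + 7 + 2 = 14; the extra 2 bytes cover the per-message cost of
+        # the two messages sent on channel 2.)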
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS, 14)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='He', fin=False,
+ opcode=common.OPCODE_TEXT))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='llo', fin=True,
+ opcode=common.OPCODE_CONTINUATION))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(['Hello'], messages)
+
+ def test_fragmented_control_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/ping')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+        # Replenish a total of 6 bytes in 3 FlowControls.
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=1)
+ request.connection.put_bytes(flow_control)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=2)
+ request.connection.put_bytes(flow_control)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=3)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ messages = request.connection.get_written_control_messages(2)
+ self.assertEqual(common.OPCODE_PING, messages[0]['opcode'])
+ self.assertEqual('Ping!', messages[0]['message'])
+
+ def test_channel_slot_violation_by_client(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(slots=1,
+ send_quota=mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Hello'))
+
+ # This request should be rejected.
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=3, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+ flow_control = _create_flow_control_frame(channel_id=3,
+ replenished_quota=6)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=3, message='Hello'))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual([], dispatcher.channel_events[1].messages)
+ self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
+ self.assertFalse(dispatcher.channel_events.has_key(3))
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(3, drop_channel.channel_id)
+ self.assertEqual(mux._DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION,
+ drop_channel.drop_code)
+
+ def test_quota_overflow_by_client(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(slots=1,
+ send_quota=mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+ # Replenish 0x7FFFFFFFFFFFFFFF bytes twice.
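+        # Two replenishments of 2 ** 63 - 1 bytes exceed the largest
+        # representable send quota and should be treated as an overflow.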
+ flow_control = _create_flow_control_frame(
+ channel_id=2,
+ replenished_quota=0x7FFFFFFFFFFFFFFF)
+ request.connection.put_bytes(flow_control)
+ request.connection.put_bytes(flow_control)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(2, drop_channel.channel_id)
+ self.assertEqual(mux._DROP_CODE_SEND_QUOTA_OVERFLOW,
+ drop_channel.drop_code)
+
+ def test_invalid_encapsulated_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+ first_byte = (mux._MUX_OPCODE_ADD_CHANNEL_REQUEST << 5)
+ block = (chr(first_byte) +
+ mux._encode_channel_id(1) +
+ mux._encode_number(0))
+ payload = mux._encode_channel_id(mux._CONTROL_CHANNEL_ID) + block
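+        # Send the control block in a text frame instead of a binary
+        # frame; the encapsulating message is expected to be binary, so
+        # the server should reject it.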
+ text_frame = create_binary_frame(payload, opcode=common.OPCODE_TEXT,
+ mask=True)
+ request.connection.put_bytes(text_frame)
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
+ drop_channel.drop_code)
+ self.assertEqual(common.STATUS_INTERNAL_ENDPOINT_ERROR,
+ request.connection.server_close_code)
+
+ def test_channel_id_truncated(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+ # The last byte of the channel id is missing.
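+        # (0x80 with its high bit set starts a multi-byte channel id
+        # encoding, so at least one more byte is expected.)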
+ frame = create_binary_frame('\x80', mask=True)
+ request.connection.put_bytes(frame)
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_CHANNEL_ID_TRUNCATED,
+ drop_channel.drop_code)
+ self.assertEqual(common.STATUS_INTERNAL_ENDPOINT_ERROR,
+ request.connection.server_close_code)
+
+ def test_inner_frame_truncated(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+        # The frame contains only channel id 1; the inner frame is missing.
+ frame = create_binary_frame('\x01', mask=True)
+ request.connection.put_bytes(frame)
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED,
+ drop_channel.drop_code)
+ self.assertEqual(common.STATUS_INTERNAL_ENDPOINT_ERROR,
+ request.connection.server_close_code)
+
+ def test_unknown_mux_opcode(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+ # Undefined opcode 5
+ frame = create_binary_frame('\x00\xa0', mask=True)
+ request.connection.put_bytes(frame)
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_UNKNOWN_MUX_OPCODE,
+ drop_channel.drop_code)
+ self.assertEqual(common.STATUS_INTERNAL_ENDPOINT_ERROR,
+ request.connection.server_close_code)
+
+ def test_invalid_mux_control_block(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+        # DropChannel contains a 1-byte reason.
+ frame = create_binary_frame('\x00\x60\x00\x01\x00', mask=True)
+ request.connection.put_bytes(frame)
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channel = next(
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL)
+ self.assertEqual(mux._DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ drop_channel.drop_code)
+ self.assertEqual(common.STATUS_INTERNAL_ENDPOINT_ERROR,
+ request.connection.server_close_code)
+
+ def test_permessage_compress(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ # Enable permessage compress extension on logical channel 2.
+ extensions = '%s; method=deflate' % (
+ common.PERMESSAGE_COMPRESSION_EXTENSION)
+ encoded_handshake = _create_request_header(path='/echo',
+ extensions=extensions)
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ flow_control = _create_flow_control_frame(channel_id=2,
+ replenished_quota=20)
+ request.connection.put_bytes(flow_control)
+
+ # Send compressed 'Hello' twice.
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello1 = compress.compress('Hello')
+ compressed_hello1 += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello1 = compressed_hello1[:-4]
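+        # Z_SYNC_FLUSH ends the output with an empty block (00 00 ff ff);
+        # those trailing four bytes are stripped before transmission,
+        # hence the [:-4].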
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message=compressed_hello1,
+ rsv1=True))
+ compressed_hello2 = compress.compress('Hello')
+ compressed_hello2 += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello2 = compressed_hello2[:-4]
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message=compressed_hello2,
+ rsv1=True))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=2, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual(['Hello', 'Hello'],
+ dispatcher.channel_events[2].messages)
+        # The echoed 'Hello' messages should be written in compressed form.
+ messages = request.connection.get_written_messages(2)
+ self.assertEqual(2, len(messages))
+ self.assertEqual(compressed_hello1, messages[0])
+ self.assertEqual(compressed_hello2, messages[1])
+
+ def test_permessage_compress_fragmented_message(self):
+ extensions = common.parse_extensions(
+ '%s; method=deflate' % common.PERMESSAGE_COMPRESSION_EXTENSION)
+ request = _create_mock_request(
+ logical_channel_extensions=extensions)
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ # Send compressed 'HelloHelloHello' as fragmented message.
+ compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+ compressed_hello = compress.compress('HelloHelloHello')
+ compressed_hello += compress.flush(zlib.Z_SYNC_FLUSH)
+ compressed_hello = compressed_hello[:-4]
+
+ m = len(compressed_hello) / 2
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1,
+ message=compressed_hello[:m],
+ fin=False, rsv1=True,
+ opcode=common.OPCODE_TEXT))
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1,
+ message=compressed_hello[m:],
+ fin=True, rsv1=False,
+ opcode=common.OPCODE_CONTINUATION))
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ self.assertEqual(['HelloHelloHello'],
+ dispatcher.channel_events[1].messages)
+ messages = request.connection.get_written_messages(1)
+ self.assertEqual(1, len(messages))
+ self.assertEqual(compressed_hello, messages[0])
+
+ def test_receive_bad_fragmented_message(self):
+ request = _create_mock_request()
+ dispatcher = _MuxMockDispatcher()
+ mux_handler = mux._MuxHandler(request, dispatcher)
+ mux_handler.start()
+ mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ mux._INITIAL_QUOTA_FOR_CLIENT)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=2, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ # Send a frame with fin=False, and then send a frame with
+ # opcode=TEXT (not CONTINUATION). Logical channel 2 should be dropped.
+ frame1 = _create_logical_frame(channel_id=2,
+ message='Hello ',
+ fin=False,
+ opcode=common.OPCODE_TEXT)
+ request.connection.put_bytes(frame1)
+ frame2 = _create_logical_frame(channel_id=2,
+ message='World!',
+ fin=True,
+ opcode=common.OPCODE_TEXT)
+ request.connection.put_bytes(frame2)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=3, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+        # Send a frame with opcode=CONTINUATION without any preceding frame
+        # whose fin bit is unset. Logical channel 3 should be dropped.
+ frame3 = _create_logical_frame(channel_id=3,
+ message='Hello',
+ fin=True,
+ opcode=common.OPCODE_CONTINUATION)
+ request.connection.put_bytes(frame3)
+
+ encoded_handshake = _create_request_header(path='/echo')
+ add_channel_request = _create_add_channel_request_frame(
+ channel_id=4, encoding=0,
+ encoded_handshake=encoded_handshake)
+ request.connection.put_bytes(add_channel_request)
+
+ # Send a frame with opcode=PING and fin=False, and then send a frame
+ # with opcode=TEXT (not CONTINUATION). Logical channel 4 should be
+ # dropped.
+ frame4 = _create_logical_frame(channel_id=4,
+ message='Ping',
+ fin=False,
+ opcode=common.OPCODE_PING)
+ request.connection.put_bytes(frame4)
+ frame5 = _create_logical_frame(channel_id=4,
+ message='Hello',
+ fin=True,
+ opcode=common.OPCODE_TEXT)
+ request.connection.put_bytes(frame5)
+
+ request.connection.put_bytes(
+ _create_logical_frame(channel_id=1, message='Goodbye'))
+
+ self.assertTrue(mux_handler.wait_until_done(timeout=2))
+
+ drop_channels = [
+ b for b in request.connection.get_written_control_blocks()
+ if b.opcode == mux._MUX_OPCODE_DROP_CHANNEL]
+ self.assertEqual(3, len(drop_channels))
+ for d in drop_channels:
+ self.assertEqual(mux._DROP_CODE_BAD_FRAGMENTATION,
+ d.drop_code)
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_stream.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_stream.py
new file mode 100755
index 000000000..81acfeb04
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_stream.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for stream module."""
+
+
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket import common
+from mod_pywebsocket import stream
+
+
+class StreamTest(unittest.TestCase):
+ """A unittest for stream module."""
+
+ def test_create_header(self):
+        # fin, rsv1, rsv2, rsv3 and mask are all set
+ header = stream.create_header(common.OPCODE_TEXT, 1, 1, 1, 1, 1, 1)
+ self.assertEqual('\xf1\x81', header)
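+        # 0xf1 == 1111 0001: fin/rsv1/rsv2/rsv3 set with opcode 0x1 (text);
+        # 0x81 == 1000 0001: mask bit set, payload length 1.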
+
+ # Maximum payload size
+ header = stream.create_header(
+ common.OPCODE_TEXT, (1 << 63) - 1, 0, 0, 0, 0, 0)
+ self.assertEqual('\x01\x7f\x7f\xff\xff\xff\xff\xff\xff\xff', header)
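+        # 0x7f in the length field selects the 8-byte extended length,
+        # whose most significant bit must be zero, so (1 << 63) - 1 is the
+        # largest encodable payload length.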
+
+ # Invalid opcode 0x10
+ self.assertRaises(ValueError,
+ stream.create_header,
+ 0x10, 0, 0, 0, 0, 0, 0)
+
+        # Invalid value 0xf passed to the fin parameter
+ self.assertRaises(ValueError,
+ stream.create_header,
+ common.OPCODE_TEXT, 0, 0xf, 0, 0, 0, 0)
+
+ # Too long payload_length
+ self.assertRaises(ValueError,
+ stream.create_header,
+ common.OPCODE_TEXT, 1 << 63, 0, 0, 0, 0, 0)
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_stream_hixie75.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_stream_hixie75.py
new file mode 100755
index 000000000..ca9ac7130
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_stream_hixie75.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for stream module."""
+
+
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket.stream import StreamHixie75
+from test.test_msgutil import _create_request_hixie75
+
+
+class StreamHixie75Test(unittest.TestCase):
+ """A unittest for StreamHixie75 class."""
+
+ def test_payload_length(self):
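+        # Hixie-75 lengths are encoded 7 bits per byte, most significant
+        # group first, with the high bit set on every byte except the
+        # last; the 0x1234 case also uses a redundant leading 0x80 byte.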
+ for length, bytes in ((0, '\x00'), (0x7f, '\x7f'), (0x80, '\x81\x00'),
+ (0x1234, '\x80\xa4\x34')):
+ test_stream = StreamHixie75(_create_request_hixie75(bytes))
+ self.assertEqual(
+ length, test_stream._read_payload_length_hixie75())
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/test_util.py b/testing/web-platform/tests/tools/pywebsocket/src/test/test_util.py
new file mode 100755
index 000000000..20f4ab059
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/test_util.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Tests for util module."""
+
+
+import os
+import random
+import sys
+import unittest
+
+import set_sys_path # Update sys.path to locate mod_pywebsocket module.
+
+from mod_pywebsocket import util
+
+
+_TEST_DATA_DIR = os.path.join(os.path.split(__file__)[0], 'testdata')
+
+
+class UtilTest(unittest.TestCase):
+ """A unittest for util module."""
+
+ def test_get_stack_trace(self):
+ self.assertEqual('None\n', util.get_stack_trace())
+ try:
+ a = 1 / 0 # Intentionally raise exception.
+ except Exception:
+ trace = util.get_stack_trace()
+ self.failUnless(trace.startswith('Traceback'))
+ self.failUnless(trace.find('ZeroDivisionError') != -1)
+
+ def test_prepend_message_to_exception(self):
+ exc = Exception('World')
+ self.assertEqual('World', str(exc))
+ util.prepend_message_to_exception('Hello ', exc)
+ self.assertEqual('Hello World', str(exc))
+
+ def test_get_script_interp(self):
+ cygwin_path = 'c:\\cygwin\\bin'
+ cygwin_perl = os.path.join(cygwin_path, 'perl')
+ self.assertEqual(None, util.get_script_interp(
+ os.path.join(_TEST_DATA_DIR, 'README')))
+ self.assertEqual(None, util.get_script_interp(
+ os.path.join(_TEST_DATA_DIR, 'README'), cygwin_path))
+ self.assertEqual('/usr/bin/perl -wT', util.get_script_interp(
+ os.path.join(_TEST_DATA_DIR, 'hello.pl')))
+ self.assertEqual(cygwin_perl + ' -wT', util.get_script_interp(
+ os.path.join(_TEST_DATA_DIR, 'hello.pl'), cygwin_path))
+
+ def test_hexify(self):
+ self.assertEqual('61 7a 41 5a 30 39 20 09 0d 0a 00 ff',
+ util.hexify('azAZ09 \t\r\n\x00\xff'))
+
+
+class RepeatedXorMaskerTest(unittest.TestCase):
+ """A unittest for RepeatedXorMasker class."""
+
+ def test_mask(self):
+ # Sample input e6,97,a5 is U+65e5 in UTF-8
+ masker = util.RepeatedXorMasker('\xff\xff\xff\xff')
+ result = masker.mask('\xe6\x97\xa5')
+ self.assertEqual('\x19\x68\x5a', result)
+
+ masker = util.RepeatedXorMasker('\x00\x00\x00\x00')
+ result = masker.mask('\xe6\x97\xa5')
+ self.assertEqual('\xe6\x97\xa5', result)
+
+ masker = util.RepeatedXorMasker('\xe6\x97\xa5\x20')
+ result = masker.mask('\xe6\x97\xa5')
+ self.assertEqual('\x00\x00\x00', result)
+
+ def test_mask_twice(self):
+ masker = util.RepeatedXorMasker('\x00\x7f\xff\x20')
+ # mask[0], mask[1], ... will be used.
+ result = masker.mask('\x00\x00\x00\x00\x00')
+ self.assertEqual('\x00\x7f\xff\x20\x00', result)
+        # mask[1], mask[2], ... will be used for the next call.
+ result = masker.mask('\x00\x00\x00\x00\x00')
+ self.assertEqual('\x7f\xff\x20\x00\x7f', result)
+
+ def test_mask_large_data(self):
+ masker = util.RepeatedXorMasker('mASk')
+ original = ''.join([chr(i % 256) for i in xrange(1000)])
+ result = masker.mask(original)
+ expected = ''.join(
+ [chr((i % 256) ^ ord('mASk'[i % 4])) for i in xrange(1000)])
+ self.assertEqual(expected, result)
+
+ masker = util.RepeatedXorMasker('MaSk')
+ first_part = 'The WebSocket Protocol enables two-way communication.'
+ result = masker.mask(first_part)
+ self.assertEqual(
+ '\x19\t6K\x1a\x0418"\x028\x0e9A\x03\x19"\x15<\x08"\rs\x0e#'
+ '\x001\x07(\x12s\x1f:\x0e~\x1c,\x18s\x08"\x0c>\x1e#\x080\n9'
+ '\x08<\x05c',
+ result)
+ second_part = 'It has two parts: a handshake and the data transfer.'
+ result = masker.mask(second_part)
+ self.assertEqual(
+ "('K%\x00 K9\x16<K=\x00!\x1f>[s\nm\t2\x05)\x12;\n&\x04s\n#"
+ "\x05s\x1f%\x04s\x0f,\x152K9\x132\x05>\x076\x19c",
+ result)
+
+
+def get_random_section(source, min_num_chunks):
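+    """Split 'source' into consecutive chunks of random size.
+
+    Each chunk is at most len(source) / min_num_chunks bytes long, so the
+    result has at least min_num_chunks chunks when len(source) is at least
+    min_num_chunks.
+    """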
+ chunks = []
+ bytes_chunked = 0
+
+ while bytes_chunked < len(source):
+ chunk_size = random.randint(
+ 1,
+ min(len(source) / min_num_chunks, len(source) - bytes_chunked))
+ chunk = source[bytes_chunked:bytes_chunked + chunk_size]
+ chunks.append(chunk)
+ bytes_chunked += chunk_size
+
+ return chunks
+
+
+class InflaterDeflaterTest(unittest.TestCase):
+ """A unittest for _Inflater and _Deflater class."""
+
+ def test_inflate_deflate_default(self):
+ input = b'hello' + '-' * 30000 + b'hello'
+ inflater15 = util._Inflater(15)
+ deflater15 = util._Deflater(15)
+ inflater8 = util._Inflater(8)
+ deflater8 = util._Deflater(8)
+
+ compressed15 = deflater15.compress_and_finish(input)
+ compressed8 = deflater8.compress_and_finish(input)
+
+ inflater15.append(compressed15)
+ inflater8.append(compressed8)
+
+ self.assertNotEqual(compressed15, compressed8)
+ self.assertEqual(input, inflater15.decompress(-1))
+ self.assertEqual(input, inflater8.decompress(-1))
+
+ def test_random_section(self):
+ random.seed(a=0)
+ source = ''.join(
+ [chr(random.randint(0, 255)) for i in xrange(100 * 1024)])
+
+ chunked_input = get_random_section(source, 10)
+ print "Input chunk sizes: %r" % [len(c) for c in chunked_input]
+
+ deflater = util._Deflater(15)
+ compressed = []
+ for chunk in chunked_input:
+ compressed.append(deflater.compress(chunk))
+ compressed.append(deflater.compress_and_finish(''))
+
+ chunked_expectation = get_random_section(source, 10)
+ print ("Expectation chunk sizes: %r" %
+ [len(c) for c in chunked_expectation])
+
+ inflater = util._Inflater(15)
+ inflater.append(''.join(compressed))
+ for chunk in chunked_expectation:
+ decompressed = inflater.decompress(len(chunk))
+ self.assertEqual(chunk, decompressed)
+
+ self.assertEqual('', inflater.decompress(-1))
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/README b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/README
new file mode 100644
index 000000000..c001aa559
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/README
@@ -0,0 +1 @@
+Test data directory
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/abort_by_user_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/abort_by_user_wsh.py
new file mode 100644
index 000000000..367f9930f
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/abort_by_user_wsh.py
@@ -0,0 +1,42 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import handshake
+
+
+def web_socket_do_extra_handshake(request):
+ raise handshake.AbortedByUserException("abort for test")
+
+
+def web_socket_transfer_data(request):
+ raise handshake.AbortedByUserException("abort for test")
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/blank_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/blank_wsh.py
new file mode 100644
index 000000000..7f87c6af2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/blank_wsh.py
@@ -0,0 +1,31 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# intentionally left blank
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/origin_check_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/origin_check_wsh.py
new file mode 100644
index 000000000..2c139fa17
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/origin_check_wsh.py
@@ -0,0 +1,42 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+def web_socket_do_extra_handshake(request):
+ if request.ws_origin == 'http://example.com':
+ return
+ raise ValueError('Unacceptable origin: %r' % request.ws_origin)
+
+
+def web_socket_transfer_data(request):
+ request.connection.write('origin_check_wsh.py is called for %s, %s' %
+ (request.ws_resource, request.ws_protocol))
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/exception_in_transfer_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/exception_in_transfer_wsh.py
new file mode 100644
index 000000000..b982d0231
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/exception_in_transfer_wsh.py
@@ -0,0 +1,44 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Exception in web_socket_transfer_data().
+"""
+
+
+def web_socket_do_extra_handshake(request):
+ pass
+
+
+def web_socket_transfer_data(request):
+ raise Exception('Intentional Exception for %s, %s' %
+ (request.ws_resource, request.ws_protocol))
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/no_wsh_at_the_end.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/no_wsh_at_the_end.py
new file mode 100644
index 000000000..17e7be180
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/no_wsh_at_the_end.py
@@ -0,0 +1,45 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Correct signatures, wrong file name.
+"""
+
+
+def web_socket_do_extra_handshake(request):
+ pass
+
+
+def web_socket_transfer_data(request):
+ request.connection.write(
+ 'sub/no_wsh_at_the_end.py is called for %s, %s' %
+ (request.ws_resource, request.ws_protocol))
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/non_callable_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/non_callable_wsh.py
new file mode 100644
index 000000000..26352eb4c
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/non_callable_wsh.py
@@ -0,0 +1,39 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Non-callable handlers.
+"""
+
+
+web_socket_do_extra_handshake = True
+web_socket_transfer_data = 1
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py
new file mode 100644
index 000000000..db3ff6930
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py
@@ -0,0 +1,40 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+def web_socket_do_extra_handshake(request):
+ pass
+
+
+def web_socket_transfer_data(request):
+ request.connection.write('sub/plain_wsh.py is called for %s, %s' %
+ (request.ws_resource, request.ws_protocol))
+
+
+# vi:sts=4 sw=4 et
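
plain_wsh.py above shows the minimal shape of a pywebsocket handler module: an extra-handshake hook plus a data-transfer function, each taking the request object. The fixtures in this patch deliberately write straight to request.connection; for context, a more typical handler exchanges framed messages over the stream API instead. The sketch below is illustrative only and assumes mod_pywebsocket's usual request.ws_stream receive_message/send_message interface; it is not part of this patch.

    def web_socket_do_extra_handshake(request):
        # Accept the handshake as-is; a real handler might inspect
        # request.ws_resource or the offered subprotocols here.
        pass


    def web_socket_transfer_data(request):
        # Echo frames back until the client closes the connection
        # (receive_message() returns None on close).
        while True:
            message = request.ws_stream.receive_message()
            if message is None:
                return
            request.ws_stream.send_message(
                message, binary=not isinstance(message, str))
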
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py
new file mode 100644
index 000000000..6bf659bc9
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py
@@ -0,0 +1,45 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Wrong web_socket_do_extra_handshake signature.
+"""
+
+
+def no_web_socket_do_extra_handshake(request):
+ pass
+
+
+def web_socket_transfer_data(request):
+ request.connection.write(
+ 'sub/wrong_handshake_sig_wsh.py is called for %s, %s' %
+ (request.ws_resource, request.ws_protocol))
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_transfer_sig_wsh.py b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_transfer_sig_wsh.py
new file mode 100644
index 000000000..e0e2e5507
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_transfer_sig_wsh.py
@@ -0,0 +1,45 @@
+# Copyright 2009, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Wrong web_socket_transfer_data() signature.
+"""
+
+
+def web_socket_do_extra_handshake(request):
+ pass
+
+
+def no_web_socket_transfer_data(request):
+ request.connection.write(
+ 'sub/wrong_transfer_sig_wsh.py is called for %s, %s' %
+ (request.ws_resource, request.ws_protocol))
+
+
+# vi:sts=4 sw=4 et
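
Taken together, these fixtures cover the ways a handler module can fail the two-function contract: a file name without the _wsh.py suffix, non-callable attributes, or a misnamed handshake or transfer function. Purely as an illustration of what that contract implies (and not the project's actual dispatcher code, which has its own scanning and warning logic), a loader enforcing it might look like this:

    import importlib.util

    # Names every handler module is expected to define as callables.
    REQUIRED = ("web_socket_do_extra_handshake", "web_socket_transfer_data")


    def load_handler(path):
        """Load a *_wsh.py module and verify the handler contract.

        Hypothetical helper for illustration; raises ValueError for the
        failure modes exercised by the testdata files above.
        """
        if not path.endswith("_wsh.py"):
            raise ValueError("handler file name must end with _wsh.py: %r" % path)
        spec = importlib.util.spec_from_file_location("handler", path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        for name in REQUIRED:
            if not callable(getattr(module, name, None)):
                raise ValueError("%s is missing or not callable in %r" % (name, path))
        return module
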
diff --git a/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/hello.pl b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/hello.pl
new file mode 100644
index 000000000..882ef5a10
--- /dev/null
+++ b/testing/web-platform/tests/tools/pywebsocket/src/test/testdata/hello.pl
@@ -0,0 +1,32 @@
+#!/usr/bin/perl -wT
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+print "Hello\n";
diff --git a/testing/web-platform/tests/tools/runner/css/bootstrap-theme.min.css b/testing/web-platform/tests/tools/runner/css/bootstrap-theme.min.css
new file mode 100644
index 000000000..61358b13d
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/css/bootstrap-theme.min.css
@@ -0,0 +1,5 @@
+/*!
+ * Bootstrap v3.3.5 (http://getbootstrap.com)
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 -15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 
-15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-color:#e8e8e8;background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-color:#2e6da4;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 
100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 
100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', 
GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left 
top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/runner/css/bootstrap.min.css b/testing/web-platform/tests/tools/runner/css/bootstrap.min.css
new file mode 100644
index 000000000..d65c66b1b
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/css/bootstrap.min.css
@@ -0,0 +1,5 @@
+/*!
+ * Bootstrap v3.3.5 (http://getbootstrap.com)
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tag
s:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before
{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211"}.g
lyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s 
ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid 
#eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote .small,blockquote footer,blockquote small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:''}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:'\00A0 \2014'}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfoot>tr>td,.table>tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid #ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table 
th[class*=col-]{position:static;display:table-cell;float:none}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>thead>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px\9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=checkbox]:focus,input[type=radio]:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#eee;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date].form-control,input[type=time].form-control,input[type=datetime-local].form-control,input[type=month].form-control{line-height:34px}.input-group-sm input[type=date],.input-group-sm input[type=time],.input-group-sm 
input[type=datetime-local],.input-group-sm input[type=month],input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=time],.input-group-lg input[type=datetime-local],.input-group-lg input[type=month],input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px\9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.form-control-static{min-height:34px;padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.form-group-sm select.form-control{height:30px;line-height:30px}.form-group-sm select[multiple].form-control,.form-group-sm textarea.form-control{height:auto}.form-group-sm .form-control-static{height:30px;min-height:32px;padding:6px 10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.form-group-lg select.form-control{height:46px;line-height:46px}.form-group-lg select[multiple].form-control,.form-group-lg textarea.form-control{height:auto}.form-group-lg .form-control-static{height:46px;min-height:38px;padding:11px 16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.form-group-lg .form-control+.form-control-feedback,.input-group-lg+.form-control-feedback,.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.form-group-sm 
.form-control+.form-control-feedback,.input-group-sm+.form-control-feedback,.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline .control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal 
.checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:14.33px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default 
.badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggle.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success 
.badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info .badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning 
.badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 
0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bottom:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar .input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open 
.dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 
10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid 
#ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media (min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media 
(min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group .input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom 
.navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default .btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse 
.navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid #ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:3;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:2;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 
16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid 
#ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 
50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}a.list-group-item,button.list-group-item{color:#555}a.list-group-item .list-group-item-heading,button.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover,button.list-group-item:focus,button.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover 
.list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success,button.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading,button.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover,button.list-group-item-success:focus,button.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover,button.list-group-item-success.active,button.list-group-item-success.active:focus,button.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info,button.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading,button.list-group-item-info .list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover,button.list-group-item-info:focus,button.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover,button.list-group-item-info.active,button.list-group-item-info.active:focus,button.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning,button.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover,button.list-group-item-warning:focus,button.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover,button.list-group-item-warning.active,button.list-group-item-warning.active:focus,button.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger,button.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger 
.list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover,button.list-group-item-danger:focus,button.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover,button.list-group-item-danger.active,button.list-group-item-danger.active:focus,button.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.panel-heading+.panel-collapse>.list-group .list-group-item:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child 
td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child 
th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading 
.badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid 
rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{min-height:16.43px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;filter:alpha(opacity=0);opacity:0;line-break:auto}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px 
rgba(0,0,0,.2);line-break:auto}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 
0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-15px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-15px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-15px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal 
.form-group:after,.modal-footer:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media 
print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} \ No newline at end of file
diff --git a/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.eot b/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.eot
new file mode 100644
index 000000000..4a4ca865d
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.eot
Binary files differ
diff --git a/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.svg b/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.svg
new file mode 100644
index 000000000..e3e2dc739
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.svg
@@ -0,0 +1,229 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
+<font-face units-per-em="1200" ascent="960" descent="-240" />
+<missing-glyph horiz-adv-x="500" />
+<glyph />
+<glyph />
+<glyph unicode="&#xd;" />
+<glyph unicode=" " />
+<glyph unicode="*" d="M100 500v200h259l-183 183l141 141l183 -183v259h200v-259l183 183l141 -141l-183 -183h259v-200h-259l183 -183l-141 -141l-183 183v-259h-200v259l-183 -183l-141 141l183 183h-259z" />
+<glyph unicode="+" d="M0 400v300h400v400h300v-400h400v-300h-400v-400h-300v400h-400z" />
+<glyph unicode="&#xa0;" />
+<glyph unicode="&#x2000;" horiz-adv-x="652" />
+<glyph unicode="&#x2001;" horiz-adv-x="1304" />
+<glyph unicode="&#x2002;" horiz-adv-x="652" />
+<glyph unicode="&#x2003;" horiz-adv-x="1304" />
+<glyph unicode="&#x2004;" horiz-adv-x="434" />
+<glyph unicode="&#x2005;" horiz-adv-x="326" />
+<glyph unicode="&#x2006;" horiz-adv-x="217" />
+<glyph unicode="&#x2007;" horiz-adv-x="217" />
+<glyph unicode="&#x2008;" horiz-adv-x="163" />
+<glyph unicode="&#x2009;" horiz-adv-x="260" />
+<glyph unicode="&#x200a;" horiz-adv-x="72" />
+<glyph unicode="&#x202f;" horiz-adv-x="260" />
+<glyph unicode="&#x205f;" horiz-adv-x="326" />
+<glyph unicode="&#x20ac;" d="M100 500l100 100h113q0 47 5 100h-218l100 100h135q37 167 112 257q117 141 297 141q242 0 354 -189q60 -103 66 -209h-181q0 55 -25.5 99t-63.5 68t-75 36.5t-67 12.5q-24 0 -52.5 -10t-62.5 -32t-65.5 -67t-50.5 -107h379l-100 -100h-300q-6 -46 -6 -100h406l-100 -100 h-300q9 -74 33 -132t52.5 -91t62 -54.5t59 -29t46.5 -7.5q29 0 66 13t75 37t63.5 67.5t25.5 96.5h174q-31 -172 -128 -278q-107 -117 -274 -117q-205 0 -324 158q-36 46 -69 131.5t-45 205.5h-217z" />
+<glyph unicode="&#x2212;" d="M200 400h900v300h-900v-300z" />
+<glyph unicode="&#x25fc;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#x2601;" d="M-14 494q0 -80 56.5 -137t135.5 -57h750q120 0 205 86.5t85 207.5t-85 207t-205 86q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5z" />
+<glyph unicode="&#x2709;" d="M0 100l400 400l200 -200l200 200l400 -400h-1200zM0 300v600l300 -300zM0 1100l600 -603l600 603h-1200zM900 600l300 300v-600z" />
+<glyph unicode="&#x270f;" d="M-13 -13l333 112l-223 223zM187 403l214 -214l614 614l-214 214zM887 1103l214 -214l99 92q13 13 13 32.5t-13 33.5l-153 153q-15 13 -33 13t-33 -13z" />
+<glyph unicode="&#xe001;" d="M0 1200h1200l-500 -550v-550h300v-100h-800v100h300v550z" />
+<glyph unicode="&#xe002;" d="M14 84q18 -55 86 -75.5t147 5.5q65 21 109 69t44 90v606l600 155v-521q-64 16 -138 -7q-79 -26 -122.5 -83t-25.5 -111q18 -55 86 -75.5t147 4.5q70 23 111.5 63.5t41.5 95.5v881q0 10 -7 15.5t-17 2.5l-752 -193q-10 -3 -17 -12.5t-7 -19.5v-689q-64 17 -138 -7 q-79 -25 -122.5 -82t-25.5 -112z" />
+<glyph unicode="&#xe003;" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233z" />
+<glyph unicode="&#xe005;" d="M100 784q0 64 28 123t73 100.5t104.5 64t119 20.5t120 -38.5t104.5 -104.5q48 69 109.5 105t121.5 38t118.5 -20.5t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-149.5 152.5t-126.5 127.5 t-94 124.5t-33.5 117.5z" />
+<glyph unicode="&#xe006;" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1z" />
+<glyph unicode="&#xe007;" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1zM237 700l196 -142l-73 -226l192 140l195 -141l-74 229l193 140h-235l-77 211l-78 -211h-239z" />
+<glyph unicode="&#xe008;" d="M0 0v143l400 257v100q-37 0 -68.5 74.5t-31.5 125.5v200q0 124 88 212t212 88t212 -88t88 -212v-200q0 -51 -31.5 -125.5t-68.5 -74.5v-100l400 -257v-143h-1200z" />
+<glyph unicode="&#xe009;" d="M0 0v1100h1200v-1100h-1200zM100 100h100v100h-100v-100zM100 300h100v100h-100v-100zM100 500h100v100h-100v-100zM100 700h100v100h-100v-100zM100 900h100v100h-100v-100zM300 100h600v400h-600v-400zM300 600h600v400h-600v-400zM1000 100h100v100h-100v-100z M1000 300h100v100h-100v-100zM1000 500h100v100h-100v-100zM1000 700h100v100h-100v-100zM1000 900h100v100h-100v-100z" />
+<glyph unicode="&#xe010;" d="M0 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM0 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5zM600 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM600 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe011;" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 450v200q0 21 14.5 35.5t35.5 14.5h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe012;" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v200q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5 t-14.5 -35.5v-200zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe013;" d="M29 454l419 -420l818 820l-212 212l-607 -607l-206 207z" />
+<glyph unicode="&#xe014;" d="M106 318l282 282l-282 282l212 212l282 -282l282 282l212 -212l-282 -282l282 -282l-212 -212l-282 282l-282 -282z" />
+<glyph unicode="&#xe015;" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233zM300 600v200h100v100h200v-100h100v-200h-100v-100h-200v100h-100z" />
+<glyph unicode="&#xe016;" d="M23 694q0 200 142 342t342 142t342 -142t142 -342q0 -141 -78 -262l300 -299q7 -7 7 -18t-7 -18l-109 -109q-8 -8 -18 -8t-18 8l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 694q0 -136 97 -233t234 -97t233.5 97t96.5 233t-96.5 233t-233.5 97t-234 -97 t-97 -233zM300 601h400v200h-400v-200z" />
+<glyph unicode="&#xe017;" d="M23 600q0 183 105 331t272 210v-166q-103 -55 -165 -155t-62 -220q0 -177 125 -302t302 -125t302 125t125 302q0 120 -62 220t-165 155v166q167 -62 272 -210t105 -331q0 -118 -45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5 zM500 750q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v400q0 21 -14.5 35.5t-35.5 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-400z" />
+<glyph unicode="&#xe018;" d="M100 1h200v300h-200v-300zM400 1v500h200v-500h-200zM700 1v800h200v-800h-200zM1000 1v1200h200v-1200h-200z" />
+<glyph unicode="&#xe019;" d="M26 601q0 -33 6 -74l151 -38l2 -6q14 -49 38 -93l3 -5l-80 -134q45 -59 105 -105l133 81l5 -3q45 -26 94 -39l5 -2l38 -151q40 -5 74 -5q27 0 74 5l38 151l6 2q46 13 93 39l5 3l134 -81q56 44 104 105l-80 134l3 5q24 44 39 93l1 6l152 38q5 40 5 74q0 28 -5 73l-152 38 l-1 6q-16 51 -39 93l-3 5l80 134q-44 58 -104 105l-134 -81l-5 3q-45 25 -93 39l-6 1l-38 152q-40 5 -74 5q-27 0 -74 -5l-38 -152l-5 -1q-50 -14 -94 -39l-5 -3l-133 81q-59 -47 -105 -105l80 -134l-3 -5q-25 -47 -38 -93l-2 -6l-151 -38q-6 -48 -6 -73zM385 601 q0 88 63 151t152 63t152 -63t63 -151q0 -89 -63 -152t-152 -63t-152 63t-63 152z" />
+<glyph unicode="&#xe020;" d="M100 1025v50q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-50q0 -11 -7 -18t-18 -7h-1050q-11 0 -18 7t-7 18zM200 100v800h900v-800q0 -41 -29.5 -71t-70.5 -30h-700q-41 0 -70.5 30 t-29.5 71zM300 100h100v700h-100v-700zM500 100h100v700h-100v-700zM500 1100h300v100h-300v-100zM700 100h100v700h-100v-700zM900 100h100v700h-100v-700z" />
+<glyph unicode="&#xe021;" d="M1 601l656 644l644 -644h-200v-600h-300v400h-300v-400h-300v600h-200z" />
+<glyph unicode="&#xe022;" d="M100 25v1150q0 11 7 18t18 7h475v-500h400v-675q0 -11 -7 -18t-18 -7h-850q-11 0 -18 7t-7 18zM700 800v300l300 -300h-300z" />
+<glyph unicode="&#xe023;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 500v400h100 v-300h200v-100h-300z" />
+<glyph unicode="&#xe024;" d="M-100 0l431 1200h209l-21 -300h162l-20 300h208l431 -1200h-538l-41 400h-242l-40 -400h-539zM488 500h224l-27 300h-170z" />
+<glyph unicode="&#xe025;" d="M0 0v400h490l-290 300h200v500h300v-500h200l-290 -300h490v-400h-1100zM813 200h175v100h-175v-100z" />
+<glyph unicode="&#xe026;" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM188 600q0 -170 121 -291t291 -121t291 121t121 291t-121 291t-291 121 t-291 -121t-121 -291zM350 600h150v300h200v-300h150l-250 -300z" />
+<glyph unicode="&#xe027;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM350 600l250 300 l250 -300h-150v-300h-200v300h-150z" />
+<glyph unicode="&#xe028;" d="M0 25v475l200 700h800l199 -700l1 -475q0 -11 -7 -18t-18 -7h-1150q-11 0 -18 7t-7 18zM200 500h200l50 -200h300l50 200h200l-97 500h-606z" />
+<glyph unicode="&#xe029;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 397v401 l297 -200z" />
+<glyph unicode="&#xe030;" d="M23 600q0 -118 45.5 -224.5t123 -184t184 -123t224.5 -45.5t224.5 45.5t184 123t123 184t45.5 224.5h-150q0 -177 -125 -302t-302 -125t-302 125t-125 302t125 302t302 125q136 0 246 -81l-146 -146h400v400l-145 -145q-157 122 -355 122q-118 0 -224.5 -45.5t-184 -123 t-123 -184t-45.5 -224.5z" />
+<glyph unicode="&#xe031;" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5q198 0 355 -122l145 145v-400h-400l147 147q-112 80 -247 80q-177 0 -302 -125t-125 -302h-150zM100 0v400h400l-147 -147q112 -80 247 -80q177 0 302 125t125 302h150q0 -118 -45.5 -224.5t-123 -184t-184 -123 t-224.5 -45.5q-198 0 -355 122z" />
+<glyph unicode="&#xe032;" d="M100 0h1100v1200h-1100v-1200zM200 100v900h900v-900h-900zM300 200v100h100v-100h-100zM300 400v100h100v-100h-100zM300 600v100h100v-100h-100zM300 800v100h100v-100h-100zM500 200h500v100h-500v-100zM500 400v100h500v-100h-500zM500 600v100h500v-100h-500z M500 800v100h500v-100h-500z" />
+<glyph unicode="&#xe033;" d="M0 100v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
+<glyph unicode="&#xe034;" d="M100 0v1100h100v-1100h-100zM300 400q60 60 127.5 84t127.5 17.5t122 -23t119 -30t110 -11t103 42t91 120.5v500q-40 -81 -101.5 -115.5t-127.5 -29.5t-138 25t-139.5 40t-125.5 25t-103 -29.5t-65 -115.5v-500z" />
+<glyph unicode="&#xe035;" d="M0 275q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 127 70.5 231.5t184.5 161.5t245 57t245 -57t184.5 -161.5t70.5 -231.5v-300q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 116 -49.5 227t-131 192.5t-192.5 131t-227 49.5t-227 -49.5t-192.5 -131t-131 -192.5 t-49.5 -227v-300zM200 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14zM800 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14z" />
+<glyph unicode="&#xe036;" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM688 459l141 141l-141 141l71 71l141 -141l141 141l71 -71l-141 -141l141 -141l-71 -71l-141 141l-141 -141z" />
+<glyph unicode="&#xe037;" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM700 857l69 53q111 -135 111 -310q0 -169 -106 -302l-67 54q86 110 86 248q0 146 -93 257z" />
+<glyph unicode="&#xe038;" d="M0 401v400h300l300 200v-800l-300 200h-300zM702 858l69 53q111 -135 111 -310q0 -170 -106 -303l-67 55q86 110 86 248q0 145 -93 257zM889 951l7 -8q123 -151 123 -344q0 -189 -119 -339l-7 -8l81 -66l6 8q142 178 142 405q0 230 -144 408l-6 8z" />
+<glyph unicode="&#xe039;" d="M0 0h500v500h-200v100h-100v-100h-200v-500zM0 600h100v100h400v100h100v100h-100v300h-500v-600zM100 100v300h300v-300h-300zM100 800v300h300v-300h-300zM200 200v100h100v-100h-100zM200 900h100v100h-100v-100zM500 500v100h300v-300h200v-100h-100v-100h-200v100 h-100v100h100v200h-200zM600 0v100h100v-100h-100zM600 1000h100v-300h200v-300h300v200h-200v100h200v500h-600v-200zM800 800v300h300v-300h-300zM900 0v100h300v-100h-300zM900 900v100h100v-100h-100zM1100 200v100h100v-100h-100z" />
+<glyph unicode="&#xe040;" d="M0 200h100v1000h-100v-1000zM100 0v100h300v-100h-300zM200 200v1000h100v-1000h-100zM500 0v91h100v-91h-100zM500 200v1000h200v-1000h-200zM700 0v91h100v-91h-100zM800 200v1000h100v-1000h-100zM900 0v91h200v-91h-200zM1000 200v1000h200v-1000h-200z" />
+<glyph unicode="&#xe041;" d="M0 700l1 475q0 10 7.5 17.5t17.5 7.5h474l700 -700l-500 -500zM148 953q0 -42 29 -71q30 -30 71.5 -30t71.5 30q29 29 29 71t-29 71q-30 30 -71.5 30t-71.5 -30q-29 -29 -29 -71z" />
+<glyph unicode="&#xe042;" d="M1 700l1 475q0 11 7 18t18 7h474l700 -700l-500 -500zM148 953q0 -42 30 -71q29 -30 71 -30t71 30q30 29 30 71t-30 71q-29 30 -71 30t-71 -30q-30 -29 -30 -71zM701 1200h100l700 -700l-500 -500l-50 50l450 450z" />
+<glyph unicode="&#xe043;" d="M100 0v1025l175 175h925v-1000l-100 -100v1000h-750l-100 -100h750v-1000h-900z" />
+<glyph unicode="&#xe044;" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
+<glyph unicode="&#xe045;" d="M0 100v700h200l100 -200h600l100 200h200v-700h-200v200h-800v-200h-200zM253 829l40 -124h592l62 124l-94 346q-2 11 -10 18t-18 7h-450q-10 0 -18 -7t-10 -18zM281 24l38 152q2 10 11.5 17t19.5 7h500q10 0 19.5 -7t11.5 -17l38 -152q2 -10 -3.5 -17t-15.5 -7h-600 q-10 0 -15.5 7t-3.5 17z" />
+<glyph unicode="&#xe046;" d="M0 200q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600z M356 500q0 100 72 172t172 72t172 -72t72 -172t-72 -172t-172 -72t-172 72t-72 172zM494 500q0 -44 31 -75t75 -31t75 31t31 75t-31 75t-75 31t-75 -31t-31 -75zM900 700v100h100v-100h-100z" />
+<glyph unicode="&#xe047;" d="M53 0h365v66q-41 0 -72 11t-49 38t1 71l92 234h391l82 -222q16 -45 -5.5 -88.5t-74.5 -43.5v-66h417v66q-34 1 -74 43q-18 19 -33 42t-21 37l-6 13l-385 998h-93l-399 -1006q-24 -48 -52 -75q-12 -12 -33 -25t-36 -20l-15 -7v-66zM416 521l178 457l46 -140l116 -317h-340 z" />
+<glyph unicode="&#xe048;" d="M100 0v89q41 7 70.5 32.5t29.5 65.5v827q0 28 -1 39.5t-5.5 26t-15.5 21t-29 14t-49 14.5v71l471 -1q120 0 213 -88t93 -228q0 -55 -11.5 -101.5t-28 -74t-33.5 -47.5t-28 -28l-12 -7q8 -3 21.5 -9t48 -31.5t60.5 -58t47.5 -91.5t21.5 -129q0 -84 -59 -156.5t-142 -111 t-162 -38.5h-500zM400 200h161q89 0 153 48.5t64 132.5q0 90 -62.5 154.5t-156.5 64.5h-159v-400zM400 700h139q76 0 130 61.5t54 138.5q0 82 -84 130.5t-239 48.5v-379z" />
+<glyph unicode="&#xe049;" d="M200 0v57q77 7 134.5 40.5t65.5 80.5l173 849q10 56 -10 74t-91 37q-6 1 -10.5 2.5t-9.5 2.5v57h425l2 -57q-33 -8 -62 -25.5t-46 -37t-29.5 -38t-17.5 -30.5l-5 -12l-128 -825q-10 -52 14 -82t95 -36v-57h-500z" />
+<glyph unicode="&#xe050;" d="M-75 200h75v800h-75l125 167l125 -167h-75v-800h75l-125 -167zM300 900v300h150h700h150v-300h-50q0 29 -8 48.5t-18.5 30t-33.5 15t-39.5 5.5t-50.5 1h-200v-850l100 -50v-100h-400v100l100 50v850h-200q-34 0 -50.5 -1t-40 -5.5t-33.5 -15t-18.5 -30t-8.5 -48.5h-49z " />
+<glyph unicode="&#xe051;" d="M33 51l167 125v-75h800v75l167 -125l-167 -125v75h-800v-75zM100 901v300h150h700h150v-300h-50q0 29 -8 48.5t-18 30t-33.5 15t-40 5.5t-50.5 1h-200v-650l100 -50v-100h-400v100l100 50v650h-200q-34 0 -50.5 -1t-39.5 -5.5t-33.5 -15t-18.5 -30t-8 -48.5h-50z" />
+<glyph unicode="&#xe052;" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 350q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM0 650q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1000q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 950q0 -20 14.5 -35t35.5 -15h600q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-600q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe053;" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 650q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM200 350q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM200 950q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe054;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1000q-21 0 -35.5 15 t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-600 q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe055;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe056;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM300 50v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800 q-21 0 -35.5 15t-14.5 35zM300 650v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 950v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe057;" d="M-101 500v100h201v75l166 -125l-166 -125v75h-201zM300 0h100v1100h-100v-1100zM500 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35 v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 650q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100 q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe058;" d="M1 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 650 q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM801 0v1100h100v-1100 h-100zM934 550l167 -125v75h200v100h-200v75z" />
+<glyph unicode="&#xe059;" d="M0 275v650q0 31 22 53t53 22h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53zM900 600l300 300v-600z" />
+<glyph unicode="&#xe060;" d="M0 44v1012q0 18 13 31t31 13h1112q19 0 31.5 -13t12.5 -31v-1012q0 -18 -12.5 -31t-31.5 -13h-1112q-18 0 -31 13t-13 31zM100 263l247 182l298 -131l-74 156l293 318l236 -288v500h-1000v-737zM208 750q0 56 39 95t95 39t95 -39t39 -95t-39 -95t-95 -39t-95 39t-39 95z " />
+<glyph unicode="&#xe062;" d="M148 745q0 124 60.5 231.5t165 172t226.5 64.5q123 0 227 -63t164.5 -169.5t60.5 -229.5t-73 -272q-73 -114 -166.5 -237t-150.5 -189l-57 -66q-10 9 -27 26t-66.5 70.5t-96 109t-104 135.5t-100.5 155q-63 139 -63 262zM342 772q0 -107 75.5 -182.5t181.5 -75.5 q107 0 182.5 75.5t75.5 182.5t-75.5 182t-182.5 75t-182 -75.5t-75 -181.5z" />
+<glyph unicode="&#xe063;" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM173 600q0 -177 125.5 -302t301.5 -125v854q-176 0 -301.5 -125 t-125.5 -302z" />
+<glyph unicode="&#xe064;" d="M117 406q0 94 34 186t88.5 172.5t112 159t115 177t87.5 194.5q21 -71 57.5 -142.5t76 -130.5t83 -118.5t82 -117t70 -116t50 -125.5t18.5 -136q0 -89 -39 -165.5t-102 -126.5t-140 -79.5t-156 -33.5q-114 6 -211.5 53t-161.5 139t-64 210zM243 414q14 -82 59.5 -136 t136.5 -80l16 98q-7 6 -18 17t-34 48t-33 77q-15 73 -14 143.5t10 122.5l9 51q-92 -110 -119.5 -185t-12.5 -156z" />
+<glyph unicode="&#xe065;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5q366 -6 397 -14l-186 -186h-311q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v125l200 200v-225q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM436 341l161 50l412 412l-114 113l-405 -405zM995 1015l113 -113l113 113l-21 85l-92 28z" />
+<glyph unicode="&#xe066;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h261l2 -80q-133 -32 -218 -120h-145q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-53q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5 zM423 524q30 38 81.5 64t103 35.5t99 14t77.5 3.5l29 -1v-209l360 324l-359 318v-216q-7 0 -19 -1t-48 -8t-69.5 -18.5t-76.5 -37t-76.5 -59t-62 -88t-39.5 -121.5z" />
+<glyph unicode="&#xe067;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q61 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-169q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM342 632l283 -284l567 567l-137 137l-430 -431l-146 147z" />
+<glyph unicode="&#xe068;" d="M0 603l300 296v-198h200v200h-200l300 300l295 -300h-195v-200h200v198l300 -296l-300 -300v198h-200v-200h195l-295 -300l-300 300h200v200h-200v-198z" />
+<glyph unicode="&#xe069;" d="M200 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-1100l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe070;" d="M0 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-487l500 487v-1100l-500 488v-488l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe071;" d="M136 550l564 550v-487l500 487v-1100l-500 488v-488z" />
+<glyph unicode="&#xe072;" d="M200 0l900 550l-900 550v-1100z" />
+<glyph unicode="&#xe073;" d="M200 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800zM600 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
+<glyph unicode="&#xe074;" d="M200 150q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v800q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
+<glyph unicode="&#xe075;" d="M0 0v1100l500 -487v487l564 -550l-564 -550v488z" />
+<glyph unicode="&#xe076;" d="M0 0v1100l500 -487v487l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v488z" />
+<glyph unicode="&#xe077;" d="M300 0v1100l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438z" />
+<glyph unicode="&#xe078;" d="M100 250v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5zM100 500h1100l-550 564z" />
+<glyph unicode="&#xe079;" d="M185 599l592 -592l240 240l-353 353l353 353l-240 240z" />
+<glyph unicode="&#xe080;" d="M272 194l353 353l-353 353l241 240l572 -571l21 -22l-1 -1v-1l-592 -591z" />
+<glyph unicode="&#xe081;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM300 500h200v-200h200v200h200v200h-200v200h-200v-200h-200v-200z" />
+<glyph unicode="&#xe082;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM300 500h600v200h-600v-200z" />
+<glyph unicode="&#xe083;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM246 459l213 -213l141 142l141 -142l213 213l-142 141l142 141l-213 212l-141 -141l-141 142l-212 -213l141 -141 z" />
+<glyph unicode="&#xe084;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM270 551l276 -277l411 411l-175 174l-236 -236l-102 102z" />
+<glyph unicode="&#xe085;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM364 700h143q4 0 11.5 -1t11 -1t6.5 3t3 9t1 11t3.5 8.5t3.5 6t5.5 4t6.5 2.5t9 1.5t9 0.5h11.5h12.5 q19 0 30 -10t11 -26q0 -22 -4 -28t-27 -22q-5 -1 -12.5 -3t-27 -13.5t-34 -27t-26.5 -46t-11 -68.5h200q5 3 14 8t31.5 25.5t39.5 45.5t31 69t14 94q0 51 -17.5 89t-42 58t-58.5 32t-58.5 15t-51.5 3q-50 0 -90.5 -12t-75 -38.5t-53.5 -74.5t-19 -114zM500 300h200v100h-200 v-100z" />
+<glyph unicode="&#xe086;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM400 300h400v100h-100v300h-300v-100h100v-200h-100v-100zM500 800h200v100h-200v-100z" />
+<glyph unicode="&#xe087;" d="M0 500v200h195q31 125 98.5 199.5t206.5 100.5v200h200v-200q54 -20 113 -60t112.5 -105.5t71.5 -134.5h203v-200h-203q-25 -102 -116.5 -186t-180.5 -117v-197h-200v197q-140 27 -208 102.5t-98 200.5h-194zM290 500q24 -73 79.5 -127.5t130.5 -78.5v206h200v-206 q149 48 201 206h-201v200h200q-25 74 -75.5 127t-124.5 77v-204h-200v203q-75 -23 -130 -77t-79 -126h209v-200h-210z" />
+<glyph unicode="&#xe088;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM356 465l135 135 l-135 135l109 109l135 -135l135 135l109 -109l-135 -135l135 -135l-109 -109l-135 135l-135 -135z" />
+<glyph unicode="&#xe089;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM322 537l141 141 l87 -87l204 205l142 -142l-346 -345z" />
+<glyph unicode="&#xe090;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -115 62 -215l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5zM391 245q97 -59 209 -59q171 0 292.5 121.5t121.5 292.5 q0 112 -59 209z" />
+<glyph unicode="&#xe091;" d="M0 547l600 453v-300h600v-300h-600v-301z" />
+<glyph unicode="&#xe092;" d="M0 400v300h600v300l600 -453l-600 -448v301h-600z" />
+<glyph unicode="&#xe093;" d="M204 600l450 600l444 -600h-298v-600h-300v600h-296z" />
+<glyph unicode="&#xe094;" d="M104 600h296v600h300v-600h298l-449 -600z" />
+<glyph unicode="&#xe095;" d="M0 200q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453l-600 -448v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5z" />
+<glyph unicode="&#xe096;" d="M0 0v400l129 -129l294 294l142 -142l-294 -294l129 -129h-400zM635 777l142 -142l294 294l129 -129v400h-400l129 -129z" />
+<glyph unicode="&#xe097;" d="M34 176l295 295l-129 129h400v-400l-129 130l-295 -295zM600 600v400l129 -129l295 295l142 -141l-295 -295l129 -130h-400z" />
+<glyph unicode="&#xe101;" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5t224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5zM456 851l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5 t21.5 34.5l58 302q4 20 -8 34.5t-32 14.5h-207q-21 0 -33 -14.5t-8 -34.5zM500 300h200v100h-200v-100z" />
+<glyph unicode="&#xe102;" d="M0 800h100v-200h400v300h200v-300h400v200h100v100h-111q1 1 1 6.5t-1.5 15t-3.5 17.5l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6 h-111v-100zM100 0h400v400h-400v-400zM200 900q-3 0 14 48t36 96l18 47l213 -191h-281zM700 0v400h400v-400h-400zM731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269z" />
+<glyph unicode="&#xe103;" d="M0 -22v143l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55z M238.5 300.5q19.5 -6.5 86.5 76.5q55 66 367 234q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5z" />
+<glyph unicode="&#xe104;" d="M111 408q0 -33 5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5 t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5q2 -12 8 -41.5t8 -43t6 -39.5 t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85z" />
+<glyph unicode="&#xe105;" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30l26 -40l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5 t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30zM120 600q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5t123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54 q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l105 105q-37 24 -75 72t-57 84l-20 36z" />
+<glyph unicode="&#xe106;" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43zM120 600q210 -282 393 -336l37 141q-107 18 -178.5 101.5t-71.5 193.5 q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68l-14 26zM780 161l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52l26 -40l-26 -40 q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5t-124 -100t-146.5 -79z" />
+<glyph unicode="&#xe107;" d="M-97.5 34q13.5 -34 50.5 -34h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 34 -48 36.5t-48 -29.5l-642 -1066q-21 -32 -7.5 -66zM155 200l445 723l445 -723h-345v100h-200v-100h-345zM500 600l100 -300l100 300v100h-200v-100z" />
+<glyph unicode="&#xe108;" d="M100 262v41q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -20 -13 -28.5t-32 0.5l-94 78h-222l-94 -78q-19 -9 -32 -0.5t-13 28.5 v64q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5z" />
+<glyph unicode="&#xe109;" d="M0 50q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100v-750zM0 900h1100v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 100v100h100v-100h-100zM100 300v100h100v-100h-100z M100 500v100h100v-100h-100zM300 100v100h100v-100h-100zM300 300v100h100v-100h-100zM300 500v100h100v-100h-100zM500 100v100h100v-100h-100zM500 300v100h100v-100h-100zM500 500v100h100v-100h-100zM700 100v100h100v-100h-100zM700 300v100h100v-100h-100zM700 500 v100h100v-100h-100zM900 100v100h100v-100h-100zM900 300v100h100v-100h-100zM900 500v100h100v-100h-100z" />
+<glyph unicode="&#xe110;" d="M0 200v200h259l600 600h241v198l300 -295l-300 -300v197h-159l-600 -600h-341zM0 800h259l122 -122l141 142l-181 180h-341v-200zM678 381l141 142l122 -123h159v198l300 -295l-300 -300v197h-241z" />
+<glyph unicode="&#xe111;" d="M0 400v600q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5z" />
+<glyph unicode="&#xe112;" d="M100 600v200h300v-250q0 -113 6 -145q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5 t-58 109.5t-31.5 116t-15 104t-3 83zM100 900v300h300v-300h-300zM800 900v300h300v-300h-300z" />
+<glyph unicode="&#xe113;" d="M-30 411l227 -227l352 353l353 -353l226 227l-578 579z" />
+<glyph unicode="&#xe114;" d="M70 797l580 -579l578 579l-226 227l-353 -353l-352 353z" />
+<glyph unicode="&#xe115;" d="M-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196zM402 1000l215 -200h381v-400h-198l299 -283l299 283h-200v600h-796z" />
+<glyph unicode="&#xe116;" d="M18 939q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15 t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43z" />
+<glyph unicode="&#xe117;" d="M0 0v800h1200v-800h-1200zM0 900v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-100h-1200z" />
+<glyph unicode="&#xe118;" d="M1 0l300 700h1200l-300 -700h-1200zM1 400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000z" />
+<glyph unicode="&#xe119;" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
+<glyph unicode="&#xe120;" d="M0 600l300 298v-198h600v198l300 -298l-300 -297v197h-600v-197z" />
+<glyph unicode="&#xe121;" d="M0 100v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM800 100h100v100h-100v-100z M1000 100h100v100h-100v-100z" />
+<glyph unicode="&#xe122;" d="M-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5zM100 500v250v8v8v7t0.5 7t1.5 5.5t2 5t3 4t4.5 3.5t6 1.5t7.5 0.5h200l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35 q-55 337 -55 351zM1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe123;" d="M74 350q0 21 13.5 35.5t33.5 14.5h18l117 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94q20 0 29 -10.5t3 -29.5q-18 -36 -18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-8 -3 -23 -8.5 t-65 -20t-103 -25t-132.5 -19.5t-158.5 -9q-125 0 -245.5 20.5t-178.5 40.5l-58 20q-18 7 -31 27.5t-13 40.5zM497 110q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6t-103 6z" />
+<glyph unicode="&#xe124;" d="M21 445l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180l-155 180l-45 -233l-224 78l78 -225l-233 -44l179 -156z" />
+<glyph unicode="&#xe125;" d="M0 200h200v600h-200v-600zM300 275q0 -75 100 -75h61q124 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400z M400 300v375l150 213l100 212h50v-175l-50 -225h450v-125l-250 -375h-214l-136 100h-100z" />
+<glyph unicode="&#xe126;" d="M0 400v600h200v-600h-200zM300 525v400q0 75 100 75h61q124 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5h-50q-27 0 -51 20t-38 48l-96 198l-145 196 q-20 26 -20 63zM400 525l150 -212l100 -213h50v175l-50 225h450v125l-250 375h-214l-136 -100h-100v-375z" />
+<glyph unicode="&#xe127;" d="M8 200v600h200v-600h-200zM308 275v525q0 17 14 35.5t28 28.5l14 9l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341 q-7 0 -90 81t-83 94zM408 289l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83l-339 -236v-503z" />
+<glyph unicode="&#xe128;" d="M-101 651q0 72 54 110t139 38l302 -1l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6l365 -230q7 -4 17 -10.5t26.5 -26t16.5 -36.5v-526q0 -13 -86 -93.5t-94 -80.5h-341q-16 0 -29.5 20t-19.5 41l-130 339h-107q-84 0 -139 39t-55 111zM-1 601h222 q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l107 89v502l-343 237l-87 -83l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100zM1000 201v600h200v-600h-200z" />
+<glyph unicode="&#xe129;" d="M97 719l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53zM172 739l83 86l183 -146 q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6.5v7.5v6.5v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294l-89 -100h-503zM400 0v200h600v-200h-600z" />
+<glyph unicode="&#xe130;" d="M2 585q-16 -31 6 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85q0 -51 -0.5 -153.5t-0.5 -148.5q0 -84 38.5 -138t110.5 -54t111 55t39 139v106l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15zM77 565l236 339h503 l89 -100v-294l-340 -130q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146zM305 1104v200h600v-200h-600z" />
+<glyph unicode="&#xe131;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM298 701l2 -201h300l-2 -194l402 294l-402 298v-197h-300z" />
+<glyph unicode="&#xe132;" d="M0 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5zM200 600l402 -294l-2 194h300l2 201h-300v197z" />
+<glyph unicode="&#xe133;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600h200v-300h200v300h200l-300 400z" />
+<glyph unicode="&#xe134;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600l300 -400l300 400h-200v300h-200v-300h-200z" />
+<glyph unicode="&#xe135;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM254 780q-8 -33 5.5 -92.5t7.5 -87.5q0 -9 17 -44t16 -60 q12 0 23 -5.5t23 -15t20 -13.5q24 -12 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55t-20 -57q42 -71 87 -80q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q104 -3 221 112q30 29 47 47t34.5 49t20.5 62q-14 9 -37 9.5t-36 7.5q-14 7 -49 15t-52 19q-9 0 -39.5 -0.5 t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5t5.5 57.5 q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 10 37q8 0 23.5 -1.5t24.5 -1.5t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 39 2 44q31 -13 58 -14.5t39 3.5l11 4q7 36 -16.5 53.5t-64.5 28.5t-56 23q-19 -3 -37 0 q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6q-15 -3 -45.5 0.5t-45.5 -2.5q-21 -7 -52 -26.5t-34 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -90.5t-29.5 -79.5zM518 916q3 12 16 30t16 25q10 -10 18.5 -10t14 6t14.5 14.5t16 12.5q0 -24 17 -66.5t17 -43.5 q-9 2 -31 5t-36 5t-32 8t-30 14zM692 1003h1h-1z" />
+<glyph unicode="&#xe136;" d="M0 164.5q0 21.5 15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138l145 -232l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5z" />
+<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M0 196v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 596v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5zM0 996v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM600 596h500v100h-500v-100zM800 196h300v100h-300v-100zM900 996h200v100h-200v-100z" />
+<glyph unicode="&#xe138;" d="M100 1100v100h1000v-100h-1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
+<glyph unicode="&#xe139;" d="M0 200v200h1200v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500z M500 1000h200v100h-200v-100z" />
+<glyph unicode="&#xe140;" d="M0 0v400l129 -129l200 200l142 -142l-200 -200l129 -129h-400zM0 800l129 129l200 -200l142 142l-200 200l129 129h-400v-400zM729 329l142 142l200 -200l129 129v-400h-400l129 129zM729 871l200 200l-129 129h400v-400l-129 129l-200 -200z" />
+<glyph unicode="&#xe141;" d="M0 596q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 596q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM291 655 q0 23 15.5 38.5t38.5 15.5t39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39zM400 850q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5zM514 609q0 32 20.5 56.5t51.5 29.5l122 126l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5 q22 0 38 -16t16 -39t-16 -39t-38 -16q-14 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5zM800 655q0 22 16 38t39 16t38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39z" />
+<glyph unicode="&#xe142;" d="M-40 375q-13 -95 35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -78.5 -16.5t-67.5 -51.5l-389 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23 q38 0 53 -36q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256l7 -7l69 -60 l517 511q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163z" />
+<glyph unicode="&#xe143;" d="M80 784q0 131 98.5 229.5t230.5 98.5q143 0 241 -129q103 129 246 129q129 0 226 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-197 -191 -293 -322l-17 -23l-16 23q-43 58 -100 122.5t-92 99.5t-101 100q-71 70 -104.5 105.5t-77 89.5t-61 99 t-17.5 91zM250 784q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203l12 12q64 62 97.5 97t64.5 79t31 72q0 71 -48 119.5t-105 48.5q-74 0 -132 -83l-118 -171l-114 174q-51 80 -123 80q-60 0 -109.5 -49.5t-49.5 -118.5z" />
+<glyph unicode="&#xe144;" d="M57 353q0 -95 66 -159l141 -142q68 -66 159 -66q93 0 159 66l283 283q66 66 66 159t-66 159l-141 141q-8 9 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159zM269 706q0 -93 66 -159l141 -141q7 -7 19 -17l105 105 l-212 212l389 389l247 -247l-95 -96l18 -17q47 -49 77 -100l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159z" />
+<glyph unicode="&#xe145;" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM300 300h600v700h-600v-700zM496 150q0 -43 30.5 -73.5t73.5 -30.5t73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5 t-73.5 -30.5t-30.5 -73.5z" />
+<glyph unicode="&#xe146;" d="M0 0l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207z" />
+<glyph unicode="&#xe148;" d="M295 433h139q5 -77 48.5 -126.5t117.5 -64.5v335q-6 1 -15.5 4t-11.5 3q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5 v-307l64 -14q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5zM466 889q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3v274q-61 -8 -97.5 -37.5t-36.5 -102.5 zM700 237q170 18 170 151q0 64 -44 99.5t-126 60.5v-311z" />
+<glyph unicode="&#xe149;" d="M100 600v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -28 16.5 -69.5t28 -62.5t41.5 -72h241v-100h-197q8 -50 -2.5 -115 t-31.5 -94q-41 -59 -99 -113q35 11 84 18t70 7q33 1 103 -16t103 -17q76 0 136 30l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5 t-30 142.5h-221z" />
+<glyph unicode="&#xe150;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM602 900l298 300l298 -300h-198v-900h-200v900h-198z" />
+<glyph unicode="&#xe151;" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v200h100v-100h200v-100h-300zM700 400v100h300v-200h-99v-100h-100v100h99v100h-200zM700 700v500h300v-500h-100v100h-100v-100h-100zM801 900h100v200h-100v-200z" />
+<glyph unicode="&#xe152;" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v500h300v-500h-100v100h-100v-100h-100zM700 700v200h100v-100h200v-100h-300zM700 1100v100h300v-200h-99v-100h-100v100h99v100h-200zM801 200h100v200h-100v-200z" />
+<glyph unicode="&#xe153;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 100v400h300v-500h-100v100h-200zM800 1100v100h200v-500h-100v400h-100zM901 200h100v200h-100v-200z" />
+<glyph unicode="&#xe154;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 400v100h200v-500h-100v400h-100zM800 800v400h300v-500h-100v100h-200zM901 900h100v200h-100v-200z" />
+<glyph unicode="&#xe155;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h500v-200h-500zM700 400v200h400v-200h-400zM700 700v200h300v-200h-300zM700 1000v200h200v-200h-200z" />
+<glyph unicode="&#xe156;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h200v-200h-200zM700 400v200h300v-200h-300zM700 700v200h400v-200h-400zM700 1000v200h500v-200h-500z" />
+<glyph unicode="&#xe157;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500z" />
+<glyph unicode="&#xe158;" d="M0 400v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-163 0 -281.5 117.5t-118.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM400 300l333 250l-333 250v-500z" />
+<glyph unicode="&#xe159;" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 700l250 -333l250 333h-500z" />
+<glyph unicode="&#xe160;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 400h500l-250 333z" />
+<glyph unicode="&#xe161;" d="M0 400v300h300v200l400 -350l-400 -350v200h-300zM500 0v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400z" />
+<glyph unicode="&#xe162;" d="M217 519q8 -19 31 -19h302q-155 -438 -160 -458q-5 -21 4 -32l9 -8h9q14 0 26 15q11 13 274.5 321.5t264.5 308.5q14 19 5 36q-8 17 -31 17l-301 -1q1 4 78 219.5t79 227.5q2 15 -5 27l-9 9h-9q-15 0 -25 -16q-4 -6 -98 -111.5t-228.5 -257t-209.5 -237.5q-16 -19 -6 -41 z" />
+<glyph unicode="&#xe163;" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300zM600 400v300h300v200l400 -350l-400 -350v200h-300z " />
+<glyph unicode="&#xe164;" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98l-78 73l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5 v-300zM496 709l353 342l-149 149h500v-500l-149 149l-342 -353z" />
+<glyph unicode="&#xe165;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM406 600 q0 80 57 137t137 57t137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137z" />
+<glyph unicode="&#xe166;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 800l445 -500l450 500h-295v400h-300v-400h-300zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe167;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 700h300v-300h300v300h295l-445 500zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe168;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 705l305 -305l596 596l-154 155l-442 -442l-150 151zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe169;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 988l97 -98l212 213l-97 97zM200 400l697 1l3 699l-250 -239l-149 149l-212 -212l149 -149zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe170;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM200 612l212 -212l98 97l-213 212zM300 1200l239 -250l-149 -149l212 -212l149 148l249 -237l-1 697zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe171;" d="M23 415l1177 784v-1079l-475 272l-310 -393v416h-392zM494 210l672 938l-672 -712v-226z" />
+<glyph unicode="&#xe172;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200z" />
+<glyph unicode="&#xe173;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120l-126 -127h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM581 306l123 123l120 -120l353 352l123 -123l-475 -476zM600 1000h100v200h-100v-200z" />
+<glyph unicode="&#xe174;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170l-298 -298h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200zM700 133l170 170l-170 170l127 127l170 -170l170 170l127 -128l-170 -169l170 -170 l-127 -127l-170 170l-170 -170z" />
+<glyph unicode="&#xe175;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300l300 -300l300 300h-200v300h-200v-300h-200zM600 1000v200h100v-200h-100z" />
+<glyph unicode="&#xe176;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200l-298 -298h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300h200v-300h200v300h200l-300 300zM600 1000v200h100v-200h-100z" />
+<glyph unicode="&#xe177;" d="M0 250q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200v-550zM0 900h1200v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 300v200h400v-200h-400z" />
+<glyph unicode="&#xe178;" d="M0 400l300 298v-198h400v-200h-400v-198zM100 800v200h100v-200h-100zM300 800v200h100v-200h-100zM500 800v200h400v198l300 -298l-300 -298v198h-400zM800 300v200h100v-200h-100zM1000 300h100v200h-100v-200z" />
+<glyph unicode="&#xe179;" d="M100 700v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300l50 100l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447zM800 597q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5 t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v1106q0 31 -18 40.5t-44 -7.5l-276 -116q-25 -17 -43.5 -51.5t-18.5 -65.5v-359z" />
+<glyph unicode="&#xe180;" d="M100 0h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5 t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56z" />
+<glyph unicode="&#xe181;" d="M0 300q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM100 100h400l200 200h105l295 98v-298h-425l-100 -100h-375zM100 300v200h300v-200h-300zM100 600v200h300v-200h-300z M100 1000h400l200 -200v-98l295 98h105v200h-425l-100 100h-375zM700 402v163l400 133v-163z" />
+<glyph unicode="&#xe182;" d="M16.5 974.5q0.5 -21.5 16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q17 18 13.5 41t-22.5 37l-192 136q-19 14 -45 12t-42 -19l-118 -118q-142 101 -268 227t-227 268l118 118q17 17 20 41.5t-11 44.5 l-139 194q-14 19 -36.5 22t-40.5 -14l-162 -162q-1 -11 -0.5 -32.5z" />
+<glyph unicode="&#xe183;" d="M0 50v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5t30 -27.5t12 -24l1 -10v-50l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-20 0 -35 14.5t-15 35.5zM0 712 q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40 t-53.5 -36.5t-31 -27.5l-9 -10v-200z" />
+<glyph unicode="&#xe184;" d="M100 0v100h1100v-100h-1100zM175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250z" />
+<glyph unicode="&#xe185;" d="M100 0h300v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400zM500 0v1000q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300zM900 0v700q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300z" />
+<glyph unicode="&#xe186;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
+<glyph unicode="&#xe187;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h100v200h100v-200h100v500h-100v-200h-100v200h-100v-500zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
+<glyph unicode="&#xe188;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v100h-200v300h200v100h-300v-500zM600 300h300v100h-200v300h200v100h-300v-500z" />
+<glyph unicode="&#xe189;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 550l300 -150v300zM600 400l300 150l-300 150v-300z" />
+<glyph unicode="&#xe190;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300v500h700v-500h-700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM575 549 q0 -65 27 -107t68 -42h130v300h-130q-38 0 -66.5 -43t-28.5 -108z" />
+<glyph unicode="&#xe191;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
+<glyph unicode="&#xe192;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v400h-200v100h-100v-500zM301 400v200h100v-200h-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
+<glyph unicode="&#xe193;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 700v100h300v-300h-99v-100h-100v100h99v200h-200zM201 300v100h100v-100h-100zM601 300v100h100v-100h-100z M700 700v100h200v-500h-100v400h-100z" />
+<glyph unicode="&#xe194;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 500v200 l100 100h300v-100h-300v-200h300v-100h-300z" />
+<glyph unicode="&#xe195;" d="M0 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 400v400h300 l100 -100v-100h-100v100h-200v-100h200v-100h-200v-100h-100zM700 400v100h100v-100h-100z" />
+<glyph unicode="&#xe197;" d="M-14 494q0 -80 56.5 -137t135.5 -57h222v300h400v-300h128q120 0 205 86.5t85 207.5t-85 207t-205 86q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200h200v300h200v-300h200 l-300 -300z" />
+<glyph unicode="&#xe198;" d="M-14 494q0 -80 56.5 -137t135.5 -57h8l414 414l403 -403q94 26 154.5 104.5t60.5 178.5q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200l300 300 l300 -300h-200v-300h-200v300h-200z" />
+<glyph unicode="&#xe199;" d="M100 200h400v-155l-75 -45h350l-75 45v155h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170z" />
+<glyph unicode="&#xe200;" d="M121 700q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350l-75 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5 t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -11.5t1 -11.5q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5z" />
+</font>
+</defs></svg>
\ No newline at end of file
diff --git a/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.ttf b/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.ttf
new file mode 100644
index 000000000..67fa00bf8
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.ttf
Binary files differ
diff --git a/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.woff b/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.woff
new file mode 100644
index 000000000..8c54182aa
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/fonts/glyphicons-halflings-regular.woff
Binary files differ
diff --git a/testing/web-platform/tests/tools/runner/index.html b/testing/web-platform/tests/tools/runner/index.html
new file mode 100644
index 000000000..31b1ce732
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/index.html
@@ -0,0 +1,219 @@
+<!DOCTYPE html>
+<html lang=en>
+<meta charset=UTF-8>
+<title>Web tests</title>
+<link rel='stylesheet' href='css/bootstrap.min.css'>
+<link rel='stylesheet' href='css/bootstrap-theme.min.css'>
+<link rel=stylesheet href=runner.css>
+<script src=runner.js></script>
+
+
+<header class="navbar navbar-inverse navbar-fixed-top">
+ <div class="container">
+ <div class="navbar-header">
+ <a class="navbar-brand" href="#">
+ <img src='logo.svg' width='50' height='50' alt='Logo for the WPT Runner'>
+ Web Platform Tests Runner
+ </a>
+ </div>
+ </div>
+</header>
+
+<div class="container">
+ <div id="testControl" class="panel panel-default">
+ <div class="panel-body">
+ <form id='options' class='horizontal-form' onsubmit='return false;'>
+
+ <div class="form-group">
+ <label class="col-sm-3 control-label">Test types to include</label>
+ <div class="col-sm-9">
+ <label>
+ <input type=checkbox checked value="testharness" id='th' class='test-type'>
+ JavaScript tests
+ </label>
+ <label>
+ <input type=checkbox checked value="reftest" id='ref' class='test-type'>
+ Reftests
+ </label>
+ <label>
+ <input type=checkbox checked value="manual" id='man' class='test-type'>
+ Manual tests
+ </label>
+ </div>
+ </div>
+
+ <div class="form-group">
+ <label for="path" class="col-sm-3 control-label">Run tests under path</label>
+ <div class="col-sm-9">
+ <input value="/" id='path' class='path form-control' disabled>
+ <label>
+ <input type=checkbox id='use_regex'>
+ Use regular expression
+ </label>
+ </div>
+ </div>
+
+ <div class="form-group">
+ <label for="timeout_multiplier" class="col-sm-3 control-label">Timeout multiplier</label>
+ <div class="col-sm-9">
+ <input type=number value="1" id='timeout_multiplier' class='timeout_multiplier form-control'>
+ </div>
+ </div>
+
+ <div class="form-group">
+ <label class="col-sm-3 control-label">Debug options</label>
+ <div class="col-sm-9">
+ <label>
+ <input type=checkbox id='render' value='render' class='render'>
+ Show output
+ </label>
+ <label>
+ <input type=checkbox id='dumpit'>
+ Dump JSON
+ </label>
+ </div>
+ </div>
+
+ <div class="form-group">
+ <label class="col-sm-3 control-label">Count of matching tests</label>
+ <div class="col-sm-9" id="testcount">
+ </div>
+ </div>
+
+ <div class="form-group">
+ <div class="col-sm-offset-3 col-sm-9">
+ <button type="submit" class="btn btn-success toggleStart" disabled>Start</button>
+ <button type='submit' class="btn btn-info togglePause" disabled>Pause</button>
+ </div>
+ </div>
+ </form>
+ </div>
+ </div>
+
+ <div class="instructions">
+ <p>
+ To run a set of
+ <a href="https://github.com/w3c/web-platform-tests/blob/master/README.md">web-platform-tests</a>
+ tests, specify a path value in the <b>Run tests under path</b> field above. Example paths:
+ </p>
+ <ul>
+ <li><code>/</code> - runs all of the tests from the root down</li>
+ <li><code>/websockets</code> - runs all of the
+ <a href="http://w3c-test.org/websockets/">websockets</a> tests</li>
+ <li><code>/websockets/constructor</code> - runs all of the
+ <a href="http://w3c-test.org/websockets/constructor/">websockets/constructor</a> tests</li>
+ <li><code>/html/syntax/parsing</code> - runs all of the
+ <a href="http://w3c-test.org/html/syntax/parsing/">html/syntax/parsing</a> tests</li>
+ </ul>
+ <p>
+      Multiple test paths can be specified by separating them with commas or whitespace. For example,
+ <code>/js, /html</code> will run the <a href="http://w3c-test.org/js/">js</a> <em>and</em> <a href="http://w3c-test.org/html/">html</a>
+ tests.
+ </p>
+ <p>
+      <a href="http://www.w3schools.com/jsref/jsref_obj_regexp.asp" target="_blank">JavaScript regular expressions</a> are also supported for filtering. When the <b>Use regular expression</b> option is checked,
+      only test paths matching the regex pattern will run. For example, you can specify <code>^/js/|^/html/</code>
+ to run the <a href="http://w3c-test.org/js/">js</a> <em>and</em> <a href="http://w3c-test.org/html/">html</a>
+ tests.
+ </p>
+ <p>
+ If the test runner is run online, the set of tests available to run can be found in the
+ <a href="http://w3c-test.org/">w3c-test.org</a> test repository.
+ </p>
+ <p>
+ Tests will run in a new window. For reftests and manual tests it’s best
+ to put that window side-by-side with this one.
+ </p>
+ </div>
+
+ <div id="output">
+ <div class="summary clearfix">
+ <h4>Progress
+ <span id="manifest">updating and loading test manifest; this may take several minutes</span>
+ </h4>
+ <div class="progress">
+ <div class="progress-bar" role="progressbar"
+ aria-valuenow="0" aria-valuemin="0" aria-valuemax="0" style="width: 0">
+ 0%
+ </div>
+ </div>
+ <div id='test_url'></div>
+ <table class='table'>
+ <thead>
+ <tr>
+ <th></th>
+ <th>Passed</th>
+ <th>Failed</th>
+ <th>Timeouts</th>
+ <th>Errors</th>
+ <th>Not Run</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td></td>
+ <td class='PASS'>0</td>
+ <td class='FAIL'>0</td>
+ <td class='TIMEOUT'>0</td>
+ <td class='ERROR'>0</td>
+ <td class='NOTRUN'>0</td>
+ </tr>
+ <tr>
+ <td>
+ <label>
+ Display:
+ </label>
+ </td>
+ <td><input type=checkbox class="result-display-filter" value="PASS" checked></td>
+ <td><input type=checkbox class="result-display-filter" value="FAIL" checked></td>
+ <td><input type=checkbox class="result-display-filter" value="TIMEOUT" checked></td>
+ <td><input type=checkbox class="result-display-filter" value="ERROR" checked></td>
+ <td><input type=checkbox class="result-display-filter" value="NOTRUN" checked></td>
+ </tr>
+ </tbody>
+ </table>
+ <a class="jsonResults btn btn-primary pull-right">Download JSON results</a>
+ </div>
+
+ <div class="results">
+ <div id="manualUI">
+ <div class='panel panel-primary'>
+ <div class='panel-heading'>
+ <h4 class='panel-title'>Manual Testing</h4>
+ </div>
+ <div class="panel-body reftestUI">
+ <p>
+ The current test requires manual result marking.
+ Test and ref should compare <strong class="refType text-primary"></strong>
+ </p>
+ <p>
+ <button class="btn btn-info test">Show Test</button>
+ <button class="btn btn-info ref">Show Reference</button>
+ <span class="reftestWarn"></span>
+ </p>
+ </div>
+ <div class="panel-footer">
+ The test has:
+ <button class="btn btn-success pass">Passed</button>
+ <button class="btn btn-danger fail">Failed</button>
+ </div>
+ </div>
+ </div>
+
+ <hr>
+ <h4>Details</h4>
+ <table class='table'>
+ <thead>
+ <tr>
+ <th>Test
+ <th>Status
+ <th>Message
+ <th>Subtest Pass Rate
+ </tr>
+ </thead>
+ <tbody></tbody>
+ </table>
+ </div>
+ </div>
+
+</div>
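As a reading aid for the selection modes described in the instructions above (path prefixes separated by commas or whitespace, or a regular expression when <b>Use regular expression</b> is checked): the sketch below is a rough, hypothetical Python rendering of that filtering logic, not the runner's actual implementation (which lives in runner.js, outside this hunk); the helper name and test ids are illustrative only.

    import re

    def select_tests(test_ids, query, use_regex=False):
        """Select test ids by path prefixes ("/js, /html") or, when
        use_regex is True, by a regular expression ("^/js/|^/html/")."""
        if use_regex:
            pattern = re.compile(query)
            return [t for t in test_ids if pattern.search(t)]
        prefixes = [p for p in re.split(r"[,\s]+", query) if p]
        return [t for t in test_ids if any(t.startswith(p) for p in prefixes)]

    # Illustrative ids only:
    tests = ["/js/a.html", "/html/syntax/parsing/b.html", "/websockets/c.html"]
    assert select_tests(tests, "/js, /html") == tests[:2]
    assert select_tests(tests, "^/js/|^/html/", use_regex=True) == tests[:2]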
diff --git a/testing/web-platform/tests/tools/runner/logo.svg b/testing/web-platform/tests/tools/runner/logo.svg
new file mode 100644
index 000000000..8a25776b6
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/logo.svg
@@ -0,0 +1,8 @@
+<svg width="453px" height="453px" viewBox="0 0 453 453" xmlns="http://www.w3.org/2000/svg">
+ <circle stroke="#ed5565" stroke-width="20" fill="#fff" cx="226.5" cy="226.5" r="213.5"></circle>
+ <g transform="translate(160 250)">
+ <g transform="matrix(2.25 0 0 -2.25 0 0)">
+ <path d="m0 0c0.252-0.333 0.508-0.659 0.766-0.974-1.269-3.764-2.368-7.749-3.447-11.383-0.166-0.053-0.329-0.116-0.486-0.194-8.572 8.211-2.056 11.364 3.167 12.551m59.809 45.57c2.899 20.118-10.084 38.635-29 41.362-18.915 2.726-36.601-11.371-39.5-31.489-2.418-16.774 6.212-32.427 20.112-38.75l-0.05 0.014-0.323-2.242c0.219-0.049 0.427-0.09 0.64-0.134-0.122-0.027-0.245-0.053-0.365-0.086l-0.004 0.013s-0.042-0.017-0.106-0.042c-0.465-0.137-0.915-0.319-1.349-0.544-3.502-1.424-12.285-5.095-14.287-6.867 0 0 0.927-1.665 2.254-3.704-17.335-2.421-6.522-15.63-3.176-19.26-0.015-0.296 0.004-0.592 0.039-0.888-2.562-1.677-5.772-4.195-7.772-7.29 0 0 0.552-0.182 1.458-0.536v-0.001l0.016-0.005c1.182-0.463 2.96-1.219 4.914-2.248-3.004-13.557-7.542-9.677-10.589-4.683 0.745 2.232 0.741 4.23-0.202 5.393-0.66 0.811-1.681 1.101-2.889 0.931-0.087 0.222-0.135 0.349-0.135 0.349l-0.108-0.397c-0.057-0.01-0.115-0.021-0.173-0.034 0.005 0.003 0.008 0.005 0.013 0.008 0 0-0.032-0.008-0.078-0.021-0.508-0.116-1.042-0.306-1.593-0.566-2.759-1.158-8.023-4.248-8.639-11.088-0.208-1.056-0.212-2.015 0.002-2.812 0.001-0.014 0.001-0.026 0.003-0.04 0 0 0.527-4.561 4.288-1.894l-0.002 0.003c1.861 0.536 4.034 2.003 5.989 4.226 0.664 0.755 1.256 1.545 1.768 2.343 8.537-16.768 14.974 3.409 15.81 6.23 2.309-1.538 4.528-3.425 6.019-5.64 0 0 1.182 3.458 3.942 6.312 5.984-8.956 13.374-10.465 13.374-10.465l3.802 6.152c11.328-5.569 7.382-10.385 2.713-13.253-1.757 1.198-3.428 1.485-4.537 0.55-0.775-0.653-1.166-1.807-1.2-3.246-0.199-0.069-0.312-0.106-0.312-0.106l0.316-0.185c0.001-0.069 0.002-0.139 0.004-0.21-0.002 0.006-0.004 0.012-0.006 0.019 0 0 0.003-0.044 0.007-0.112 0.024-0.604 0.104-1.247 0.239-1.92 0.564-3.395 2.378-10.019 8.013-11.741 0.851-0.396 1.652-0.542 2.349-0.407 0.012 0 0.023-0.003 0.035-0.003 0 0 3.891-0.048 2.21 4.745l-0.004-0.004c-0.176 2.258-1.086 5.015-2.659 7.628-0.535 0.888-1.109 1.695-1.701 2.413 16.374 8.095-3.15 19.567-3.156 19.57l2.062 3.336-4.584 1.028c-0.516 0.116-1.446 0.458-2.639 0.949 0.61-0.116 1.218-0.225 1.821-0.322 0.221 0.615 0.432 1.249 0.631 1.918 1.715 5.766 2.34 12.577 1.803 18.76l1.544-3.601s0.655 0.404 1.612 0.827l-0.088-0.167c3.832-26.906 14.631-10.666 17.407-5.924 1.445 0.125 2.819 1.27 3.448 3.074 0.864 2.475 0.002 5.242-1.926 6.183-1.927 0.942-4.188-0.301-5.05-2.774-0.533-1.524-0.406-3.158 0.218-4.41-6.67-13.044-10.36-1.016-11.647 4.81 0.669 0.143 1.355 0.21 1.998 0.135 0 0-4.185 11.234-11.743 15.618-0.097 0.136-0.192 0.275-0.291 0.405-0.056-0.017-0.116-0.029-0.174-0.044l0.345 2.832c0.567 0.046 0.871 0.099 0.871 0.099l0.021 0.146-0.65 0.181c18.572-2.158 35.744 11.797 38.597 31.593" fill="#ed5565"/>
+ </g>
+ </g>
+</svg>
diff --git a/testing/web-platform/tests/tools/runner/report.css b/testing/web-platform/tests/tools/runner/report.css
new file mode 100644
index 000000000..bc25f9e1b
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/report.css
@@ -0,0 +1,43 @@
+table {
+ border-collapse: collapse;
+}
+
+tbody {
+ border-top: thin solid;
+ border-bottom: thin solid;
+}
+
+.status {
+ font-variant:small-caps;
+ color:white;
+ text-align:center;
+ font-variant:small-caps;
+}
+
+.PASS, .OK {
+ background-color:green;
+}
+
+.FAIL {
+ background-color:red;
+}
+
+.ERROR, .NOTRUN, .NONE {
+ background-color:black;
+}
+
+.TIMEOUT {
+ background-color:blue;
+}
+
+td {
+ padding:0.25em;
+}
+
+tr.test {
+ background-color:#ddd;
+}
+
+tr.subtest {
+ background-color:#eee;
+}
diff --git a/testing/web-platform/tests/tools/runner/report.py b/testing/web-platform/tests/tools/runner/report.py
new file mode 100644
index 000000000..f4b3fa716
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/report.py
@@ -0,0 +1,307 @@
+from __future__ import print_function
+
+import argparse
+import json
+import sys
+from cgi import escape
+from collections import defaultdict
+
+import types
+
+
+def html_escape(item, escape_quote=False):
+ if isinstance(item, types.StringTypes):
+ rv = escape(item)
+ if escape_quote:
+ rv = rv.replace('"', "&quot;")
+ return rv
+ else:
+ return item
+
+
+class Raw(object):
+ """Simple wrapper around a string to stop it being escaped by html_escape"""
+ def __init__(self, value):
+ self.value = value
+
+ def __unicode__(self):
+ return unicode(self.value)
+
+
+class Node(object):
+ """Node structure used when building HTML"""
+ def __init__(self, name, attrs, children):
+        # TODO: handle void elements (e.g. <br>), which should not be given a closing tag
+ self.name = name
+ self.attrs = attrs
+ self.children = children
+
+ def __unicode__(self):
+ if self.attrs:
+            # Attribute names and values are escaped; quotes in values are escaped too
+ attrs_unicode = " " + " ".join("%s=\"%s\"" % (html_escape(key),
+ html_escape(value,
+ escape_quote=True))
+ for key, value in self.attrs.iteritems())
+ else:
+ attrs_unicode = ""
+ return "<%s%s>%s</%s>\n" % (self.name,
+ attrs_unicode,
+ "".join(unicode(html_escape(item))
+ for item in self.children),
+ self.name)
+
+ def __str__(self):
+ return unicode(self).encode("utf8")
+
+
+class RootNode(object):
+ """Special Node representing the document root"""
+ def __init__(self, *children):
+ self.children = ["<!DOCTYPE html>"] + list(children)
+
+ def __unicode__(self):
+ return "".join(unicode(item) for item in self.children)
+
+ def __str__(self):
+ return unicode(self).encode("utf8")
+
+
+def flatten(iterable):
+ """Flatten a list of lists by one level so that
+ [1,["abc"], "def",[2, [3]]]
+ becomes
+ [1, "abc", "def", 2, [3]]"""
+ rv = []
+ for item in iterable:
+ if hasattr(item, "__iter__") and not isinstance(item, types.StringTypes):
+ rv.extend(item)
+ else:
+ rv.append(item)
+ return rv
+
+
+class HTML(object):
+ """Simple HTML templating system. An instance of this class can create
+ element nodes by calling methods with the same name as the element,
+ passing in children as positional arguments or as a list, and attributes
+ as keyword arguments, with _ replacing - and trailing _ for python keywords
+
+ e.g.
+
+    h = HTML()
+    print(h(h.html(
+        h.head(),
+        h.body([h.h1("Hello World!")], class_="body-class")
+    )))
+    Would give (modulo a newline after each element)
+    <!DOCTYPE html><html><head></head><body class="body-class"><h1>Hello World!</h1></body></html>"""
+ def __getattr__(self, name):
+ def make_html(self, *content, **attrs):
+ for attr_name in attrs.keys():
+ if "_" in attr_name:
+ new_name = attr_name.replace("_", "-")
+ if new_name.endswith("-"):
+ new_name = new_name[:-1]
+ attrs[new_name] = attrs.pop(attr_name)
+ return Node(name, attrs, flatten(content))
+
+ method = types.MethodType(make_html, self, HTML)
+ setattr(self, name, method)
+ return method
+
+ def __call__(self, *children):
+ return RootNode(*flatten(children))
+
+
+h = HTML()
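+# Illustrative example of the templating helper above (not part of the report logic):
+# unicode(h.td("PASS", class_="status PASS")) == '<td class="status PASS">PASS</td>\n'
+# -- every Node serializes with a trailing newline, and "class_" maps to the "class" attribute.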
+
+
+class TestResult(object):
+ """Simple holder for the results of a single test in a single UA"""
+ def __init__(self, test):
+ self.test = test
+ self.results = {}
+
+ def __cmp__(self, other):
+ return self.test == other.test
+
+ def __hash__(self):
+ return hash(self.test)
+
+
+def load_data(args):
+ """Load data treating args as a list of UA name, filename pairs"""
+ pairs = []
+ for i in xrange(0, len(args), 2):
+ pairs.append(args[i:i+2])
+
+ rv = {}
+ for UA, filename in pairs:
+ with open(filename) as f:
+ rv[UA] = json.load(f)
+
+ return rv
+
+
+def test_id(id):
+ """Convert a test id in JSON into an immutable object that
+ can be used as a dictionary key"""
+ if isinstance(id, list):
+ return tuple(id)
+ else:
+ return id
+
+
+def all_tests(data):
+ tests = defaultdict(set)
+ for UA, results in data.iteritems():
+ for result in results["results"]:
+ id = test_id(result["test"])
+ tests[id] |= set(subtest["name"] for subtest in result["subtests"])
+ return tests
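+# all_tests() maps each test id to the union of its subtest names seen across UAs,
+# e.g. {"/dom/example.html": set(["subtest 1"])} (hypothetical id); list-valued ids,
+# such as reftests recorded as [url, ref_type, ref_url], become tuples via test_id()
+# so that they can be used as dictionary keys.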
+
+
+def group_results(data):
+ """Produce a list of UAs and a dictionary mapping specific tests to their
+ status in all UAs e.g.
+    ["UA1", "UA2"], {"test_id": {"harness": {"UA1": (status1, message1),
+                                             "UA2": (status2, message2)},
+                                 "subtests": {"subtest1": {"UA1": (status1-1, message1-1),
+                                                           "UA2": (status2-1, message2-1)}}}}
+ Status and message are None if the test didn't run in a particular UA.
+ Message is None if the test didn't produce a message"""
+ tests = all_tests(data)
+
+ UAs = data.keys()
+
+ def result():
+ return {
+ "harness": dict((UA, (None, None)) for UA in UAs),
+ "subtests": None # init this later
+ }
+
+ results_by_test = defaultdict(result)
+
+ for UA, results in data.iteritems():
+ for test_data in results["results"]:
+ id = test_id(test_data["test"])
+ result = results_by_test[id]
+
+ if result["subtests"] is None:
+ result["subtests"] = dict(
+ (name, dict((UA, (None, None)) for UA in UAs)) for name in tests[id]
+ )
+
+ result["harness"][UA] = (test_data["status"], test_data["message"])
+ for subtest in test_data["subtests"]:
+ result["subtests"][subtest["name"]][UA] = (subtest["status"],
+ subtest["message"])
+
+ return UAs, results_by_test
+
+
+def status_cell(status, message=None):
+ """Produce a table cell showing the status of a test"""
+ status = status if status is not None else "NONE"
+ kwargs = {}
+ if message:
+ kwargs["title"] = message
+ status_text = status.title()
+ return h.td(status_text, class_="status " + status,
+ **kwargs)
+
+
+def test_link(test_id, subtest=None):
+ """Produce an <a> element linking to a test"""
+ if isinstance(test_id, types.StringTypes):
+ rv = [h.a(test_id, href=test_id)]
+ else:
+ rv = [h.a(test_id[0], href=test_id[0]),
+ " %s " % test_id[1],
+ h.a(test_id[2], href=test_id[2])]
+ if subtest is not None:
+ rv.append(" [%s]" % subtest)
+ return rv
+
+
+def summary(UAs, results_by_test):
+ """Render the implementation report summary"""
+ not_passing = []
+ for test, results in results_by_test.iteritems():
+ if not any(item[0] in ("PASS", "OK") for item in results["harness"].values()):
+ not_passing.append((test, None))
+ for subtest_name, subtest_results in results["subtests"].iteritems():
+ if not any(item[0] == "PASS" for item in subtest_results.values()):
+ not_passing.append((test, subtest_name))
+ if not_passing:
+ rv = [
+ h.p("The following tests failed to pass in all UAs:"),
+ h.ul([h.li(test_link(test, subtest))
+ for test, subtest in not_passing])
+ ]
+ else:
+ rv = "All tests passed in at least one UA"
+ return rv
+
+
+def result_rows(UAs, test, result):
+ """Render the results for each test run"""
+ yield h.tr(
+ h.td(
+ test_link(test),
+ rowspan=(1 + len(result["subtests"]))
+ ),
+ h.td(),
+ [status_cell(status, message)
+ for UA, (status, message) in sorted(result["harness"].items())],
+ class_="test"
+ )
+
+ for name, subtest_result in sorted(result["subtests"].iteritems()):
+ yield h.tr(
+ h.td(name),
+ [status_cell(status, message)
+ for UA, (status, message) in sorted(subtest_result.items())],
+ class_="subtest"
+ )
+
+
+def result_bodies(UAs, results_by_test):
+ return [h.tbody(result_rows(UAs, test, result))
+ for test, result in sorted(results_by_test.iteritems())]
+
+
+def generate_html(UAs, results_by_test):
+ """Generate all the HTML output"""
+ return h(h.html(
+ h.head(
+ h.meta(charset="utf8"),
+ h.title("Implementation Report"),
+ h.link(href="report.css", rel="stylesheet")),
+ h.body(
+ h.h1("Implementation Report"),
+ h.h2("Summary"),
+ summary(UAs, results_by_test),
+ h.h2("Full Results"),
+ h.table(
+ h.thead(
+ h.tr(
+ h.th("Test"),
+ h.th("Subtest"),
+ [h.th(UA) for UA in sorted(UAs)])),
+ result_bodies(UAs, results_by_test)))))
+
+
+def main(filenames):
+ data = load_data(filenames)
+ UAs, results_by_test = group_results(data)
+ return generate_html(UAs, results_by_test)
+
+
+if __name__ == "__main__":
+    if not sys.argv[1:]:
+        print("""Please supply a list of UA name, filename pairs e.g.
+
+python report.py Firefox firefox.json Chrome chrome.json IE internet_explorer.json""")
+        sys.exit(1)
+    print(main(sys.argv[1:]))
diff --git a/testing/web-platform/tests/tools/runner/runner.css b/testing/web-platform/tests/tools/runner/runner.css
new file mode 100644
index 000000000..983532fea
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/runner.css
@@ -0,0 +1,208 @@
+
+html {
+ margin: 0 8px;
+}
+
+body {
+ margin: 0;
+}
+
+html.done {
+ border: 2px solid #32cd32;
+ margin: 3px;
+ padding: 3px;
+}
+
+html:not(.done) {
+ height: 100%;
+}
+
+html:not(.done) body {
+ height: 100%;
+}
+
+html:not(.done) #wrapper {
+ height: 100%;
+ margin-top: -1.5em;
+ padding-top: 1.5em;
+ -moz-box-sizing: border-box;
+ -webkit-box-sizing: border-box;
+ box-sizing: border-box;
+}
+
+section {
+ display: block;
+ border: thin solid black;
+ padding: 0.5em 0;
+}
+
+section h1 {
+ margin: 0;
+ font-size: 1em;
+}
+
+html.done section h1 {
+ text-align: center;
+}
+
+section ol {
+ padding: 0;
+ margin: 0;
+ list-style-position: inside;
+}
+
+html.done section ol {
+ -moz-column-count: 3;
+ -webkit-column-count: 3;
+ column-count: 3;
+}
+
+section li {
+ padding: 0.1em 0.5em;
+}
+
+section li.pass:nth-child(odd) {
+ background: #e5ffe5;
+}
+section li.pass:nth-child(even) {
+ background: #def8de;
+}
+
+section li.fail:nth-child(odd) {
+ background: #ffe5e5;
+}
+
+section li.fail:nth-child(even) {
+ background: #f8dede;
+}
+
+section p {
+ margin: 0;
+}
+
+html:not(.done) section {
+ border-top: none;
+}
+
+html.done section + section {
+ border-top: none;
+}
+
+#manualUI {
+ position: fixed;
+ z-index: 2000;
+ top: -20px;
+ left: 0;
+ right: 0;
+ display: block;
+ padding: 40px;
+ background: rgba(255, 200, 0, 0.5);
+}
+
+#manualUI .panel {
+ max-width: 800px;
+ margin: auto;
+ box-shadow: 2px 2px 10px #666;
+}
+
+body > p {
+ text-align: center;
+}
+
+body > p > textarea {
+ width: 90%;
+ height: 20em;
+}
+
+body {
+ padding-top: 70px;
+}
+
+.container {
+ max-width: 800px;
+}
+
+.navbar-brand > img {
+ display: inline;
+}
+
+.navbar-inverse .navbar-brand {
+ color: #fff;
+}
+
+.form-group {
+ clear:both;
+}
+
+.horizontal-form .form-group {
+ padding: 6px;
+}
+
+header.navbar-inverse {
+ background: linear-gradient(to bottom, rgb(7, 62, 128) 0px, rgb(0, 45, 80) 100%);
+}
+
+ul.error, ul.warning {
+ padding: 0;
+}
+
+td.PASS {
+ color: #48cfad;
+}
+
+td.FAIL {
+ color: #ed5565;
+}
+
+td.TIMEOUT {
+ color: #f6bb42;
+}
+
+td.NOTRUN {
+ color: #00c;
+}
+
+td.ERROR {
+ color: #da4453;
+ font-weight: bold;
+}
+.stopped {
+ background-image: linear-gradient(to bottom, #fc0000 0, #770000 100%);
+}
+
+.col-sm-9 label {
+ margin-right: 20px;
+}
+
+.instructions {
+ padding-left: 8px;
+ padding-right: 8px;
+}
+
+@keyframes alert_updating {
+ from {
+ background-color: inherit;
+ }
+ to {
+ background-color: #ffc;
+ }
+}
+
+#manifest {
+ padding-left: 6px;
+ padding-right: 6px;
+ font-size: 80%;
+ font-weight: normal;
+ font-style: italic;
+ color: #999;
+ animation-duration: 1.5s;
+ animation-name: alert_updating;
+ animation-iteration-count: infinite;
+ animation-direction: alternate;
+}
+
+.reftestWarn {
+ color: yellow;
+ background: black;
+ padding: 8px;
+}
diff --git a/testing/web-platform/tests/tools/runner/runner.js b/testing/web-platform/tests/tools/runner/runner.js
new file mode 100644
index 000000000..4319c234e
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/runner.js
@@ -0,0 +1,854 @@
+/*jshint nonew: false */
+(function() {
+"use strict";
+var runner;
+var testharness_properties = {output:false,
+ timeout_multiplier:1};
+
+function Manifest(path) {
+ this.data = null;
+ this.path = path;
+ this.num_tests = null;
+}
+
+Manifest.prototype = {
+ load: function(loaded_callback) {
+ this.generate(loaded_callback);
+ },
+
+ do_load: function(loaded_callback) {
+ var xhr = new XMLHttpRequest();
+ xhr.onreadystatechange = function() {
+ if (xhr.readyState !== 4) {
+ return;
+ }
+ if (!(xhr.status === 200 || xhr.status === 0)) {
+ throw new Error("Manifest " + this.path + " failed to load");
+ }
+ this.data = JSON.parse(xhr.responseText);
+ loaded_callback();
+ }.bind(this);
+ xhr.open("GET", this.path);
+ xhr.send(null);
+ },
+
+ generate: function(loaded_callback) {
+ var xhr = new XMLHttpRequest();
+ xhr.onreadystatechange = function() {
+ if (xhr.readyState !== 4) {
+ return;
+ }
+ if (!(xhr.status === 200 || xhr.status === 0)) {
+ throw new Error("Manifest generation failed");
+ }
+ this.do_load(loaded_callback);
+ }.bind(this);
+ xhr.open("POST", "update_manifest.py");
+ xhr.send(null);
+ },
+
+ by_type:function(type) {
+ var ret = [] ;
+ if (this.data.items.hasOwnProperty(type)) {
+ ret = this.data.items[type].slice(0) ;
+ }
+ // local_changes.items in manifest is an Object just as
+ // items is. However, the properties of local_changes.items
+ // are Objects and the properties of items are Arrays.
+ // So we need to extract any relevant local changes by iterating
+ // over the Object and pulling out the referenced nodes as array items.
+ if (this.data.hasOwnProperty("local_changes")) {
+ var local = this.data.local_changes ;
+ // add in any local items
+ if (local.items.hasOwnProperty(type)) {
+ Object.keys(local.items[type]).forEach(function(ref) {
+ ret.push(local.items[type][ref][0]) ;
+ }.bind(this));
+ }
+ // remove any items that are locally deleted but not yet committed
+ // note that the deleted and deleted_reftests properties of the local_changes
+ // object are always present, even if they are empty
+ if (ret.length && local.deleted.length) {
+ // make a hash of the deleted to speed searching
+ var dels = {} ;
+ local.deleted.forEach(function(x) { dels[x] = true; } );
+ for (var j = ret.length-1; j >= 0; j--) {
+ if ( dels[ret[j].path] || (type === "reftest" && local.deleted_reftests[ret[j].path]) ){
+ // we have a match
+ ret.splice(j, 1) ;
+ }
+ }
+ }
+ }
+ return ret ;
+ }
+};
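+// Rough shape of the manifest data consumed by by_type() (illustrative, not the full schema):
+//   { "items": { "testharness": [{"url": "/x/test.html", ...}, ...],
+//                "reftest": [{"url": "...", "references": [["/x/ref.html", "=="]], ...}, ...] },
+//     "local_changes": { "items": {...}, "deleted": [...], "deleted_reftests": {...} } }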
+
+function ManifestIterator(manifest, path, test_types, use_regex) {
+ this.manifest = manifest;
+ this.paths = null;
+ this.regex_pattern = null;
+ this.test_types = test_types;
+ this.test_types_index = -1;
+ this.test_list = null;
+ this.test_index = null;
+
+ if (use_regex) {
+ this.regex_pattern = path;
+ } else {
+ // Split paths by either a comma or whitespace, and ignore empty sub-strings.
+ this.paths = path.split(/[,\s]+/).filter(function(s) { return s.length > 0; });
+ }
+}
+
+ManifestIterator.prototype = {
+ next: function() {
+ var manifest_item = null;
+
+ if (this.test_types.length === 0) {
+ return null;
+ }
+
+ while (!manifest_item) {
+ while (this.test_list === null || this.test_index >= this.test_list.length) {
+ this.test_types_index++;
+ if (this.test_types_index >= this.test_types.length) {
+ return null;
+ }
+ this.test_index = 0;
+ this.test_list = this.manifest.by_type(this.test_types[this.test_types_index]);
+ }
+
+ manifest_item = this.test_list[this.test_index++];
+ while (manifest_item && !this.matches(manifest_item)) {
+ manifest_item = this.test_list[this.test_index++];
+ }
+ if (manifest_item) {
+ return this.to_test(manifest_item);
+ }
+ }
+ },
+
+ matches: function(manifest_item) {
+ if (this.regex_pattern !== null) {
+ return manifest_item.url.match(this.regex_pattern);
+ } else {
+ return this.paths.some(function(p) {
+ return manifest_item.url.indexOf(p) === 0;
+ });
+ }
+ },
+
+ to_test: function(manifest_item) {
+ var test = {
+ type: this.test_types[this.test_types_index],
+ url: manifest_item.url
+ };
+ if (manifest_item.hasOwnProperty("references")) {
+ test.ref_length = manifest_item.references.length;
+ test.ref_type = manifest_item.references[0][1];
+ test.ref_url = manifest_item.references[0][0];
+ }
+ return test;
+ },
+
+ count: function() {
+ return this.test_types.reduce(function(prev, current) {
+ var matches = this.manifest.by_type(current).filter(function(x) {
+ return this.matches(x);
+ }.bind(this));
+ return prev + matches.length;
+ }.bind(this), 0);
+ }
+};
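+// Example (illustrative): new ManifestIterator(manifest, "/dom/, /html/", ["testharness"], false)
+// yields every testharness test whose URL starts with "/dom/" or "/html/"; when use_regex is
+// true the path string is instead treated as a regular expression matched against each URL.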
+
+function VisualOutput(elem, runner) {
+ this.elem = elem;
+ this.runner = runner;
+ this.results_table = null;
+ this.section_wrapper = null;
+ this.results_table = this.elem.querySelector(".results > table");
+ this.section = null;
+ this.manifest_status = this.elem.querySelector("#manifest");
+ this.progress = this.elem.querySelector(".summary .progress");
+ this.meter = this.progress.querySelector(".progress-bar");
+ this.result_count = null;
+ this.json_results_area = this.elem.querySelector("textarea");
+ this.instructions = document.querySelector(".instructions");
+
+ this.elem.style.display = "none";
+ this.runner.manifest_wait_callbacks.push(this.on_manifest_wait.bind(this));
+ this.runner.start_callbacks.push(this.on_start.bind(this));
+ this.runner.result_callbacks.push(this.on_result.bind(this));
+ this.runner.done_callbacks.push(this.on_done.bind(this));
+
+ this.display_filter_state = {};
+
+ var visual_output = this;
+ var display_filter_inputs = this.elem.querySelectorAll(".result-display-filter");
+ for (var i = 0; i < display_filter_inputs.length; ++i) {
+ var display_filter_input = display_filter_inputs[i];
+ this.display_filter_state[display_filter_input.value] = display_filter_input.checked;
+ display_filter_input.addEventListener("change", function(e) {
+ visual_output.apply_display_filter(e.target.value, e.target.checked);
+        });
+ }
+}
+
+VisualOutput.prototype = {
+ clear: function() {
+ this.result_count = {"PASS":0,
+ "FAIL":0,
+ "ERROR":0,
+ "TIMEOUT":0,
+ "NOTRUN":0};
+ for (var p in this.result_count) {
+ if (this.result_count.hasOwnProperty(p)) {
+ this.elem.querySelector("td." + p).textContent = 0;
+ }
+ }
+ if (this.json_results_area) {
+ this.json_results_area.parentNode.removeChild(this.json_results_area);
+ }
+ this.meter.style.width = '0px';
+ this.meter.textContent = '0%';
+ this.manifest_status.style.display = "none";
+ this.elem.querySelector(".jsonResults").style.display = "none";
+ this.results_table.removeChild(this.results_table.tBodies[0]);
+ this.results_table.appendChild(document.createElement("tbody"));
+ },
+
+ on_manifest_wait: function() {
+ this.clear();
+ this.instructions.style.display = "none";
+ this.elem.style.display = "block";
+ this.manifest_status.style.display = "inline";
+ },
+
+ on_start: function() {
+ this.clear();
+ this.instructions.style.display = "none";
+ this.elem.style.display = "block";
+ this.meter.classList.remove("stopped");
+ this.meter.classList.add("progress-striped", "active");
+ },
+
+ on_result: function(test, status, message, subtests) {
+ var row = document.createElement("tr");
+
+ var subtest_pass_count = subtests.reduce(function(prev, current) {
+ return (current.status === "PASS") ? prev + 1 : prev;
+ }, 0);
+
+ var subtest_notrun_count = subtests.reduce(function(prev, current) {
+ return (current.status === "NOTRUN") ? prev +1 : prev;
+ }, 0);
+
+ var subtests_count = subtests.length;
+
+ var test_status;
+ if (subtest_pass_count === subtests_count &&
+ (status == "OK" || status == "PASS")) {
+ test_status = "PASS";
+ } else if (subtest_notrun_count == subtests_count) {
+ test_status = "NOTRUN";
+ } else if (subtests_count > 0 && status === "OK") {
+ test_status = "FAIL";
+ } else {
+ test_status = status;
+ }
+
+ subtests.forEach(function(subtest) {
+ if (this.result_count.hasOwnProperty(subtest.status)) {
+ this.result_count[subtest.status] += 1;
+ }
+ }.bind(this));
+ if (this.result_count.hasOwnProperty(status)) {
+ this.result_count[status] += 1;
+ }
+
+ var name_node = row.appendChild(document.createElement("td"));
+ name_node.appendChild(this.test_name_node(test));
+
+ var status_node = row.appendChild(document.createElement("td"));
+ status_node.textContent = test_status;
+ status_node.className = test_status;
+
+ var message_node = row.appendChild(document.createElement("td"));
+ message_node.textContent = message || "";
+
+ var subtests_node = row.appendChild(document.createElement("td"));
+ if (subtests_count) {
+ subtests_node.textContent = subtest_pass_count + "/" + subtests_count;
+ } else {
+ if (status == "PASS") {
+ subtests_node.textContent = "1/1";
+ } else {
+ subtests_node.textContent = "0/1";
+ }
+ }
+
+ var status_arr = ["PASS", "FAIL", "ERROR", "TIMEOUT", "NOTRUN"];
+ for (var i = 0; i < status_arr.length; i++) {
+ this.elem.querySelector("td." + status_arr[i]).textContent = this.result_count[status_arr[i]];
+ }
+
+ this.apply_display_filter_to_result_row(row, this.display_filter_state[test_status]);
+ this.results_table.tBodies[0].appendChild(row);
+ this.update_meter(this.runner.progress(), this.runner.results.count(), this.runner.test_count());
+ },
+
+ on_done: function() {
+ this.meter.setAttribute("aria-valuenow", this.meter.getAttribute("aria-valuemax"));
+ this.meter.style.width = "100%";
+ if (this.runner.stop_flag) {
+ this.meter.textContent = "Stopped";
+ this.meter.classList.add("stopped");
+ } else {
+ this.meter.textContent = "Done!";
+ }
+ this.meter.classList.remove("progress-striped", "active");
+ this.runner.test_div.textContent = "";
+ //add the json serialization of the results
+ var a = this.elem.querySelector(".jsonResults");
+ var json = this.runner.results.to_json();
+
+ if (document.getElementById("dumpit").checked) {
+ this.json_results_area = Array.prototype.slice.call(this.elem.querySelectorAll("textarea"));
+            for (var i = 0, t = this.json_results_area.length; i < t; i++) {
+ this.elem.removeChild(this.json_results_area[i]);
+ }
+ this.json_results_area = document.createElement("textarea");
+ this.json_results_area.style.width = "100%";
+ this.json_results_area.setAttribute("rows", "50");
+ this.elem.appendChild(this.json_results_area);
+ this.json_results_area.textContent = json;
+ }
+ var blob = new Blob([json], { type: "application/json" });
+ a.href = window.URL.createObjectURL(blob);
+ a.download = "runner-results.json";
+ a.textContent = "Download JSON results";
+ if (!a.getAttribute("download")) a.textContent += " (right-click and save as to download)";
+ a.style.display = "inline";
+ },
+
+ test_name_node: function(test) {
+ if (!test.hasOwnProperty("ref_url")) {
+ return this.link(test.url);
+ } else {
+ var wrapper = document.createElement("span");
+ wrapper.appendChild(this.link(test.url));
+ wrapper.appendChild(document.createTextNode(" " + test.ref_type + " "));
+ wrapper.appendChild(this.link(test.ref_url));
+ return wrapper;
+ }
+ },
+
+ link: function(href) {
+ var link = document.createElement("a");
+ link.href = this.runner.server + href;
+ link.textContent = href;
+ return link;
+ },
+
+ update_meter: function(progress, count, total) {
+ this.meter.setAttribute("aria-valuenow", count);
+ this.meter.setAttribute("aria-valuemax", total);
+ this.meter.textContent = this.meter.style.width = (progress * 100).toFixed(1) + "%";
+ },
+
+ apply_display_filter: function(test_status, display_state) {
+ this.display_filter_state[test_status] = display_state;
+ var result_cells = this.elem.querySelectorAll(".results > table tr td." + test_status);
+ for (var i = 0; i < result_cells.length; ++i) {
+ this.apply_display_filter_to_result_row(result_cells[i].parentNode, display_state)
+ }
+ },
+
+ apply_display_filter_to_result_row: function(result_row, display_state) {
+ result_row.style.display = display_state ? "" : "none";
+ }
+};
+
+function ManualUI(elem, runner) {
+ this.elem = elem;
+ this.runner = runner;
+ this.pass_button = this.elem.querySelector("button.pass");
+ this.fail_button = this.elem.querySelector("button.fail");
+ this.ref_buttons = this.elem.querySelector(".reftestUI");
+ this.ref_type = this.ref_buttons.querySelector(".refType");
+ this.ref_warning = this.elem.querySelector(".reftestWarn");
+ this.test_button = this.ref_buttons.querySelector("button.test");
+ this.ref_button = this.ref_buttons.querySelector("button.ref");
+
+ this.hide();
+
+ this.runner.test_start_callbacks.push(this.on_test_start.bind(this));
+ this.runner.test_pause_callbacks.push(this.hide.bind(this));
+ this.runner.done_callbacks.push(this.on_done.bind(this));
+
+ this.pass_button.onclick = function() {
+ this.disable_buttons();
+ this.runner.on_result("PASS", "", []);
+ }.bind(this);
+
+ this.fail_button.onclick = function() {
+ this.disable_buttons();
+ this.runner.on_result("FAIL", "", []);
+ }.bind(this);
+}
+
+ManualUI.prototype = {
+ show: function() {
+ this.elem.style.display = "block";
+ setTimeout(this.enable_buttons.bind(this), 200);
+ },
+
+ hide: function() {
+ this.elem.style.display = "none";
+ },
+
+ show_ref: function() {
+ this.ref_buttons.style.display = "block";
+ this.test_button.onclick = function() {
+ this.runner.load(this.runner.current_test.url);
+ }.bind(this);
+ this.ref_button.onclick = function() {
+ this.runner.load(this.runner.current_test.ref_url);
+ }.bind(this);
+ },
+
+ hide_ref: function() {
+ this.ref_buttons.style.display = "none";
+ },
+
+ disable_buttons: function() {
+ this.pass_button.disabled = true;
+ this.fail_button.disabled = true;
+ },
+
+ enable_buttons: function() {
+ this.pass_button.disabled = false;
+ this.fail_button.disabled = false;
+ },
+
+ on_test_start: function(test) {
+ if (test.type == "manual" || test.type == "reftest") {
+ this.show();
+ } else {
+ this.hide();
+ }
+ if (test.type == "reftest") {
+ this.show_ref();
+ this.ref_type.textContent = test.ref_type === "==" ? "equal" : "unequal";
+ if (test.ref_length > 1) {
+ this.ref_warning.textContent = "WARNING: only presenting first of " + test.ref_length + " references";
+ this.ref_warning.style.display = "inline";
+ } else {
+ this.ref_warning.textContent = "";
+ this.ref_warning.style.display = "none";
+ }
+ } else {
+ this.hide_ref();
+ }
+ },
+
+ on_done: function() {
+ this.hide();
+ }
+};
+
+function TestControl(elem, runner) {
+ this.elem = elem;
+ this.path_input = this.elem.querySelector(".path");
+ this.path_input.addEventListener("change", function() {
+ this.set_counts();
+ }.bind(this), false);
+ this.use_regex_input = this.elem.querySelector("#use_regex");
+ this.use_regex_input.addEventListener("change", function() {
+ this.set_counts();
+ }.bind(this), false);
+ this.pause_button = this.elem.querySelector("button.togglePause");
+ this.start_button = this.elem.querySelector("button.toggleStart");
+ this.type_checkboxes = Array.prototype.slice.call(
+ this.elem.querySelectorAll("input[type=checkbox].test-type"));
+ this.type_checkboxes.forEach(function(elem) {
+ elem.addEventListener("change", function() {
+ this.set_counts();
+ }.bind(this),
+ false);
+ elem.addEventListener("click", function() {
+ this.start_button.disabled = this.get_test_types().length < 1;
+ }.bind(this),
+ false);
+ }.bind(this));
+
+ this.timeout_input = this.elem.querySelector(".timeout_multiplier");
+ this.render_checkbox = this.elem.querySelector(".render");
+ this.testcount_area = this.elem.querySelector("#testcount");
+ this.runner = runner;
+ this.runner.done_callbacks.push(this.on_done.bind(this));
+ this.set_start();
+ this.set_counts();
+}
+
+TestControl.prototype = {
+ set_start: function() {
+ this.start_button.disabled = this.get_test_types().length < 1;
+ this.pause_button.disabled = true;
+ this.start_button.textContent = "Start";
+ this.path_input.disabled = false;
+ this.type_checkboxes.forEach(function(elem) {
+ elem.disabled = false;
+ });
+ this.start_button.onclick = function() {
+ var path = this.get_path();
+ var test_types = this.get_test_types();
+ var settings = this.get_testharness_settings();
+ var use_regex = this.get_use_regex();
+ this.runner.start(path, test_types, settings, use_regex);
+ this.set_stop();
+ this.set_pause();
+ }.bind(this);
+ },
+
+ set_stop: function() {
+ clearTimeout(this.runner.timeout);
+ this.pause_button.disabled = false;
+ this.start_button.textContent = "Stop";
+ this.path_input.disabled = true;
+ this.type_checkboxes.forEach(function(elem) {
+ elem.disabled = true;
+ });
+ this.start_button.onclick = function() {
+ this.runner.stop_flag = true;
+ this.runner.done();
+ }.bind(this);
+ },
+
+ set_pause: function() {
+ this.pause_button.textContent = "Pause";
+ this.pause_button.onclick = function() {
+ this.runner.pause();
+ this.set_resume();
+ }.bind(this);
+ },
+
+ set_resume: function() {
+ this.pause_button.textContent = "Resume";
+ this.pause_button.onclick = function() {
+ this.runner.unpause();
+ this.set_pause();
+ }.bind(this);
+
+ },
+
+ set_counts: function() {
+ if (this.runner.manifest_loading) {
+ setTimeout(function() {
+ this.set_counts();
+ }.bind(this), 1000);
+ return;
+ }
+ var path = this.get_path();
+ var test_types = this.get_test_types();
+ var use_regex = this.get_use_regex();
+ var iterator = new ManifestIterator(this.runner.manifest, path, test_types, use_regex);
+ var count = iterator.count();
+ this.testcount_area.textContent = count;
+ },
+
+ get_path: function() {
+ return this.path_input.value;
+ },
+
+ get_test_types: function() {
+ return this.type_checkboxes.filter(function(elem) {
+ return elem.checked;
+ }).map(function(elem) {
+ return elem.value;
+ });
+ },
+
+ get_testharness_settings: function() {
+ return {timeout_multiplier: parseFloat(this.timeout_input.value),
+ output: this.render_checkbox.checked};
+ },
+
+ get_use_regex: function() {
+ return this.use_regex_input.checked;
+ },
+
+ on_done: function() {
+ this.set_pause();
+ this.set_start();
+ }
+};
+
+function Results(runner) {
+ this.test_results = null;
+ this.runner = runner;
+
+ this.runner.start_callbacks.push(this.on_start.bind(this));
+}
+
+Results.prototype = {
+ on_start: function() {
+ this.test_results = [];
+ },
+
+ set: function(test, status, message, subtests) {
+ this.test_results.push({"test":test,
+ "subtests":subtests,
+ "status":status,
+ "message":message});
+ },
+
+ count: function() {
+ return this.test_results.length;
+ },
+
+ to_json: function() {
+ var data = {
+ "results": this.test_results.map(function(result) {
+ var rv = {"test":(result.test.hasOwnProperty("ref_url") ?
+ [result.test.url, result.test.ref_type, result.test.ref_url] :
+ result.test.url),
+ "subtests":result.subtests,
+ "status":result.status,
+ "message":result.message};
+ return rv;
+ })
+ };
+ return JSON.stringify(data, null, 2);
+ }
+};
+
+function Runner(manifest_path) {
+ this.server = location.protocol + "//" + location.host;
+ this.manifest = new Manifest(manifest_path);
+ this.path = null;
+ this.test_types = null;
+ this.manifest_iterator = null;
+
+ this.test_window = null;
+ this.test_div = document.getElementById('test_url');
+ this.current_test = null;
+ this.timeout = null;
+ this.num_tests = null;
+ this.pause_flag = false;
+ this.stop_flag = false;
+ this.done_flag = false;
+
+ this.manifest_wait_callbacks = [];
+ this.start_callbacks = [];
+ this.test_start_callbacks = [];
+ this.test_pause_callbacks = [];
+ this.result_callbacks = [];
+ this.done_callbacks = [];
+
+ this.results = new Results(this);
+
+ this.start_after_manifest_load = false;
+ this.manifest_loading = true;
+ this.manifest.load(this.manifest_loaded.bind(this));
+}
+
+Runner.prototype = {
+ test_timeout: 20000, //ms
+
+ currentTest: function() {
+ return this.manifest[this.mTestCount];
+ },
+
+ open_test_window: function() {
+ this.test_window = window.open("about:blank", 800, 600);
+ },
+
+ manifest_loaded: function() {
+ this.manifest_loading = false;
+ if (this.start_after_manifest_load) {
+ this.do_start();
+ }
+ },
+
+ start: function(path, test_types, testharness_settings, use_regex) {
+ this.pause_flag = false;
+ this.stop_flag = false;
+ this.done_flag = false;
+ this.path = path;
+ this.use_regex = use_regex;
+ this.test_types = test_types;
+ window.testharness_properties = testharness_settings;
+ this.manifest_iterator = new ManifestIterator(this.manifest, this.path, this.test_types, this.use_regex);
+ this.num_tests = null;
+
+ if (this.manifest.data === null) {
+ this.wait_for_manifest();
+ } else {
+ this.do_start();
+ }
+ },
+
+ wait_for_manifest: function() {
+ this.start_after_manifest_load = true;
+ this.manifest_wait_callbacks.forEach(function(callback) {
+ callback();
+ });
+ },
+
+ do_start: function() {
+ if (this.manifest_iterator.count() > 0) {
+ this.open_test_window();
+ this.start_callbacks.forEach(function(callback) {
+ callback();
+ });
+ this.run_next_test();
+ } else {
+ var tests = "tests";
+ if (this.test_types.length < 3) {
+ tests = this.test_types.join(" tests or ") + " tests";
+ }
+            var message = "No " + tests + " found in this path.";
+ document.querySelector(".path").setCustomValidity(message);
+ this.done();
+ }
+ },
+
+ pause: function() {
+ this.pause_flag = true;
+ this.test_pause_callbacks.forEach(function(callback) {
+ callback(this.current_test);
+ }.bind(this));
+ },
+
+ unpause: function() {
+ this.pause_flag = false;
+ this.run_next_test();
+ },
+
+ on_result: function(status, message, subtests) {
+ clearTimeout(this.timeout);
+ this.results.set(this.current_test, status, message, subtests);
+ this.result_callbacks.forEach(function(callback) {
+ callback(this.current_test, status, message, subtests);
+ }.bind(this));
+ this.run_next_test();
+ },
+
+ on_timeout: function() {
+ this.on_result("TIMEOUT", "", []);
+ },
+
+ done: function() {
+ this.done_flag = true;
+ if (this.test_window) {
+ this.test_window.close();
+ }
+ this.done_callbacks.forEach(function(callback) {
+ callback();
+ });
+ },
+
+ run_next_test: function() {
+ if (this.pause_flag) {
+ return;
+ }
+ var next_test = this.manifest_iterator.next();
+        if (next_test === null || this.done_flag) {
+ this.done();
+ return;
+ }
+
+ this.current_test = next_test;
+
+ if (next_test.type === "testharness") {
+ this.timeout = setTimeout(this.on_timeout.bind(this),
+ this.test_timeout * window.testharness_properties.timeout_multiplier);
+ }
+ this.test_div.textContent = this.current_test.url;
+ this.load(this.current_test.url);
+
+ this.test_start_callbacks.forEach(function(callback) {
+ callback(this.current_test);
+ }.bind(this));
+ },
+
+ load: function(path) {
+ if (this.test_window.location === null) {
+ this.open_test_window();
+ }
+ this.test_window.location.href = this.server + path;
+ },
+
+ progress: function() {
+ return this.results.count() / this.test_count();
+ },
+
+ test_count: function() {
+ if (this.num_tests === null) {
+ this.num_tests = this.manifest_iterator.count();
+ }
+ return this.num_tests;
+ }
+
+};
+
+
+function parseOptions() {
+ var options = {
+ test_types: ["testharness", "reftest", "manual"]
+ };
+
+ var optionstrings = location.search.substring(1).split("&");
+ for (var i = 0, il = optionstrings.length; i < il; ++i) {
+ var opt = optionstrings[i];
+ //TODO: fix this for complex-valued options
+ options[opt.substring(0, opt.indexOf("="))] =
+ opt.substring(opt.indexOf("=") + 1);
+ }
+ return options;
+}
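+// For example, a query string of "?autorun=1&path=/dom/" produces
+// {test_types: ["testharness", "reftest", "manual"], autorun: "1", path: "/dom/"}.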
+
+function setup() {
+ var options = parseOptions();
+
+ if (options.path) {
+ document.getElementById('path').value = options.path;
+ }
+
+ runner = new Runner("/MANIFEST.json", options);
+ var test_control = new TestControl(document.getElementById("testControl"), runner);
+ new ManualUI(document.getElementById("manualUI"), runner);
+ new VisualOutput(document.getElementById("output"), runner);
+
+ if (options.autorun === "1") {
+ runner.start(test_control.get_path(),
+ test_control.get_test_types(),
+ test_control.get_testharness_settings(),
+ test_control.get_use_regex());
+ return;
+ }
+}
+
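+// Called from the test window when a testharness test finishes; the maps below translate
+// the harness's numeric status codes into the names used throughout the runner UI.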
+window.completion_callback = function(tests, status) {
+ var harness_status_map = {0:"OK", 1:"ERROR", 2:"TIMEOUT", 3:"NOTRUN"};
+ var subtest_status_map = {0:"PASS", 1:"FAIL", 2:"TIMEOUT", 3:"NOTRUN"};
+
+    // The JSON round-trip below works around IE, which keeps hold of objects created in
+    // other windows and then loses track of them once that window is closed.
+ var subtest_results = JSON.parse(JSON.stringify(
+ tests.map(function (test) {
+ return {name: test.name,
+ status: subtest_status_map[test.status],
+ message: test.message};
+ })
+ ));
+
+ runner.on_result(harness_status_map[status.status],
+ status.message,
+ subtest_results);
+};
+
+window.addEventListener("DOMContentLoaded", setup, false);
+})();
diff --git a/testing/web-platform/tests/tools/runner/update_manifest.py b/testing/web-platform/tests/tools/runner/update_manifest.py
new file mode 100644
index 000000000..cf6d1093b
--- /dev/null
+++ b/testing/web-platform/tests/tools/runner/update_manifest.py
@@ -0,0 +1,19 @@
+import imp
+import json
+import os
+import sys
+
+here = os.path.dirname(__file__)
+localpaths = imp.load_source("localpaths", os.path.abspath(os.path.join(here, os.pardir, "localpaths.py")))
+
+root = localpaths.repo_root
+
+import manifest
+
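+# wptserve-style handler: runner.js POSTs to update_manifest.py (see Manifest.generate),
+# the manifest is rebuilt on disk, and the returned JSON tells the client where to fetch it.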
+def main(request, response):
+ path = os.path.join(root, "MANIFEST.json")
+ manifest_file = manifest.manifest.load(root, path)
+ manifest.update.update(root, "/", manifest_file)
+ manifest.manifest.write(manifest_file, path)
+
+ return [("Content-Type", "application/json")], json.dumps({"url": "/MANIFEST.json"})
diff --git a/testing/web-platform/tests/tools/scripts/id2path.js b/testing/web-platform/tests/tools/scripts/id2path.js
new file mode 100644
index 000000000..39b8d644b
--- /dev/null
+++ b/testing/web-platform/tests/tools/scripts/id2path.js
@@ -0,0 +1,12 @@
+
+var fs = require("fs")
+, pth = require("path")
+, id = process.argv[2]
+;
+
+if (!id) {
+ console.log("Missing ID");
+ process.exit(1);
+}
+
+console.log(JSON.parse(fs.readFileSync(pth.join(__dirname, "id2path.json"), "utf8"))[id]);
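+// Example: `node id2path.js introduction` prints "microdata/introduction", the path that
+// id maps to in id2path.json (added below).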
diff --git a/testing/web-platform/tests/tools/scripts/id2path.json b/testing/web-platform/tests/tools/scripts/id2path.json
new file mode 100644
index 000000000..491bd8694
--- /dev/null
+++ b/testing/web-platform/tests/tools/scripts/id2path.json
@@ -0,0 +1,822 @@
+{
+ "introduction": "microdata/introduction",
+ "background": "html/introduction/background",
+ "audience": "html/introduction/audience",
+ "scope": "html/introduction/scope",
+ "history-0": "html/introduction/history-0",
+ "design-notes": "html/introduction/design-notes",
+ "serializability-of-script-execution": "html/introduction/design-notes/serializability-of-script-execution",
+ "compliance-with-other-specifications": "html/introduction/design-notes/compliance-with-other-specifications",
+ "html-vs-xhtml": "html/introduction/html-vs-xhtml",
+ "structure-of-this-specification": "html/introduction/structure-of-this-specification",
+ "how-to-read-this-specification": "html/introduction/structure-of-this-specification/how-to-read-this-specification",
+ "typographic-conventions": "html/introduction/structure-of-this-specification/typographic-conventions",
+ "fingerprint": "html/introduction/fingerprint",
+ "a-quick-introduction-to-html": "html/introduction/a-quick-introduction-to-html",
+ "writing-secure-applications-with-html": "html/introduction/a-quick-introduction-to-html/writing-secure-applications-with-html",
+ "common-pitfalls-to-avoid-when-using-the-scripting-apis": "html/introduction/a-quick-introduction-to-html/common-pitfalls-to-avoid-when-using-the-scripting-apis",
+ "conformance-requirements-for-authors": "html/introduction/conformance-requirements-for-authors",
+ "presentational-markup": "html/introduction/conformance-requirements-for-authors/presentational-markup",
+ "syntax-errors": "html/introduction/conformance-requirements-for-authors/syntax-errors",
+ "restrictions-on-content-models-and-on-attribute-values": "html/introduction/conformance-requirements-for-authors/restrictions-on-content-models-and-on-attribute-values",
+ "suggested-reading": "html/introduction/suggested-reading",
+ "infrastructure": "html/infrastructure",
+ "terminology": "microdata/terminology",
+ "resources": "html/infrastructure/terminology/resources",
+ "xml": "html/infrastructure/terminology/xml",
+ "dom-trees": "html/infrastructure/terminology/dom-trees",
+ "scripting-0": "html/infrastructure/terminology/scripting-0",
+ "plugins": "html/infrastructure/terminology/plugins",
+ "character-encodings": "html/infrastructure/terminology/character-encodings",
+ "conformance-requirements": "microdata/conformance-requirements",
+ "conformance-classes": "html/infrastructure/conformance-requirements/conformance-classes",
+ "dependencies": "microdata/dependencies",
+ "extensibility": "html/infrastructure/conformance-requirements/extensibility",
+ "case-sensitivity-and-string-comparison": "html/infrastructure/case-sensitivity-and-string-comparison",
+ "utf-8": "html/infrastructure/utf-8",
+ "common-microsyntaxes": "html/infrastructure/common-microsyntaxes",
+ "common-parser-idioms": "html/infrastructure/common-microsyntaxes/common-parser-idioms",
+ "boolean-attributes": "html/infrastructure/common-microsyntaxes/boolean-attributes",
+ "keywords-and-enumerated-attributes": "html/infrastructure/common-microsyntaxes/keywords-and-enumerated-attributes",
+ "numbers": "html/infrastructure/common-microsyntaxes/numbers",
+ "signed-integers": "html/infrastructure/common-microsyntaxes/numbers",
+ "non-negative-integers": "html/infrastructure/common-microsyntaxes/numbers",
+ "floating-point-numbers": "html/infrastructure/common-microsyntaxes/numbers",
+ "percentages-and-dimensions": "html/infrastructure/common-microsyntaxes/numbers",
+ "lists-of-integers": "html/infrastructure/common-microsyntaxes/numbers",
+ "lists-of-dimensions": "html/infrastructure/common-microsyntaxes/numbers",
+ "dates-and-times": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "months": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "dates": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "yearless-dates": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "times": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "local-dates-and-times": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "time-zones": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "global-dates-and-times": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "weeks": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "durations": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "vaguer-moments-in-time": "html/infrastructure/common-microsyntaxes/dates-and-times",
+ "colors": "html/infrastructure/common-microsyntaxes/colors",
+ "space-separated-tokens": "html/infrastructure/common-microsyntaxes/space-separated-tokens",
+ "comma-separated-tokens": "html/infrastructure/common-microsyntaxes/comma-separated-tokens",
+ "syntax-references": "html/infrastructure/common-microsyntaxes/syntax-references",
+ "mq": "html/infrastructure/common-microsyntaxes/mq",
+ "urls": "html/infrastructure/urls",
+ "terminology-0": "html/infrastructure/urls/terminology-0",
+ "parsing-urls": "html/infrastructure/urls/parsing-urls",
+ "base-urls": "html/infrastructure/urls/base-urls",
+ "resolving-urls": "html/infrastructure/urls/resolving-urls",
+ "url-manipulation-and-creation": "html/infrastructure/urls/url-manipulation-and-creation",
+ "dynamic-changes-to-base-urls": "html/infrastructure/urls/dynamic-changes-to-base-urls",
+ "interfaces-for-url-manipulation": "html/infrastructure/urls/interfaces-for-url-manipulation",
+ "fetching-resources": "html/infrastructure/fetching-resources",
+ "terminology-1": "html/infrastructure/fetching-resources/terminology-1",
+ "processing-model": "html/infrastructure/fetching-resources/processing-model",
+ "encrypted-http-and-related-security-concerns": "html/infrastructure/fetching-resources/encrypted-http-and-related-security-concerns",
+ "content-type-sniffing": "html/infrastructure/fetching-resources/content-type-sniffing",
+ "extracting-character-encodings-from-meta-elements": "html/infrastructure/fetching-resources/extracting-character-encodings-from-meta-elements",
+ "cors-settings-attributes": "html/infrastructure/fetching-resources/cors-settings-attributes",
+ "cors-enabled-fetch": "html/infrastructure/fetching-resources/cors-enabled-fetch",
+ "common-dom-interfaces": "html/infrastructure/common-dom-interfaces",
+ "reflecting-content-attributes-in-idl-attributes": "html/infrastructure/common-dom-interfaces/reflecting-content-attributes-in-idl-attributes",
+ "collections": "html/infrastructure/common-dom-interfaces/collections",
+ "htmlallcollection": "html/infrastructure/common-dom-interfaces/collections",
+ "htmlformcontrolscollection": "html/infrastructure/common-dom-interfaces/collections",
+ "htmloptionscollection": "html/infrastructure/common-dom-interfaces/collections",
+ "domstringmap": "html/infrastructure/common-dom-interfaces/domstringmap",
+ "transferable-objects": "html/infrastructure/common-dom-interfaces/transferable-objects",
+ "safe-passing-of-structured-data": "html/infrastructure/common-dom-interfaces/safe-passing-of-structured-data",
+ "garbage-collection": "html/infrastructure/common-dom-interfaces/garbage-collection",
+ "namespaces": "html/infrastructure/namespaces",
+ "dom": "html/dom",
+ "documents": "html/dom/documents",
+ "the-document-object": "html/dom/documents/the-document-object",
+ "security-document": "html/dom/documents/security-document",
+ "resource-metadata-management": "html/dom/documents/resource-metadata-management",
+ "dom-tree-accessors": "html/dom/documents/dom-tree-accessors",
+ "loading-xml-documents": "html/dom/documents/loading-xml-documents",
+ "elements": "html/dom/elements",
+ "semantics-0": "html/dom/elements/semantics-0",
+ "elements-in-the-dom": "html/dom/elements/elements-in-the-dom",
+ "global-attributes": "html/dom/elements/global-attributes",
+ "the-id-attribute": "html/dom/elements/global-attributes",
+ "the-title-attribute": "html/dom/elements/global-attributes",
+ "the-lang-and-xml:lang-attributes": "html/dom/elements/global-attributes",
+ "the-translate-attribute": "html/dom/elements/global-attributes",
+ "the-xml:base-attribute-(xml-only)": "html/dom/elements/global-attributes",
+ "the-dir-attribute": "html/dom/elements/global-attributes",
+ "classes": "html/dom/elements/global-attributes",
+ "the-style-attribute": "html/dom/elements/global-attributes",
+ "embedding-custom-non-visible-data-with-the-data-*-attributes": "html/dom/elements/global-attributes",
+ "element-definitions": "html/dom/elements/element-definitions",
+ "attributes": "html/dom/elements/element-definitions",
+ "content-models": "html/dom/elements/content-models",
+ "kinds-of-content": "html/dom/elements/content-models",
+ "metadata-content": "html/dom/elements/content-models",
+ "flow-content": "html/dom/elements/content-models",
+ "sectioning-content": "html/dom/elements/content-models",
+ "heading-content": "html/dom/elements/content-models",
+ "phrasing-content": "html/dom/elements/content-models",
+ "embedded-content": "html/dom/elements/content-models",
+ "interactive-content": "html/dom/elements/content-models",
+ "palpable-content": "html/dom/elements/content-models",
+ "transparent-content-models": "html/dom/elements/content-models",
+ "paragraphs": "html/dom/elements/content-models",
+ "requirements-relating-to-bidirectional-algorithm-formatting-characters": "html/dom/elements/requirements-relating-to-bidirectional-algorithm-formatting-characters",
+ "wai-aria": "html/dom/elements/wai-aria",
+ "aria-role-attribute": "html/dom/elements/wai-aria",
+ "state-and-property-attributes": "html/dom/elements/wai-aria",
+ "sec-strong-native-semantics": "html/dom/elements/wai-aria",
+ "sec-implicit-aria-semantics": "html/dom/elements/wai-aria",
+ "interactions-with-xpath-and-xslt": "html/dom/interactions-with-xpath-and-xslt",
+ "dynamic-markup-insertion": "html/dom/dynamic-markup-insertion",
+ "opening-the-input-stream": "html/dom/dynamic-markup-insertion/opening-the-input-stream",
+ "closing-the-input-stream": "html/dom/dynamic-markup-insertion/closing-the-input-stream",
+ "document.write()": "html/dom/dynamic-markup-insertion/document-write",
+ "document.writeln()": "html/dom/dynamic-markup-insertion/document-writeln",
+ "semantics": "html/semantics",
+ "the-root-element": "html/semantics/the-root-element",
+ "the-html-element": "html/semantics/the-root-element/the-html-element",
+ "document-metadata": "html/semantics/document-metadata",
+ "the-head-element": "html/semantics/document-metadata/the-head-element",
+ "the-title-element": "html/semantics/document-metadata/the-title-element",
+ "the-base-element": "html/semantics/document-metadata/the-base-element",
+ "the-link-element": "html/semantics/document-metadata/the-link-element",
+ "the-meta-element": "html/semantics/document-metadata/the-meta-element",
+ "standard-metadata-names": "html/semantics/document-metadata/the-meta-element",
+ "other-metadata-names": "html/semantics/document-metadata/the-meta-element",
+ "pragma-directives": "html/semantics/document-metadata/the-meta-element",
+ "other-pragma-directives": "html/semantics/document-metadata/the-meta-element",
+ "charset": "html/semantics/document-metadata/the-meta-element",
+ "the-style-element": "html/semantics/document-metadata/the-style-element",
+ "styling": "html/semantics/document-metadata/styling",
+ "scripting-1": "html/semantics/scripting-1",
+ "the-script-element": "html/semantics/scripting-1/the-script-element",
+ "scriptingLanguages": "html/semantics/scripting-1/the-script-element",
+ "restrictions-for-contents-of-script-elements": "html/semantics/scripting-1/the-script-element",
+ "inline-documentation-for-external-scripts": "html/semantics/scripting-1/the-script-element",
+ "scriptTagXSLT": "html/semantics/scripting-1/the-script-element",
+ "the-noscript-element": "html/semantics/scripting-1/the-noscript-element",
+ "sections": "html/semantics/sections",
+ "the-body-element": "html/semantics/sections/the-body-element",
+ "the-article-element": "html/semantics/sections/the-article-element",
+ "the-section-element": "html/semantics/sections/the-section-element",
+ "the-nav-element": "html/semantics/sections/the-nav-element",
+ "the-aside-element": "html/semantics/sections/the-aside-element",
+ "the-h1,-h2,-h3,-h4,-h5,-and-h6-elements": "html/semantics/sections/the-h1-h2-h3-h4-h5-and-h6-elements",
+ "the-hgroup-element": "html/semantics/sections/the-hgroup-element",
+ "the-header-element": "html/semantics/sections/the-header-element",
+ "the-footer-element": "html/semantics/sections/the-footer-element",
+ "the-address-element": "html/semantics/sections/the-address-element",
+ "headings-and-sections": "html/semantics/sections/headings-and-sections",
+ "outlines": "html/semantics/sections/headings-and-sections",
+ "usage-summary-0": "html/semantics/sections/usage-summary-0",
+ "grouping-content": "html/semantics/grouping-content",
+ "the-p-element": "html/semantics/grouping-content/the-p-element",
+ "the-hr-element": "html/semantics/grouping-content/the-hr-element",
+ "the-pre-element": "html/semantics/grouping-content/the-pre-element",
+ "the-blockquote-element": "html/semantics/grouping-content/the-blockquote-element",
+ "the-ol-element": "html/semantics/grouping-content/the-ol-element",
+ "the-ul-element": "html/semantics/grouping-content/the-ul-element",
+ "the-li-element": "html/semantics/grouping-content/the-li-element",
+ "the-dl-element": "html/semantics/grouping-content/the-dl-element",
+ "the-dt-element": "html/semantics/grouping-content/the-dt-element",
+ "the-dd-element": "html/semantics/grouping-content/the-dd-element",
+ "the-figure-element": "html/semantics/grouping-content/the-figure-element",
+ "the-figcaption-element": "html/semantics/grouping-content/the-figcaption-element",
+ "the-div-element": "html/semantics/grouping-content/the-div-element",
+ "text-level-semantics": "html/semantics/text-level-semantics",
+ "the-a-element": "html/semantics/text-level-semantics/the-a-element",
+ "the-em-element": "html/semantics/text-level-semantics/the-em-element",
+ "the-strong-element": "html/semantics/text-level-semantics/the-strong-element",
+ "the-small-element": "html/semantics/text-level-semantics/the-small-element",
+ "the-s-element": "html/semantics/text-level-semantics/the-s-element",
+ "the-cite-element": "html/semantics/text-level-semantics/the-cite-element",
+ "the-q-element": "html/semantics/text-level-semantics/the-q-element",
+ "the-dfn-element": "html/semantics/text-level-semantics/the-dfn-element",
+ "the-abbr-element": "html/semantics/text-level-semantics/the-abbr-element",
+ "the-time-element": "html/semantics/text-level-semantics/the-time-element",
+ "the-code-element": "html/semantics/text-level-semantics/the-code-element",
+ "the-var-element": "html/semantics/text-level-semantics/the-var-element",
+ "the-samp-element": "html/semantics/text-level-semantics/the-samp-element",
+ "the-kbd-element": "html/semantics/text-level-semantics/the-kbd-element",
+ "the-sub-and-sup-elements": "html/semantics/text-level-semantics/the-sub-and-sup-elements",
+ "the-i-element": "html/semantics/text-level-semantics/the-i-element",
+ "the-b-element": "html/semantics/text-level-semantics/the-b-element",
+ "the-u-element": "html/semantics/text-level-semantics/the-u-element",
+ "the-mark-element": "html/semantics/text-level-semantics/the-mark-element",
+ "the-ruby-element": "html/semantics/text-level-semantics/the-ruby-element",
+ "the-rt-element": "html/semantics/text-level-semantics/the-rt-element",
+ "the-rp-element": "html/semantics/text-level-semantics/the-rp-element",
+ "the-bdi-element": "html/semantics/text-level-semantics/the-bdi-element",
+ "the-bdo-element": "html/semantics/text-level-semantics/the-bdo-element",
+ "the-span-element": "html/semantics/text-level-semantics/the-span-element",
+ "the-br-element": "html/semantics/text-level-semantics/the-br-element",
+ "the-wbr-element": "html/semantics/text-level-semantics/the-wbr-element",
+ "usage-summary": "html/semantics/text-level-semantics/usage-summary",
+ "edits": "html/semantics/edits",
+ "the-ins-element": "html/semantics/edits/the-ins-element",
+ "the-del-element": "html/semantics/edits/the-del-element",
+ "attributes-common-to-ins-and-del-elements": "html/semantics/edits/attributes-common-to-ins-and-del-elements",
+ "edits-and-paragraphs": "html/semantics/edits/edits-and-paragraphs",
+ "edits-and-lists": "html/semantics/edits/edits-and-lists",
+ "edits-and-tables": "html/semantics/edits/edits-and-tables",
+ "embedded-content-0": "html/semantics/embedded-content-0",
+ "the-img-element": "html/semantics/embedded-content-0/the-img-element",
+ "alt": "html/semantics/embedded-content-0/the-img-element",
+ "general-guidelines": "html/semantics/embedded-content-0/the-img-element",
+ "a-link-or-button-containing-nothing-but-the-image": "html/semantics/embedded-content-0/the-img-element",
+ "a-phrase-or-paragraph-with-an-alternative-graphical-representation:-charts,-diagrams,-graphs,-maps,-illustrations": "html/semantics/embedded-content-0/the-img-element",
+ "a-short-phrase-or-label-with-an-alternative-graphical-representation:-icons,-logos": "html/semantics/embedded-content-0/the-img-element",
+ "text-that-has-been-rendered-to-a-graphic-for-typographical-effect": "html/semantics/embedded-content-0/the-img-element",
+ "a-graphical-representation-of-some-of-the-surrounding-text": "html/semantics/embedded-content-0/the-img-element",
+ "a-purely-decorative-image-that-doesn't-add-any-information": "html/semantics/embedded-content-0/the-img-element",
+ "a-group-of-images-that-form-a-single-larger-picture-with-no-links": "html/semantics/embedded-content-0/the-img-element",
+ "a-group-of-images-that-form-a-single-larger-picture-with-links": "html/semantics/embedded-content-0/the-img-element",
+ "a-key-part-of-the-content": "html/semantics/embedded-content-0/the-img-element",
+ "an-image-not-intended-for-the-user": "html/semantics/embedded-content-0/the-img-element",
+ "guidance-for-markup-generators": "html/semantics/embedded-content-0/the-img-element",
+ "guidance-for-conformance-checkers": "html/semantics/embedded-content-0/the-img-element",
+ "the-iframe-element": "html/semantics/embedded-content-0/the-iframe-element",
+ "the-embed-element": "html/semantics/embedded-content-0/the-embed-element",
+ "the-object-element": "html/semantics/embedded-content-0/the-object-element",
+ "the-param-element": "html/semantics/embedded-content-0/the-param-element",
+ "the-video-element": "html/semantics/embedded-content-0/the-video-element",
+ "the-audio-element": "html/semantics/embedded-content-0/the-audio-element",
+ "the-source-element": "html/semantics/embedded-content-0/the-source-element",
+ "the-track-element": "html/semantics/embedded-content-0/the-track-element",
+ "media-elements": "html/semantics/embedded-content-0/media-elements",
+ "error-codes": "html/semantics/embedded-content-0/media-elements",
+ "location-of-the-media-resource": "html/semantics/embedded-content-0/media-elements",
+ "mime-types": "html/semantics/embedded-content-0/media-elements",
+ "network-states": "html/semantics/embedded-content-0/media-elements",
+ "loading-the-media-resource": "html/semantics/embedded-content-0/media-elements",
+ "offsets-into-the-media-resource": "html/semantics/embedded-content-0/media-elements",
+ "ready-states": "html/semantics/embedded-content-0/media-elements",
+ "playing-the-media-resource": "html/semantics/embedded-content-0/media-elements",
+ "seeking": "html/semantics/embedded-content-0/media-elements",
+ "media-resources-with-multiple-media-tracks": "html/semantics/embedded-content-0/media-elements",
+ "audiotracklist-and-videotracklist-objects": "html/semantics/embedded-content-0/media-elements",
+ "selecting-specific-audio-and-video-tracks-declaratively": "html/semantics/embedded-content-0/media-elements",
+ "synchronising-multiple-media-elements": "html/semantics/embedded-content-0/media-elements",
+ "introduction-0": "html/semantics/embedded-content-0/media-elements",
+ "media-controllers": "html/semantics/embedded-content-0/media-elements",
+ "assigning-a-media-controller-declaratively": "html/semantics/embedded-content-0/media-elements",
+ "timed-text-tracks": "html/semantics/embedded-content-0/media-elements",
+ "text-track-model": "html/semantics/embedded-content-0/media-elements",
+ "sourcing-in-band-text-tracks": "html/semantics/embedded-content-0/media-elements",
+ "sourcing-out-of-band-text-tracks": "html/semantics/embedded-content-0/media-elements",
+ "guidelines-for-exposing-cues-in-various-formats-as-text-track-cues": "html/semantics/embedded-content-0/media-elements",
+ "text-track-api": "html/semantics/embedded-content-0/media-elements",
+ "text-tracks-describing-chapters": "html/semantics/embedded-content-0/media-elements",
+ "cue-events": "html/semantics/embedded-content-0/media-elements",
+ "user-interface": "html/semantics/embedded-content-0/media-elements",
+ "time-ranges": "html/semantics/embedded-content-0/media-elements",
+ "event-definitions": "html/semantics/embedded-content-0/media-elements",
+ "mediaevents": "html/semantics/embedded-content-0/media-elements",
+ "security-and-privacy-considerations": "html/semantics/embedded-content-0/media-elements",
+ "best-practices-for-authors-using-media-elements": "html/semantics/embedded-content-0/media-elements",
+ "best-practices-for-implementors-of-media-elements": "html/semantics/embedded-content-0/media-elements",
+ "the-canvas-element": "html/semantics/embedded-content-0/the-canvas-element",
+ "color-spaces-and-color-correction": "html/semantics/embedded-content-0/the-canvas-element",
+ "security-with-canvas-elements": "html/semantics/embedded-content-0/the-canvas-element",
+ "the-map-element": "html/semantics/embedded-content-0/the-map-element",
+ "the-area-element": "html/semantics/embedded-content-0/the-area-element",
+ "image-maps": "html/semantics/embedded-content-0/image-maps",
+ "authoring": "html/semantics/embedded-content-0/image-maps",
+ "processing-model-0": "html/semantics/embedded-content-0/image-maps",
+ "mathml": "html/semantics/embedded-content-0/mathml",
+ "svg": "html/semantics/embedded-content-0/svg",
+ "dimension-attributes": "html/semantics/embedded-content-0/dimension-attributes",
+ "tabular-data": "html/semantics/tabular-data",
+ "the-table-element": "html/semantics/tabular-data/the-table-element",
+ "table-descriptions-techniques": "html/semantics/tabular-data/the-table-element",
+ "table-layout-techniques": "html/semantics/tabular-data/the-table-element",
+ "the-caption-element": "html/semantics/tabular-data/the-caption-element",
+ "the-colgroup-element": "html/semantics/tabular-data/the-colgroup-element",
+ "the-col-element": "html/semantics/tabular-data/the-col-element",
+ "the-tbody-element": "html/semantics/tabular-data/the-tbody-element",
+ "the-thead-element": "html/semantics/tabular-data/the-thead-element",
+ "the-tfoot-element": "html/semantics/tabular-data/the-tfoot-element",
+ "the-tr-element": "html/semantics/tabular-data/the-tr-element",
+ "the-td-element": "html/semantics/tabular-data/the-td-element",
+ "the-th-element": "html/semantics/tabular-data/the-th-element",
+ "attributes-common-to-td-and-th-elements": "html/semantics/tabular-data/attributes-common-to-td-and-th-elements",
+ "processing-model-1": "html/semantics/tabular-data/processing-model-1",
+ "forming-a-table": "html/semantics/tabular-data/processing-model-1",
+ "header-and-data-cell-semantics": "html/semantics/tabular-data/processing-model-1",
+ "examples": "canvas2d/examples",
+ "forms": "html/semantics/forms",
+ "introduction-1": "html/semantics/forms/introduction-1",
+ "writing-a-form's-user-interface": "html/semantics/forms/introduction-1",
+ "implementing-the-server-side-processing-for-a-form": "html/semantics/forms/introduction-1",
+ "configuring-a-form-to-communicate-with-a-server": "html/semantics/forms/introduction-1",
+ "client-side-form-validation": "html/semantics/forms/introduction-1",
+ "enabling-client-side-automatic-filling-of-form-controls": "html/semantics/forms/introduction-1",
+ "improving-the-user-experience-on-mobile-devices": "html/semantics/forms/introduction-1",
+ "the-difference-between-the-field-type,-the-autofill-field-name,-and-the-input-modality": "html/semantics/forms/introduction-1",
+ "input-author-notes": "html/semantics/forms/introduction-1",
+ "categories": "html/semantics/forms/categories",
+ "the-form-element": "html/semantics/forms/the-form-element",
+ "the-fieldset-element": "html/semantics/forms/the-fieldset-element",
+ "the-legend-element": "html/semantics/forms/the-legend-element",
+ "the-label-element": "html/semantics/forms/the-label-element",
+ "the-input-element": "html/semantics/forms/the-input-element",
+ "states-of-the-type-attribute": "html/semantics/forms/the-input-element",
+ "hidden-state-(type=hidden)": "html/semantics/forms/the-input-element",
+ "text-(type=text)-state-and-search-state-(type=search)": "html/semantics/forms/the-input-element",
+ "telephone-state-(type=tel)": "html/semantics/forms/the-input-element",
+ "url-state-(type=url)": "html/semantics/forms/the-input-element",
+ "e-mail-state-(type=email)": "html/semantics/forms/the-input-element",
+ "password-state-(type=password)": "html/semantics/forms/the-input-element",
+ "date-and-time-state-(type=datetime)": "html/semantics/forms/the-input-element",
+ "date-state-(type=date)": "html/semantics/forms/the-input-element",
+ "month-state-(type=month)": "html/semantics/forms/the-input-element",
+ "week-state-(type=week)": "html/semantics/forms/the-input-element",
+ "time-state-(type=time)": "html/semantics/forms/the-input-element",
+ "local-date-and-time-state-(type=datetime-local)": "html/semantics/forms/the-input-element",
+ "number-state-(type=number)": "html/semantics/forms/the-input-element",
+ "range-state-(type=range)": "html/semantics/forms/the-input-element",
+ "color-state-(type=color)": "html/semantics/forms/the-input-element",
+ "checkbox-state-(type=checkbox)": "html/semantics/forms/the-input-element",
+ "radio-button-state-(type=radio)": "html/semantics/forms/the-input-element",
+ "file-upload-state-(type=file)": "html/semantics/forms/the-input-element",
+ "submit-button-state-(type=submit)": "html/semantics/forms/the-input-element",
+ "image-button-state-(type=image)": "html/semantics/forms/the-input-element",
+ "reset-button-state-(type=reset)": "html/semantics/forms/the-input-element",
+ "button-state-(type=button)": "html/semantics/forms/the-input-element",
+ "input-impl-notes": "html/semantics/forms/the-input-element",
+ "common-input-element-attributes": "html/semantics/forms/the-input-element",
+ "the-maxlength-attribute": "html/semantics/forms/the-input-element",
+ "the-size-attribute": "html/semantics/forms/the-input-element",
+ "the-readonly-attribute": "html/semantics/forms/the-input-element",
+ "the-required-attribute": "html/semantics/forms/the-input-element",
+ "the-multiple-attribute": "html/semantics/forms/the-input-element",
+ "the-pattern-attribute": "html/semantics/forms/the-input-element",
+ "the-min-and-max-attributes": "html/semantics/forms/the-input-element",
+ "the-step-attribute": "html/semantics/forms/the-input-element",
+ "the-list-attribute": "html/semantics/forms/the-input-element",
+ "the-placeholder-attribute": "html/semantics/forms/the-input-element",
+ "common-input-element-apis": "html/semantics/forms/the-input-element",
+ "common-event-behaviors": "html/semantics/forms/the-input-element",
+ "the-button-element": "html/semantics/forms/the-button-element",
+ "the-select-element": "html/semantics/forms/the-select-element",
+ "the-datalist-element": "html/semantics/forms/the-datalist-element",
+ "the-optgroup-element": "html/semantics/forms/the-optgroup-element",
+ "the-option-element": "html/semantics/forms/the-option-element",
+ "the-textarea-element": "html/semantics/forms/the-textarea-element",
+ "the-keygen-element": "html/semantics/forms/the-keygen-element",
+ "the-output-element": "html/semantics/forms/the-output-element",
+ "the-progress-element": "html/semantics/forms/the-progress-element",
+ "the-meter-element": "html/semantics/forms/the-meter-element",
+ "form-control-infrastructure": "html/semantics/forms/form-control-infrastructure",
+ "a-form-control's-value": "html/semantics/forms/form-control-infrastructure",
+ "mutability": "html/semantics/forms/form-control-infrastructure",
+ "association-of-controls-and-forms": "html/semantics/forms/form-control-infrastructure",
+ "attributes-common-to-form-controls": "html/semantics/forms/attributes-common-to-form-controls",
+ "naming-form-controls:-the-name-attribute": "html/semantics/forms/attributes-common-to-form-controls",
+ "submitting-element-directionality:-the-dirname-attribute": "html/semantics/forms/attributes-common-to-form-controls",
+ "limiting-user-input-length:-the-maxlength-attribute": "html/semantics/forms/attributes-common-to-form-controls",
+ "enabling-and-disabling-form-controls:-the-disabled-attribute": "html/semantics/forms/attributes-common-to-form-controls",
+ "form-submission": "html/semantics/forms/attributes-common-to-form-controls",
+ "autofocusing-a-form-control:-the-autofocus-attribute": "html/semantics/forms/attributes-common-to-form-controls",
+ "input-modalities:-the-inputmode-attribute": "html/semantics/forms/attributes-common-to-form-controls",
+ "autofilling-form-controls:-the-autocomplete-attribute": "html/semantics/forms/attributes-common-to-form-controls",
+ "textFieldSelection": "html/semantics/forms/textfieldselection",
+ "constraints": "html/semantics/forms/constraints",
+ "definitions": "html/semantics/forms/constraints",
+ "constraint-validation": "html/semantics/forms/constraints",
+ "the-constraint-validation-api": "html/semantics/forms/constraints",
+ "security-forms": "html/semantics/forms/constraints",
+ "form-submission-0": "html/semantics/forms/form-submission-0",
+ "introduction-2": "html/semantics/forms/form-submission-0",
+ "implicit-submission": "html/semantics/forms/form-submission-0",
+ "form-submission-algorithm": "html/semantics/forms/form-submission-0",
+ "constructing-form-data-set": "html/semantics/forms/form-submission-0",
+ "url-encoded-form-data": "html/semantics/forms/form-submission-0",
+ "multipart-form-data": "html/semantics/forms/form-submission-0",
+ "plain-text-form-data": "html/semantics/forms/form-submission-0",
+ "resetting-a-form": "html/semantics/forms/resetting-a-form",
+ "interactive-elements": "html/semantics/interactive-elements",
+ "the-details-element": "html/semantics/interactive-elements/the-details-element",
+ "the-summary-element": "html/semantics/interactive-elements/the-summary-element",
+ "the-command-element": "html/semantics/interactive-elements/the-command-element",
+ "the-menu-element": "html/semantics/interactive-elements/the-menu-element",
+ "menus-intro": "html/semantics/interactive-elements/the-menu-element",
+ "building-menus-and-toolbars": "html/semantics/interactive-elements/the-menu-element",
+ "context-menus": "html/semantics/interactive-elements/the-menu-element",
+ "toolbars": "html/semantics/interactive-elements/the-menu-element",
+ "commands": "html/semantics/interactive-elements/commands",
+ "using-the-a-element-to-define-a-command": "html/semantics/interactive-elements/commands",
+ "using-the-button-element-to-define-a-command": "html/semantics/interactive-elements/commands",
+ "using-the-input-element-to-define-a-command": "html/semantics/interactive-elements/commands",
+ "using-the-option-element-to-define-a-command": "html/semantics/interactive-elements/commands",
+ "using-the-command-element-to-define-a-command": "html/semantics/interactive-elements/commands",
+ "using-the-command-attribute-on-command-elements-to-define-a-command-indirectly": "html/semantics/interactive-elements/commands",
+ "using-the-accesskey-attribute-on-a-label-element-to-define-a-command": "html/semantics/interactive-elements/commands",
+ "using-the-accesskey-attribute-on-a-legend-element-to-define-a-command": "html/semantics/interactive-elements/commands",
+ "using-the-accesskey-attribute-to-define-a-command-on-other-elements": "html/semantics/interactive-elements/commands",
+ "the-dialog-element": "html/semantics/interactive-elements/the-dialog-element",
+ "anchor-points": "html/semantics/interactive-elements/the-dialog-element",
+ "links": "html/semantics/links",
+ "introduction-3": "html/semantics/links/introduction-3",
+ "links-created-by-a-and-area-elements": "html/semantics/links/links-created-by-a-and-area-elements",
+ "following-hyperlinks": "html/semantics/links/following-hyperlinks",
+ "downloading-resources": "html/semantics/links/downloading-resources",
+ "hyperlink-auditing": "html/semantics/links/downloading-resources",
+ "linkTypes": "html/semantics/links/linktypes",
+ "rel-alternate": "html/semantics/links/linktypes",
+ "link-type-author": "html/semantics/links/linktypes",
+ "link-type-bookmark": "html/semantics/links/linktypes",
+ "link-type-help": "html/semantics/links/linktypes",
+ "rel-icon": "html/semantics/links/linktypes",
+ "link-type-license": "html/semantics/links/linktypes",
+ "link-type-nofollow": "html/semantics/links/linktypes",
+ "link-type-noreferrer": "html/semantics/links/linktypes",
+ "link-type-prefetch": "html/semantics/links/linktypes",
+ "link-type-search": "html/semantics/links/linktypes",
+ "link-type-stylesheet": "html/semantics/links/linktypes",
+ "link-type-tag": "html/semantics/links/linktypes",
+ "sequential-link-types": "html/semantics/links/linktypes",
+ "link-type-next": "html/semantics/links/linktypes",
+ "link-type-prev": "html/semantics/links/linktypes",
+ "other-link-types": "html/semantics/links/linktypes",
+ "common-idioms": "html/semantics/common-idioms",
+ "the-main-part-of-the-content": "html/semantics/common-idioms/the-main-part-of-the-content",
+ "rel-up": "html/semantics/common-idioms/rel-up",
+ "tag-clouds": "html/semantics/common-idioms/tag-clouds",
+ "conversations": "html/semantics/common-idioms/conversations",
+ "footnotes": "html/semantics/common-idioms/footnotes",
+ "disabled-elements": "html/semantics/disabled-elements",
+ "selectors": "html/semantics/selectors",
+ "case-sensitivity": "html/semantics/selectors/case-sensitivity",
+ "pseudo-classes": "html/semantics/selectors/pseudo-classes",
+ "browsers": "html/browsers",
+ "windows": "html/browsers/windows",
+ "nested-browsing-contexts": "html/browsers/windows/nested-browsing-contexts",
+ "navigating-nested-browsing-contexts-in-the-dom": "html/browsers/windows/nested-browsing-contexts",
+ "auxiliary-browsing-contexts": "html/browsers/windows/auxiliary-browsing-contexts",
+ "navigating-auxiliary-browsing-contexts-in-the-dom": "html/browsers/windows/auxiliary-browsing-contexts",
+ "secondary-browsing-contexts": "html/browsers/windows/secondary-browsing-contexts",
+ "security-nav": "html/browsers/windows/security-nav",
+ "groupings-of-browsing-contexts": "html/browsers/windows/groupings-of-browsing-contexts",
+ "browsing-context-names": "html/browsers/windows/browsing-context-names",
+ "the-window-object": "html/browsers/the-window-object",
+ "security-window": "html/browsers/the-window-object/security-window",
+ "apis-for-creating-and-navigating-browsing-contexts-by-name": "html/browsers/the-window-object/apis-for-creating-and-navigating-browsing-contexts-by-name",
+ "accessing-other-browsing-contexts": "html/browsers/the-window-object/accessing-other-browsing-contexts",
+ "named-access-on-the-window-object": "html/browsers/the-window-object/named-access-on-the-window-object",
+ "garbage-collection-and-browsing-contexts": "html/browsers/the-window-object/garbage-collection-and-browsing-contexts",
+ "closing-browsing-contexts": "html/browsers/the-window-object/closing-browsing-contexts",
+ "browser-interface-elements": "html/browsers/the-window-object/browser-interface-elements",
+ "the-windowproxy-object": "html/browsers/the-window-object/the-windowproxy-object",
+ "origin": "html/browsers/origin",
+ "relaxing-the-same-origin-restriction": "html/browsers/origin/relaxing-the-same-origin-restriction",
+ "sandboxing": "html/browsers/sandboxing",
+ "history": "html/browsers/history",
+ "the-session-history-of-browsing-contexts": "html/browsers/history/the-session-history-of-browsing-contexts",
+ "the-history-interface": "html/browsers/history/the-history-interface",
+ "the-location-interface": "html/browsers/history/the-location-interface",
+ "security-location": "html/browsers/history/the-location-interface",
+ "history-notes": "html/browsers/history/history-notes",
+ "browsing-the-web": "html/browsers/browsing-the-web",
+ "navigating-across-documents": "html/browsers/browsing-the-web/navigating-across-documents",
+ "read-html": "html/browsers/browsing-the-web/read-html",
+ "read-xml": "html/browsers/browsing-the-web/read-xml",
+ "read-text": "html/browsers/browsing-the-web/read-text",
+ "read-multipart-x-mixed-replace": "html/browsers/browsing-the-web/read-multipart-x-mixed-replace",
+ "read-media": "html/browsers/browsing-the-web/read-media",
+ "read-plugin": "html/browsers/browsing-the-web/read-plugin",
+ "read-ua-inline": "html/browsers/browsing-the-web/read-ua-inline",
+ "scroll-to-fragid": "html/browsers/browsing-the-web/scroll-to-fragid",
+ "history-traversal": "html/browsers/browsing-the-web/history-traversal",
+ "event-definitions-0": "html/browsers/browsing-the-web/history-traversal",
+ "unloading-documents": "html/browsers/browsing-the-web/unloading-documents",
+ "event-definition": "html/browsers/browsing-the-web/unloading-documents",
+ "aborting-a-document-load": "html/browsers/browsing-the-web/aborting-a-document-load",
+ "offline": "html/browsers/offline",
+ "introduction-4": "html/browsers/offline/introduction-4",
+ "supporting-offline-caching-for-legacy-applications": "html/browsers/offline/introduction-4",
+ "appcacheevents": "html/browsers/offline/introduction-4",
+ "appcache": "html/browsers/offline/appcache",
+ "manifests": "html/browsers/offline/manifests",
+ "some-sample-manifests": "html/browsers/offline/manifests",
+ "writing-cache-manifests": "html/browsers/offline/manifests",
+ "parsing-cache-manifests": "html/browsers/offline/manifests",
+ "downloading-or-updating-an-application-cache": "html/browsers/offline/downloading-or-updating-an-application-cache",
+ "the-application-cache-selection-algorithm": "html/browsers/offline/the-application-cache-selection-algorithm",
+ "changesToNetworkingModel": "html/browsers/offline/changestonetworkingmodel",
+ "expiring-application-caches": "html/browsers/offline/expiring-application-caches",
+ "disk-space": "html/browsers/offline/disk-space",
+ "application-cache-api": "html/browsers/offline/application-cache-api",
+ "browser-state": "html/browsers/offline/browser-state",
+ "webappapis": "html/webappapis",
+ "scripting": "html/webappapis/scripting",
+ "introduction-5": "html/webappapis/scripting/introduction-5",
+ "enabling-and-disabling-scripting": "html/webappapis/scripting/enabling-and-disabling-scripting",
+ "processing-model-2": "html/webappapis/scripting/processing-model-2",
+ "definitions-0": "html/webappapis/scripting/processing-model-2",
+ "calling-scripts": "html/webappapis/scripting/processing-model-2",
+ "creating-scripts": "html/webappapis/scripting/processing-model-2",
+ "killing-scripts": "html/webappapis/scripting/processing-model-2",
+ "runtime-script-errors": "html/webappapis/scripting/processing-model-2",
+ "runtime-script-errors-in-documents": "html/webappapis/scripting/processing-model-2",
+ "event-loops": "html/webappapis/scripting/event-loops",
+ "definitions-1": "html/webappapis/scripting/event-loops",
+ "processing-model-3": "html/webappapis/scripting/event-loops",
+ "generic-task-sources": "html/webappapis/scripting/event-loops",
+ "javascript-protocol": "html/webappapis/scripting/javascript-protocol",
+ "events": "html/webappapis/scripting/events",
+ "event-handler-attributes": "html/webappapis/scripting/events",
+ "event-handlers-on-elements,-document-objects,-and-window-objects": "html/webappapis/scripting/events",
+ "event-firing": "html/webappapis/scripting/events",
+ "events-and-the-window-object": "html/webappapis/scripting/events",
+ "atob": "html/webappapis/atob",
+ "timers": "html/webappapis/timers",
+ "user-prompts": "html/webappapis/user-prompts",
+ "simple-dialogs": "html/webappapis/user-prompts/simple-dialogs",
+ "printing": "html/webappapis/user-prompts/printing",
+ "dialogs-implemented-using-separate-documents": "html/webappapis/user-prompts/dialogs-implemented-using-separate-documents",
+ "system-state-and-capabilities": "html/webappapis/system-state-and-capabilities",
+ "the-navigator-object": "html/webappapis/system-state-and-capabilities/the-navigator-object",
+ "client-identification": "html/webappapis/system-state-and-capabilities/the-navigator-object",
+ "custom-handlers": "html/webappapis/system-state-and-capabilities/the-navigator-object",
+ "security-and-privacy": "html/webappapis/system-state-and-capabilities/the-navigator-object",
+ "sample-handler-impl": "html/webappapis/system-state-and-capabilities/the-navigator-object",
+ "manually-releasing-the-storage-mutex": "html/webappapis/system-state-and-capabilities/the-navigator-object",
+ "the-external-interface": "html/webappapis/system-state-and-capabilities/the-external-interface",
+ "editing": "html/editing",
+ "the-hidden-attribute": "html/editing/the-hidden-attribute",
+ "inert-subtrees": "html/editing/inert-subtrees",
+ "the-inert-attribute": "html/editing/inert-subtrees/the-inert-attribute",
+ "activation": "html/editing/activation",
+ "focus": "html/editing/focus",
+ "sequential-focus-navigation-and-the-tabindex-attribute": "html/editing/focus/sequential-focus-navigation-and-the-tabindex-attribute",
+ "focus-management": "html/editing/focus/focus-management",
+ "document-level-focus-apis": "html/editing/focus/document-level-focus-apis",
+ "element-level-focus-apis": "html/editing/focus/element-level-focus-apis",
+ "assigning-keyboard-shortcuts": "html/editing/assigning-keyboard-shortcuts",
+ "introduction-6": "html/editing/assigning-keyboard-shortcuts/introduction-6",
+ "the-accesskey-attribute": "html/editing/assigning-keyboard-shortcuts/the-accesskey-attribute",
+ "processing-model-4": "html/editing/assigning-keyboard-shortcuts/processing-model-4",
+ "editing-0": "html/editing/editing-0",
+ "contenteditable": "html/editing/editing-0/contenteditable",
+ "making-entire-documents-editable:-the-designmode-idl-attribute": "html/editing/editing-0/making-entire-documents-editable-the-designmode-idl-attribute",
+ "best-practices-for-in-page-editors": "html/editing/editing-0/best-practices-for-in-page-editors",
+ "editing-apis": "html/editing/editing-0/editing-apis",
+ "spelling-and-grammar-checking": "html/editing/editing-0/spelling-and-grammar-checking",
+ "dnd": "html/editing/dnd",
+ "introduction-7": "html/editing/dnd/introduction-7",
+ "the-drag-data-store": "html/editing/dnd/the-drag-data-store",
+ "the-datatransfer-interface": "html/editing/dnd/the-datatransfer-interface",
+ "the-datatransferitemlist-interface": "html/editing/dnd/the-datatransfer-interface",
+ "the-datatransferitem-interface": "html/editing/dnd/the-datatransfer-interface",
+ "the-dragevent-interface": "html/editing/dnd/the-dragevent-interface",
+ "drag-and-drop-processing-model": "html/editing/dnd/drag-and-drop-processing-model",
+ "dndevents": "html/editing/dnd/dndevents",
+ "the-draggable-attribute": "html/editing/dnd/the-draggable-attribute",
+ "the-dropzone-attribute": "html/editing/dnd/the-dropzone-attribute",
+ "security-risks-in-the-drag-and-drop-model": "html/editing/dnd/security-risks-in-the-drag-and-drop-model",
+ "syntax": "html/syntax",
+ "writing": "html/syntax/writing",
+ "the-doctype": "html/syntax/writing/the-doctype",
+ "elements-0": "html/syntax/writing/elements-0",
+ "start-tags": "html/syntax/writing/elements-0",
+ "end-tags": "html/syntax/writing/elements-0",
+ "attributes-0": "html/syntax/writing/elements-0",
+ "optional-tags": "html/syntax/writing/elements-0",
+ "element-restrictions": "html/syntax/writing/elements-0",
+ "cdata-rcdata-restrictions": "html/syntax/writing/elements-0",
+ "text": "html/syntax/writing/text",
+ "newlines": "html/syntax/writing/text",
+ "character-references": "html/syntax/writing/character-references",
+ "cdata-sections": "html/syntax/writing/cdata-sections",
+ "comments": "html/syntax/writing/comments",
+ "parsing": "html/syntax/parsing",
+ "overview-of-the-parsing-model": "html/syntax/parsing",
+ "the-input-byte-stream": "html/syntax/parsing",
+ "determining-the-character-encoding": "html/syntax/parsing",
+ "character-encodings-0": "html/syntax/parsing",
+ "changing-the-encoding-while-parsing": "html/syntax/parsing",
+ "preprocessing-the-input-stream": "html/syntax/parsing",
+ "parse-state": "html/syntax/parsing",
+ "the-insertion-mode": "html/syntax/parsing",
+ "the-stack-of-open-elements": "html/syntax/parsing",
+ "the-list-of-active-formatting-elements": "html/syntax/parsing",
+ "the-element-pointers": "html/syntax/parsing",
+ "other-parsing-state-flags": "html/syntax/parsing",
+ "tokenization": "html/syntax/parsing",
+ "data-state": "html/syntax/parsing",
+ "character-reference-in-data-state": "html/syntax/parsing",
+ "rcdata-state": "html/syntax/parsing",
+ "character-reference-in-rcdata-state": "html/syntax/parsing",
+ "rawtext-state": "html/syntax/parsing",
+ "script-data-state": "html/syntax/parsing",
+ "plaintext-state": "html/syntax/parsing",
+ "tag-open-state": "html/syntax/parsing",
+ "end-tag-open-state": "html/syntax/parsing",
+ "tag-name-state": "html/syntax/parsing",
+ "rcdata-less-than-sign-state": "html/syntax/parsing",
+ "rcdata-end-tag-open-state": "html/syntax/parsing",
+ "rcdata-end-tag-name-state": "html/syntax/parsing",
+ "rawtext-less-than-sign-state": "html/syntax/parsing",
+ "rawtext-end-tag-open-state": "html/syntax/parsing",
+ "rawtext-end-tag-name-state": "html/syntax/parsing",
+ "script-data-less-than-sign-state": "html/syntax/parsing",
+ "script-data-end-tag-open-state": "html/syntax/parsing",
+ "script-data-end-tag-name-state": "html/syntax/parsing",
+ "script-data-escape-start-state": "html/syntax/parsing",
+ "script-data-escape-start-dash-state": "html/syntax/parsing",
+ "script-data-escaped-state": "html/syntax/parsing",
+ "script-data-escaped-dash-state": "html/syntax/parsing",
+ "script-data-escaped-dash-dash-state": "html/syntax/parsing",
+ "script-data-escaped-less-than-sign-state": "html/syntax/parsing",
+ "script-data-escaped-end-tag-open-state": "html/syntax/parsing",
+ "script-data-escaped-end-tag-name-state": "html/syntax/parsing",
+ "script-data-double-escape-start-state": "html/syntax/parsing",
+ "script-data-double-escaped-state": "html/syntax/parsing",
+ "script-data-double-escaped-dash-state": "html/syntax/parsing",
+ "script-data-double-escaped-dash-dash-state": "html/syntax/parsing",
+ "script-data-double-escaped-less-than-sign-state": "html/syntax/parsing",
+ "script-data-double-escape-end-state": "html/syntax/parsing",
+ "before-attribute-name-state": "html/syntax/parsing",
+ "attribute-name-state": "html/syntax/parsing",
+ "after-attribute-name-state": "html/syntax/parsing",
+ "before-attribute-value-state": "html/syntax/parsing",
+ "attribute-value-(double-quoted)-state": "html/syntax/parsing",
+ "attribute-value-(single-quoted)-state": "html/syntax/parsing",
+ "attribute-value-(unquoted)-state": "html/syntax/parsing",
+ "character-reference-in-attribute-value-state": "html/syntax/parsing",
+ "after-attribute-value-(quoted)-state": "html/syntax/parsing",
+ "self-closing-start-tag-state": "html/syntax/parsing",
+ "bogus-comment-state": "html/syntax/parsing",
+ "markup-declaration-open-state": "html/syntax/parsing",
+ "comment-start-state": "html/syntax/parsing",
+ "comment-start-dash-state": "html/syntax/parsing",
+ "comment-state": "html/syntax/parsing",
+ "comment-end-dash-state": "html/syntax/parsing",
+ "comment-end-state": "html/syntax/parsing",
+ "comment-end-bang-state": "html/syntax/parsing",
+ "doctype-state": "html/syntax/parsing",
+ "before-doctype-name-state": "html/syntax/parsing",
+ "doctype-name-state": "html/syntax/parsing",
+ "after-doctype-name-state": "html/syntax/parsing",
+ "after-doctype-public-keyword-state": "html/syntax/parsing",
+ "before-doctype-public-identifier-state": "html/syntax/parsing",
+ "doctype-public-identifier-(double-quoted)-state": "html/syntax/parsing",
+ "doctype-public-identifier-(single-quoted)-state": "html/syntax/parsing",
+ "after-doctype-public-identifier-state": "html/syntax/parsing",
+ "between-doctype-public-and-system-identifiers-state": "html/syntax/parsing",
+ "after-doctype-system-keyword-state": "html/syntax/parsing",
+ "before-doctype-system-identifier-state": "html/syntax/parsing",
+ "doctype-system-identifier-(double-quoted)-state": "html/syntax/parsing",
+ "doctype-system-identifier-(single-quoted)-state": "html/syntax/parsing",
+ "after-doctype-system-identifier-state": "html/syntax/parsing",
+ "bogus-doctype-state": "html/syntax/parsing",
+ "cdata-section-state": "html/syntax/parsing",
+ "tokenizing-character-references": "html/syntax/parsing",
+ "tree-construction": "html/syntax/parsing",
+ "creating-and-inserting-elements": "html/syntax/parsing",
+ "closing-elements-that-have-implied-end-tags": "html/syntax/parsing",
+ "foster-parenting": "html/syntax/parsing",
+ "parsing-main-inhtml": "html/syntax/parsing",
+ "the-initial-insertion-mode": "html/syntax/parsing",
+ "the-before-html-insertion-mode": "html/syntax/parsing",
+ "the-before-head-insertion-mode": "html/syntax/parsing",
+ "parsing-main-inhead": "html/syntax/parsing",
+ "parsing-main-inheadnoscript": "html/syntax/parsing",
+ "the-after-head-insertion-mode": "html/syntax/parsing",
+ "parsing-main-inbody": "html/syntax/parsing",
+ "parsing-main-incdata": "html/syntax/parsing",
+ "parsing-main-intable": "html/syntax/parsing",
+ "parsing-main-intabletext": "html/syntax/parsing",
+ "parsing-main-incaption": "html/syntax/parsing",
+ "parsing-main-incolgroup": "html/syntax/parsing",
+ "parsing-main-intbody": "html/syntax/parsing",
+ "parsing-main-intr": "html/syntax/parsing",
+ "parsing-main-intd": "html/syntax/parsing",
+ "parsing-main-inselect": "html/syntax/parsing",
+ "parsing-main-inselectintable": "html/syntax/parsing",
+ "parsing-main-afterbody": "html/syntax/parsing",
+ "parsing-main-inframeset": "html/syntax/parsing",
+ "parsing-main-afterframeset": "html/syntax/parsing",
+ "the-after-after-body-insertion-mode": "html/syntax/parsing",
+ "the-after-after-frameset-insertion-mode": "html/syntax/parsing",
+ "parsing-main-inforeign": "html/syntax/parsing",
+ "the-end": "html/syntax/parsing",
+ "coercing-an-html-dom-into-an-infoset": "html/syntax/parsing",
+ "an-introduction-to-error-handling-and-strange-cases-in-the-parser": "html/syntax/parsing",
+ "misnested-tags:-b-i-/b-/i": "html/syntax/parsing",
+ "misnested-tags:-b-p-/b-/p": "html/syntax/parsing",
+ "unexpected-markup-in-tables": "html/syntax/parsing",
+ "scripts-that-modify-the-page-as-it-is-being-parsed": "html/syntax/parsing",
+ "the-execution-of-scripts-that-are-moving-across-multiple-documents": "html/syntax/parsing",
+ "unclosed-formatting-elements": "html/syntax/parsing",
+ "serializing-html-fragments": "html/syntax/serializing-html-fragments",
+ "parsing-html-fragments": "html/syntax/parsing-html-fragments",
+ "named-character-references": "html/syntax/named-character-references",
+ "the-xhtml-syntax": "html/the-xhtml-syntax",
+ "writing-xhtml-documents": "html/the-xhtml-syntax/writing-xhtml-documents",
+ "parsing-xhtml-documents": "html/the-xhtml-syntax/parsing-xhtml-documents",
+ "serializing-xhtml-fragments": "html/the-xhtml-syntax/serializing-xhtml-fragments",
+ "parsing-xhtml-fragments": "html/the-xhtml-syntax/parsing-xhtml-fragments",
+ "rendering": "html/rendering",
+ "introduction-8": "html/rendering/introduction-8",
+ "the-css-user-agent-style-sheet-and-presentational-hints": "html/rendering/the-css-user-agent-style-sheet-and-presentational-hints",
+ "non-replaced-elements": "html/rendering/non-replaced-elements",
+ "hidden-elements": "html/rendering/non-replaced-elements/hidden-elements",
+ "the-page": "html/rendering/non-replaced-elements/the-page",
+ "flow-content-0": "html/rendering/non-replaced-elements/flow-content-0",
+ "phrasing-content-0": "html/rendering/non-replaced-elements/phrasing-content-0",
+ "bidirectional-text": "html/rendering/non-replaced-elements/bidirectional-text",
+ "quotes": "html/rendering/non-replaced-elements/quotes",
+ "sections-and-headings": "html/rendering/non-replaced-elements/sections-and-headings",
+ "lists": "html/rendering/non-replaced-elements/lists",
+ "tables": "html/rendering/non-replaced-elements/tables",
+ "form-controls": "html/rendering/non-replaced-elements/form-controls",
+ "the-hr-element-0": "html/rendering/non-replaced-elements/the-hr-element-0",
+ "the-fieldset-element-0": "html/rendering/non-replaced-elements/the-fieldset-element-0",
+ "replaced-elements": "html/rendering/replaced-elements",
+ "embedded-content-1": "html/rendering/replaced-elements/embedded-content-1",
+ "images": "html/rendering/replaced-elements/images",
+ "attributes-for-embedded-content-and-images": "html/rendering/replaced-elements/attributes-for-embedded-content-and-images",
+ "image-maps-0": "html/rendering/replaced-elements/image-maps-0",
+ "toolbars-0": "html/rendering/replaced-elements/toolbars-0",
+ "bindings": "html/rendering/bindings",
+ "introduction-9": "html/rendering/bindings/introduction-9",
+ "the-button-element-0": "html/rendering/bindings/the-button-element-0",
+ "the-details-element-0": "html/rendering/bindings/the-details-element-0",
+ "the-input-element-as-a-text-entry-widget": "html/rendering/bindings/the-input-element-as-a-text-entry-widget",
+ "the-input-element-as-domain-specific-widgets": "html/rendering/bindings/the-input-element-as-domain-specific-widgets",
+ "the-input-element-as-a-range-control": "html/rendering/bindings/the-input-element-as-a-range-control",
+ "the-input-element-as-a-color-well": "html/rendering/bindings/the-input-element-as-a-color-well",
+ "the-input-element-as-a-checkbox-and-radio-button-widgets": "html/rendering/bindings/the-input-element-as-a-checkbox-and-radio-button-widgets",
+ "the-input-element-as-a-file-upload-control": "html/rendering/bindings/the-input-element-as-a-file-upload-control",
+ "the-input-element-as-a-button": "html/rendering/bindings/the-input-element-as-a-button",
+ "the-marquee-element": "html/rendering/bindings/the-marquee-element",
+ "the-meter-element-0": "html/rendering/bindings/the-meter-element-0",
+ "the-progress-element-0": "html/rendering/bindings/the-progress-element-0",
+ "the-select-element-0": "html/rendering/bindings/the-select-element-0",
+ "the-textarea-element-0": "html/rendering/bindings/the-textarea-element-0",
+ "the-keygen-element-0": "html/rendering/bindings/the-keygen-element-0",
+ "frames-and-framesets": "html/rendering/frames-and-framesets",
+ "interactive-media": "html/rendering/interactive-media",
+ "links,-forms,-and-navigation": "html/rendering/interactive-media/links-forms-and-navigation",
+ "the-title-attribute-0": "html/rendering/interactive-media/the-title-attribute-0",
+ "editing-hosts": "html/rendering/interactive-media/editing-hosts",
+ "text-rendered-in-native-user-interfaces": "html/rendering/interactive-media/text-rendered-in-native-user-interfaces",
+ "print-media": "html/rendering/print-media",
+ "unstyled-xml-documents": "html/rendering/unstyled-xml-documents",
+ "obsolete": "html/obsolete",
+ "obsolete-but-conforming-features": "html/obsolete/obsolete-but-conforming-features",
+ "warnings-for-obsolete-but-conforming-features": "html/obsolete/obsolete-but-conforming-features/warnings-for-obsolete-but-conforming-features",
+ "non-conforming-features": "html/obsolete/non-conforming-features",
+ "requirements-for-implementations": "html/obsolete/requirements-for-implementations",
+ "the-applet-element": "html/obsolete/requirements-for-implementations/the-applet-element",
+ "the-marquee-element-0": "html/obsolete/requirements-for-implementations/the-marquee-element-0",
+ "frames": "html/obsolete/requirements-for-implementations/frames",
+ "other-elements,-attributes-and-apis": "html/obsolete/requirements-for-implementations/other-elements-attributes-and-apis",
+ "iana": "microdata/iana",
+ "text/html": "html/iana/text-html",
+ "multipart/x-mixed-replace": "html/iana/multipart-x-mixed-replace",
+ "application/xhtml+xml": "html/iana/application-xhtml-xml",
+ "application/x-www-form-urlencoded": "html/iana/application-x-www-form-urlencoded",
+ "text/cache-manifest": "html/iana/text-cache-manifest",
+ "ping-to": "html/iana/ping-to",
+ "web+-scheme-prefix": "html/iana/web-scheme-prefix",
+ "the-canvas-state": "canvas2d/the-canvas-state",
+ "drawingstyle-objects": "canvas2d/drawingstyle-objects",
+ "line-styles": "canvas2d/line-styles",
+ "text-styles": "canvas2d/text-styles",
+ "building-paths": "canvas2d/building-paths",
+ "path-objects": "canvas2d/path-objects",
+ "transformations": "canvas2d/transformations",
+ "fill-and-stroke-styles": "canvas2d/fill-and-stroke-styles",
+ "drawing-rectangles-to-the-canvas": "canvas2d/drawing-rectangles-to-the-canvas",
+ "drawing-text-to-the-canvas": "canvas2d/drawing-text-to-the-canvas",
+ "drawing-paths-to-the-canvas": "canvas2d/drawing-paths-to-the-canvas",
+ "drawing-images-to-the-canvas": "canvas2d/drawing-images-to-the-canvas",
+ "hit-regions": "canvas2d/hit-regions",
+ "pixel-manipulation": "canvas2d/pixel-manipulation",
+ "compositing": "canvas2d/compositing",
+ "shadows": "canvas2d/shadows",
+ "drawing-model": "canvas2d/drawing-model",
+ "best-practices": "canvas2d/best-practices",
+ "overview": "microdata/introduction/overview",
+ "the-basic-syntax": "microdata/introduction/the-basic-syntax",
+ "typed-items": "microdata/introduction/typed-items",
+ "global-identifiers-for-items": "microdata/introduction/global-identifiers-for-items",
+ "selecting-names-when-defining-vocabularies": "microdata/introduction/selecting-names-when-defining-vocabularies",
+ "using-the-microdata-dom-api": "microdata/introduction/using-the-microdata-dom-api",
+ "encoding-microdata": "microdata/encoding-microdata",
+ "the-microdata-model": "microdata/encoding-microdata/the-microdata-model",
+ "items": "microdata/encoding-microdata/items",
+ "names:-the-itemprop-attribute": "microdata/encoding-microdata/names-the-itemprop-attribute",
+ "values": "microdata/encoding-microdata/values",
+ "associating-names-with-items": "microdata/encoding-microdata/associating-names-with-items",
+ "microdata-and-other-namespaces": "microdata/encoding-microdata/microdata-and-other-namespaces",
+ "microdata-dom-api": "microdata/microdata-dom-api",
+ "htmlpropertiescollection": "microdata/microdata-dom-api/htmlpropertiescollection",
+ "converting-html-to-other-formats": "microdata/converting-html-to-other-formats",
+ "json": "microdata/converting-html-to-other-formats/json"
+}
\ No newline at end of file
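The mapping above associates spec section fragment ids with repo-relative test directories; the update-directory-structure.js script later in this commit writes it out as scripts/id2path.json. As a minimal sketch (not part of this commit) of how such a mapping could be consumed, assuming the file has been saved locally as id2path.json:

    # Minimal sketch (not in this commit): resolve a spec fragment id to its
    # test directory, assuming the mapping above is saved as id2path.json.
    import json

    def load_id2path(path="id2path.json"):
        with open(path) as f:
            return json.load(f)

    def test_dir_for(fragment_id, mapping):
        # Keys are spec section ids, values are repo-relative test directories.
        return mapping.get(fragment_id)

    if __name__ == "__main__":
        mapping = load_id2path()
        # Per the mapping above this prints:
        # html/semantics/embedded-content-0/the-img-element
        print(test_dir_for("the-img-element", mapping))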
diff --git a/testing/web-platform/tests/tools/scripts/manifest.js b/testing/web-platform/tests/tools/scripts/manifest.js
new file mode 100644
index 000000000..fbd556825
--- /dev/null
+++ b/testing/web-platform/tests/tools/scripts/manifest.js
@@ -0,0 +1,140 @@
+// grab test metadata from a test file
+function __result_handler() {
+
+ function __get_metadata() {
+ var obj = new Object();
+ var author = [];
+ var assert = [];
+ var help = [];
+ var match = [];
+ var mismatch = [];
+ var flags = [];
+ var nodes;
+
+ nodes = document.querySelectorAll('link[rel="author"]');
+ for (var i = 0; i < nodes.length; i++) {
+ var href = nodes[i].getAttribute("href");
+ var title = nodes[i].getAttribute("title");
+ var s = title;
+ if (href != null) {
+ s += " <" + href + ">";
+ }
+ author.push(s);
+ }
+ if (nodes.length > 0) obj.author = author;
+ nodes = document.querySelectorAll('meta[name="assert"]');
+ for (var i = 0; i < nodes.length; i++) {
+ assert.push(nodes[i].getAttribute("content"));
+ }
+ if (nodes.length > 0) obj.assert = assert;
+ nodes = document.querySelectorAll('link[rel="help"]');
+ for (var i = 0; i < nodes.length; i++) {
+ help.push(nodes[i].getAttribute("href"));
+ }
+ if (nodes.length > 0) obj.help = help;
+ nodes = document.querySelectorAll('link[rel="match"]');
+ for (var i = 0; i < nodes.length; i++) {
+ match.push(nodes[i].getAttribute("href"));
+ }
+ if (nodes.length > 0) obj.match = match;
+ nodes = document.querySelectorAll('link[rel="mismatch"]');
+ for (var i = 0; i < nodes.length; i++) {
+ mismatch.push(nodes[i].getAttribute("href"));
+ }
+        if (nodes.length > 0) obj.mismatch = mismatch;
+ nodes = document.querySelectorAll('meta[name="flags"]');
+ for (var i = 0; i < nodes.length; i++) {
+ flags.push(nodes[i].getAttribute("content"));
+ }
+ if (nodes.length > 0) obj.flags = flags;
+
+ return obj;
+ }
+
+ var meta = __get_metadata();
+ var nodes;
+
+ function copy(obj, prop, arr) {
+ if (typeof arr !== "undefined") {
+ var a = [];
+ for (var i = 0; i<arr.length;i++) {
+ a[i] = arr[i];
+ }
+ obj[prop] = a;
+ }
+ }
+
+
+ var ret = new Object();
+ ret.location = document.location.href;
+ ret.type = "manual";
+ ret.tests = new Object();
+
+ var node = document.querySelector('script[src$="/resources/testharness.js"]');
+ if (node !== null) {
+ ret.type = "script";
+ }
+
+ if (ret.type === "script") {
+ if (typeof metadata_generator === "undefined"
+ || Object.keys(metadata_generator.currentMetadata).length === 0)
+ return "WRAPPER:TRY_AGAIN";
+ else {
+ for (var key in metadata_generator.currentMetadata) {
+ var obj = metadata_generator.currentMetadata[key];
+ var newtest = new Object();
+ ret.tests[key]= newtest;
+ if (typeof obj.help === "undefined") {
+ copy(newtest, "help", meta.help);
+ } else if (typeof obj.help === "string") {
+ newtest.help = [ obj.help ];
+ }
+ if (typeof obj.author === "undefined") {
+ copy(newtest, "author", meta.author);
+ } else if (typeof obj.author === "string") {
+ newtest.author = [ obj.author ];
+ }
+ if (typeof obj.assert === "undefined") {
+ copy(newtest, "assert", meta.assert);
+ } else if (typeof obj.assert === "string") {
+ newtest.assert = [ obj.assert ];
+ }
+ copy(newtest, "match", meta.match);
+ copy(newtest, "mismatch", meta.mismatch);
+ copy(newtest, "flags", meta.flags);
+ }
+ return ret;
+ }
+ } else {
+ var newtest = meta;
+ ret.tests[document.title]= newtest;
+
+ if (typeof newtest.match !== "undefined"
+ || typeof newtest.mismatch !== "undefined") {
+ ret.type = "reftest";
+ }
+
+ return ret;
+ }
+
+}
+
+function __give_up() {
+ var ret = new Object();
+ ret.location = document.location.href;
+ ret.type = "manual";
+ ret.tests = new Object();
+
+    var newtest = __get_metadata();
+    ret.tests[document.title] = newtest;
+
+    var node = document.querySelector('script[src$="/resources/testharness.js"]');
+    if (node !== null) {
+        ret.type = "script";
+    } else if (typeof newtest.match !== "undefined"
+               || typeof newtest.mismatch !== "undefined") {
+        ret.type = "reftest";
+    }
+
+ return ret;
+}
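manifest.js is meant to be evaluated inside a loaded test page: __result_handler() returns a per-page description (type plus per-test metadata) and the sentinel string "WRAPPER:TRY_AGAIN" when the testharness.js metadata is not populated yet, while __give_up() produces a best-effort fallback. The harness that actually injects the script is not part of this diff; as one hedged illustration only, a Selenium-driven caller could look roughly like this (Selenium, Firefox, and the retry count are assumptions, not the tooling used here):

    # Hedged sketch only: one way to drive manifest.js from Python via Selenium.
    # Selenium is an assumption; the harness actually used is not in this diff.
    import time
    from selenium import webdriver

    def collect_page_metadata(url, manifest_js_source, retries=10):
        driver = webdriver.Firefox()
        try:
            driver.get(url)
            for _ in range(retries):
                # Define the helpers from manifest.js, then invoke __result_handler().
                result = driver.execute_script(
                    manifest_js_source + "\nreturn __result_handler();")
                if result != "WRAPPER:TRY_AGAIN":
                    return result  # dict with location, type and tests
                time.sleep(1)      # testharness.js metadata not ready yet
            return None
        finally:
            driver.quit()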
diff --git a/testing/web-platform/tests/tools/scripts/package.json b/testing/web-platform/tests/tools/scripts/package.json
new file mode 100644
index 000000000..5618f6691
--- /dev/null
+++ b/testing/web-platform/tests/tools/scripts/package.json
@@ -0,0 +1,11 @@
+{
+ "name": "convert"
+, "version": "0.0.1"
+, "private": true
+, "dependencies": {
+ "mkdirp": "*"
+ , "wrench": "*"
+ , "underscore": "*"
+ , "jsdom": "*"
+ }
+}
diff --git a/testing/web-platform/tests/tools/scripts/toc.js b/testing/web-platform/tests/tools/scripts/toc.js
new file mode 100644
index 000000000..120d4be03
--- /dev/null
+++ b/testing/web-platform/tests/tools/scripts/toc.js
@@ -0,0 +1,107 @@
+// grab the table of contents filled with all the anchors
+function __result_handler() {
+ function getMap() {
+ var toc_element = document.getElementById("contents").nextElementSibling;
+
+ function getSection() {
+ function getIds(node) {
+ var a = [];
+
+ var nodes = node.querySelectorAll('*[id]');
+ for (var i = 0; i < nodes.length; i++) {
+ a.push(nodes[i].getAttribute("id"));
+ }
+ return a;
+ }
+
+ function getTOCIds() {
+ var a = [];
+
+ var nodes = toc_element.querySelectorAll('li');
+ for (var i = 0; i < nodes.length; i++) {
+ var href = nodes[i].firstElementChild.getAttribute("href");
+ a.push(href.substring(1));
+ }
+ return a;
+ }
+
+ var obj = new Object();
+ var ids = getIds(document);
+ var toc = getTOCIds();
+
+ for (var i = 1; i < toc.length; i++) {
+ var key1 = toc[i-1];
+ var key2 = toc[i];
+ var map = [];
+
+ var index1 = ids.indexOf(key1);
+ var index2 = ids.indexOf(key2);
+
+ if ((index2-index1) > 1) {
+ for (var j = index1+1; j < index2;j++) {
+ map.push(ids[j]);
+ }
+ }
+
+ obj[key1] = map;
+ }
+ {
+ var key = toc[toc.length-1];
+ var index = ids.indexOf(key);
+ var map = [];
+
+ for (var j = index+1; j < ids.length;j++) {
+ map.push(ids[j]);
+ }
+ obj[key] = map;
+ }
+
+ return obj;
+ }
+
+ function section(id) {
+ this.id = id;
+ }
+ function addSubSection(section, sub) {
+ if (typeof section.sections === "undefined") {
+ section.sections = [];
+ }
+ section.sections.push(sub);
+ }
+
+ function li(el, map) {
+ var obj = new section(el.firstElementChild.getAttribute("href").substring(1));
+ obj.title = el.firstElementChild.textContent;
+ var child = el.firstElementChild;
+
+ var m = map[obj.id];
+ for (var i = 0; i < m.length; i++) {
+ var sub = new section(m[i]);
+ addSubSection(obj, sub);
+ }
+ while (child !== null) {
+ if (child.nodeName === "OL") ol(child, obj, map);
+ child = child.nextElementSibling;
+ }
+ return obj;
+ }
+
+ function ol(el, section, map) {
+ var child = el.firstElementChild;
+ while (child !== null) {
+ addSubSection(section, li(child, map));
+ child = child.nextElementSibling;
+ }
+ }
+
+ var map = getSection();
+ var main = new section("___main___");
+ main.title = document.title;
+
+ ol(toc_element, main, map);
+
+ return main;
+ }
+
+ return getMap();
+}
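toc.js walks the spec's table of contents and returns a tree of section objects: each node carries the TOC entry's fragment id, its link text as the title, and a sections array holding both nested TOC entries and the bare anchor ids found between entries (those have an id only, no title). Purely for orientation, the returned value has roughly this shape, written here as an equivalent Python literal with made-up titles:

    # Illustrative shape of the tree returned by toc.js (ids/titles invented).
    toc = {
        "id": "___main___",
        "title": "Example Spec",            # document.title of the spec page
        "sections": [
            {
                "id": "semantics",          # TOC entry href, minus the "#"
                "title": "4 The elements of HTML",
                "sections": [
                    {"id": "the-img-element", "title": "4.8.1 The img element"},
                    {"id": "alt"},          # bare anchor id, no TOC entry
                ],
            },
        ],
    }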
diff --git a/testing/web-platform/tests/tools/scripts/update-directory-structure.js b/testing/web-platform/tests/tools/scripts/update-directory-structure.js
new file mode 100644
index 000000000..7d73d93ac
--- /dev/null
+++ b/testing/web-platform/tests/tools/scripts/update-directory-structure.js
@@ -0,0 +1,106 @@
+
+// convert from old-style test structure to new style
+
+var fs = require("fs")
+, pth = require("path")
+, jsdom = require("jsdom")
+, mkdirp = require("mkdirp").sync
+, testDir = pth.join(__dirname, "../..")
+, MAX_DEPTH = 3
+, id2path = {}
+, limitDepth = {
+ parsing: true
+ }
+;
+
+var sections = {
+ html: "http://www.w3.org/html/wg/drafts/html/master/Overview.html"
+, canvas2d: "http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/Overview.html"
+, microdata: "http://www.w3.org/html/wg/drafts/microdata/master/Overview.html"
+};
+
+function walkTree ($, $el, list) {
+ $el.find("> li").each(function () {
+ var $li = $(this)
+ , $a = $li.find("> a").first()
+ ;
+ // skip sections that don't have a number
+ if (!/^\s*\d+/.test($a.text())) return;
+ var href = $a.attr("href").replace(/^.*#/, "")
+ , def = {
+ id: href.toLowerCase()
+ .replace(/[^a-z0-9\-]/g, "-")
+ .replace(/\-{2,}/g, "-")
+ .replace(/(?:^\-|\-$)/g, "")
+ , original_id: href
+ }
+ , $ol = $li.find("> ol").first()
+ ;
+ if ($ol.length) {
+ def.children = [];
+ walkTree($, $ol, def.children);
+ }
+ list.push(def);
+ });
+}
+
+function extractSections (sec, secDir, spec, cb) {
+ jsdom.env(
+ spec
+ , function (err, window) {
+ if (err) return cb(err);
+ jsdom.jQueryify(window, "https://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js", function (window, $) {
+ if (!$) return cb("$ was not defined");
+ var $root = $("body > ol.toc").first()
+ , tree = []
+ ;
+ walkTree($, $root, tree);
+ cb(null, tree, sec, secDir);
+ }
+ );
+ });
+}
+
+function makeID2Path (base, tree) {
+ for (var i = 0, n = tree.length; i < n; i++) {
+ var sec = tree[i];
+ id2path[sec.original_id] = base;
+ if (sec.children && sec.children.length) makeID2Path(base, sec.children);
+ }
+}
+
+function makeDirs (base, tree, depth) {
+ console.log("Making " + base + " at depth " + depth);
+ for (var i = 0, n = tree.length; i < n; i++) {
+ var sec = tree[i]
+ , path = pth.join(base, sec.id)
+ ;
+ mkdirp(path);
+ fs.writeFileSync(pth.join(path, ".gitkeep"), "", "utf8");
+ id2path[sec.original_id] = path;
+ if (sec.id !== sec.original_id) {
+ fs.writeFileSync(pth.join(path, "original-id.json"), JSON.stringify({ original_id: sec.original_id}), "utf8");
+ }
+ if (sec.children && sec.children.length) {
+ if (depth === MAX_DEPTH || limitDepth[sec.id]) {
+ fs.writeFileSync(pth.join(path, "contains.json"), JSON.stringify(sec.children, null, 4), "utf8");
+ makeID2Path(path, sec.children);
+ }
+ else {
+ makeDirs(path, sec.children, depth + 1);
+ }
+ }
+ }
+}
+
+for (var sec in sections) {
+ var secDir = pth.join(testDir, sec);
+ mkdirp(secDir);
+ console.log("Launching extraction for " + sec);
+ extractSections(sec, secDir, sections[sec], function (err, toc, sec, secDir) {
+ if (err) return console.log("ERROR: " + err);
+ makeDirs(secDir, toc, 1);
+ for (var k in id2path) id2path[k] = id2path[k].replace(testDir + "/", "");
+ fs.writeFileSync(pth.join(__dirname, "id2path.json"), JSON.stringify(id2path, null, 4), "utf8");
+ });
+}
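update-directory-structure.js is a one-shot Node conversion script: it fetches the three spec TOCs, creates the directory skeleton (with .gitkeep, contains.json and original-id.json markers), and writes scripts/id2path.json. It expects the dependencies from package.json above to have been installed with npm first. A hedged sketch of invoking it and checking its output from Python (the working directory and the "node" binary name are assumptions, not documented in this diff):

    # Hedged sketch (not in this commit): run the converter, inspect its output.
    # The scripts directory path and the "node" binary name are assumptions.
    import json
    import os
    import subprocess

    scripts_dir = "testing/web-platform/tests/tools/scripts"
    subprocess.check_call(["node", "update-directory-structure.js"], cwd=scripts_dir)

    with open(os.path.join(scripts_dir, "id2path.json")) as f:
        id2path = json.load(f)
    print("%d section ids mapped" % len(id2path))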
diff --git a/testing/web-platform/tests/tools/serve/__init__.py b/testing/web-platform/tests/tools/serve/__init__.py
new file mode 100644
index 000000000..fc2f2f0fd
--- /dev/null
+++ b/testing/web-platform/tests/tools/serve/__init__.py
@@ -0,0 +1 @@
+import serve
diff --git a/testing/web-platform/tests/tools/serve/serve.py b/testing/web-platform/tests/tools/serve/serve.py
new file mode 100644
index 000000000..fc0831351
--- /dev/null
+++ b/testing/web-platform/tests/tools/serve/serve.py
@@ -0,0 +1,617 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import print_function
+
+import argparse
+import json
+import os
+import re
+import signal
+import socket
+import sys
+import threading
+import time
+import traceback
+import urllib2
+import uuid
+from collections import defaultdict, OrderedDict
+from multiprocessing import Process, Event
+
+from ..localpaths import repo_root
+
+import sslutils
+from manifest.sourcefile import meta_re
+from wptserve import server as wptserve, handlers
+from wptserve import stash
+from wptserve.logger import set_logger
+from wptserve.handlers import filesystem_path
+from mod_pywebsocket import standalone as pywebsocket
+
+def replace_end(s, old, new):
+ """
+ Given a string `s` that ends with `old`, replace that occurrence of `old`
+ with `new`.
+ """
+ assert s.endswith(old)
+ return s[:-len(old)] + new
+
+
+class WorkersHandler(object):
+ def __init__(self, base_path=None, url_base="/"):
+ self.base_path = base_path
+ self.url_base = url_base
+ self.handler = handlers.handler(self.handle_request)
+
+ def __call__(self, request, response):
+ return self.handler(request, response)
+
+ def handle_request(self, request, response):
+ worker_path = replace_end(request.url_parts.path, ".worker", ".worker.js")
+ meta = self._get_meta(request)
+ return """<!doctype html>
+<meta charset=utf-8>
+%(meta)s
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<div id=log></div>
+<script>
+fetch_tests_from_worker(new Worker("%(worker_path)s"));
+</script>
+""" % {"meta": meta, "worker_path": worker_path}
+
+ def _get_meta(self, request):
+ path = filesystem_path(self.base_path, request, self.url_base)
+ path = path.replace(".worker", ".worker.js")
+ meta_values = []
+ with open(path) as f:
+ for line in f:
+ m = meta_re.match(line)
+ if m:
+ name, content = m.groups()
+ name = name.replace('"', '\\"').replace(">", "&gt;")
+ content = content.replace('"', '\\"').replace(">", "&gt;")
+ meta_values.append((name, content))
+ return "\n".join('<meta name="%s" content="%s">' % item for item in meta_values)
+
+
+
+class AnyHtmlHandler(object):
+ def __init__(self):
+ self.handler = handlers.handler(self.handle_request)
+
+ def __call__(self, request, response):
+ return self.handler(request, response)
+
+ def handle_request(self, request, response):
+ test_path = replace_end(request.url_parts.path, ".any.html", ".any.js")
+ return """\
+<!doctype html>
+<meta charset=utf-8>
+<script>
+self.GLOBAL = {
+ isWindow: function() { return true; },
+ isWorker: function() { return false; },
+};
+</script>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<div id=log></div>
+<script src="%s"></script>
+""" % (test_path,)
+
+
+class AnyWorkerHandler(object):
+ def __init__(self):
+ self.handler = handlers.handler(self.handle_request)
+
+ def __call__(self, request, response):
+ return self.handler(request, response)
+
+ def handle_request(self, request, response):
+ test_path = replace_end(request.url_parts.path, ".any.worker.js", ".any.js")
+ return """\
+self.GLOBAL = {
+ isWindow: function() { return false; },
+ isWorker: function() { return true; },
+};
+importScripts("/resources/testharness.js");
+importScripts("%s");
+done();
+""" % (test_path,)
+
+
+rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
+
+subdomains = [u"www",
+ u"www1",
+ u"www2",
+              u"天気の良い日",
+ u"élève"]
+
+class RoutesBuilder(object):
+ def __init__(self):
+ self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
+ ("POST", "/tools/runner/update_manifest.py",
+ handlers.python_script_handler)]
+
+ self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
+ ("*", "/tools/*", handlers.ErrorHandler(404)),
+ ("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
+ ("*", "/serve.py", handlers.ErrorHandler(404))]
+
+ self.static = [
+ ("GET", "*.worker", WorkersHandler()),
+ ("GET", "*.any.html", AnyHtmlHandler()),
+ ("GET", "*.any.worker.js", AnyWorkerHandler()),
+ ]
+
+ self.mountpoint_routes = OrderedDict()
+
+ self.add_mount_point("/", None)
+
+ def get_routes(self):
+ routes = self.forbidden_override + self.forbidden + self.static
+ # Using reversed here means that mount points that are added later
+ # get higher priority. This makes sense since / is typically added
+ # first.
+ for item in reversed(self.mountpoint_routes.values()):
+ routes.extend(item)
+ return routes
+
+ def add_static(self, path, format_args, content_type, route):
+ handler = handlers.StaticHandler(path, format_args, content_type)
+ self.static.append((b"GET", str(route), handler))
+
+ def add_mount_point(self, url_base, path):
+ url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
+
+ self.mountpoint_routes[url_base] = []
+
+ routes = [("GET", "*.asis", handlers.AsIsHandler),
+ ("*", "*.py", handlers.PythonScriptHandler),
+ ("GET", "*", handlers.FileHandler)]
+
+ for (method, suffix, handler_cls) in routes:
+ self.mountpoint_routes[url_base].append(
+ (method,
+ b"%s%s" % (str(url_base) if url_base != "/" else "", str(suffix)),
+ handler_cls(base_path=path, url_base=url_base)))
+
+
+def default_routes():
+ return RoutesBuilder().get_routes()
+
+
+def setup_logger(level):
+ import logging
+ global logger
+ logger = logging.getLogger("web-platform-tests")
+ logging.basicConfig(level=getattr(logging, level.upper()))
+ set_logger(logger)
+
+
+def open_socket(port):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if port != 0:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(('127.0.0.1', port))
+ sock.listen(5)
+ return sock
+
+
+def get_port():
+ free_socket = open_socket(0)
+ port = free_socket.getsockname()[1]
+ logger.debug("Going to use port %s" % port)
+ free_socket.close()
+ return port
+
+
+class ServerProc(object):
+ def __init__(self):
+ self.proc = None
+ self.daemon = None
+ self.stop = Event()
+
+ def start(self, init_func, host, port, paths, routes, bind_hostname, external_config,
+ ssl_config, **kwargs):
+ self.proc = Process(target=self.create_daemon,
+ args=(init_func, host, port, paths, routes, bind_hostname,
+ external_config, ssl_config))
+ self.proc.daemon = True
+ self.proc.start()
+
+ def create_daemon(self, init_func, host, port, paths, routes, bind_hostname,
+ external_config, ssl_config, **kwargs):
+ try:
+ self.daemon = init_func(host, port, paths, routes, bind_hostname, external_config,
+ ssl_config, **kwargs)
+ except socket.error:
+ print("Socket error on port %s" % port, file=sys.stderr)
+ raise
+ except:
+ print(traceback.format_exc(), file=sys.stderr)
+ raise
+
+ if self.daemon:
+ try:
+ self.daemon.start(block=False)
+ try:
+ self.stop.wait()
+ except KeyboardInterrupt:
+ pass
+ except:
+ print(traceback.format_exc(), file=sys.stderr)
+ raise
+
+ def wait(self):
+ self.stop.set()
+ self.proc.join()
+
+ def kill(self):
+ self.stop.set()
+ self.proc.terminate()
+ self.proc.join()
+
+ def is_alive(self):
+ return self.proc.is_alive()
+
+
+def check_subdomains(host, paths, bind_hostname, ssl_config):
+ port = get_port()
+ subdomains = get_subdomains(host)
+
+ wrapper = ServerProc()
+ wrapper.start(start_http_server, host, port, paths, default_routes(), bind_hostname,
+ None, ssl_config)
+
+ connected = False
+ for i in range(10):
+ try:
+ urllib2.urlopen("http://%s:%d/" % (host, port))
+ connected = True
+ break
+ except urllib2.URLError:
+ time.sleep(1)
+
+ if not connected:
+        logger.critical("Failed to connect to test server on http://%s:%s. You may need to edit /etc/hosts or similar." % (host, port))
+ sys.exit(1)
+
+ for subdomain, (punycode, host) in subdomains.iteritems():
+ domain = "%s.%s" % (punycode, host)
+ try:
+ urllib2.urlopen("http://%s:%d/" % (domain, port))
+ except Exception as e:
+ logger.critical("Failed probing domain %s. You may need to edit /etc/hosts or similar." % domain)
+ sys.exit(1)
+
+ wrapper.wait()
+
+
+def get_subdomains(host):
+    # This assumes that the tld is ascii-only or already in punycode
+ return {subdomain: (subdomain.encode("idna"), host)
+ for subdomain in subdomains}
+
+
+def start_servers(host, ports, paths, routes, bind_hostname, external_config, ssl_config,
+ **kwargs):
+ servers = defaultdict(list)
+ for scheme, ports in ports.iteritems():
+ assert len(ports) == {"http":2}.get(scheme, 1)
+
+ for port in ports:
+ if port is None:
+ continue
+ init_func = {"http":start_http_server,
+ "https":start_https_server,
+ "ws":start_ws_server,
+ "wss":start_wss_server}[scheme]
+
+ server_proc = ServerProc()
+ server_proc.start(init_func, host, port, paths, routes, bind_hostname,
+ external_config, ssl_config, **kwargs)
+ servers[scheme].append((port, server_proc))
+
+ return servers
+
+
+def start_http_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
+ **kwargs):
+ return wptserve.WebTestHttpd(host=host,
+ port=port,
+ doc_root=paths["doc_root"],
+ routes=routes,
+ rewrites=rewrites,
+ bind_hostname=bind_hostname,
+ config=external_config,
+ use_ssl=False,
+ key_file=None,
+ certificate=None,
+ latency=kwargs.get("latency"))
+
+
+def start_https_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
+ **kwargs):
+ return wptserve.WebTestHttpd(host=host,
+ port=port,
+ doc_root=paths["doc_root"],
+ routes=routes,
+ rewrites=rewrites,
+ bind_hostname=bind_hostname,
+ config=external_config,
+ use_ssl=True,
+ key_file=ssl_config["key_path"],
+ certificate=ssl_config["cert_path"],
+ encrypt_after_connect=ssl_config["encrypt_after_connect"],
+ latency=kwargs.get("latency"))
+
+
+class WebSocketDaemon(object):
+ def __init__(self, host, port, doc_root, handlers_root, log_level, bind_hostname,
+ ssl_config):
+ self.host = host
+ cmd_args = ["-p", port,
+ "-d", doc_root,
+ "-w", handlers_root,
+ "--log-level", log_level]
+
+ if ssl_config is not None:
+ # This is usually done through pywebsocket.main, however we're
+ # working around that to get the server instance and manually
+ # setup the wss server.
+ if pywebsocket._import_ssl():
+ tls_module = pywebsocket._TLS_BY_STANDARD_MODULE
+ elif pywebsocket._import_pyopenssl():
+ tls_module = pywebsocket._TLS_BY_PYOPENSSL
+ else:
+ print("No SSL module available")
+ sys.exit(1)
+
+ cmd_args += ["--tls",
+ "--private-key", ssl_config["key_path"],
+ "--certificate", ssl_config["cert_path"],
+ "--tls-module", tls_module]
+
+        if bind_hostname:
+ cmd_args = ["-H", host] + cmd_args
+ opts, args = pywebsocket._parse_args_and_config(cmd_args)
+ opts.cgi_directories = []
+ opts.is_executable_method = None
+ self.server = pywebsocket.WebSocketServer(opts)
+ ports = [item[0].getsockname()[1] for item in self.server._sockets]
+ assert all(item == ports[0] for item in ports)
+ self.port = ports[0]
+ self.started = False
+ self.server_thread = None
+
+ def start(self, block=False):
+ self.started = True
+ if block:
+ self.server.serve_forever()
+ else:
+ self.server_thread = threading.Thread(target=self.server.serve_forever)
+ self.server_thread.setDaemon(True) # don't hang on exit
+ self.server_thread.start()
+
+ def stop(self):
+ """
+ Stops the server.
+
+ If the server is not running, this method has no effect.
+ """
+ if self.started:
+ try:
+ self.server.shutdown()
+ self.server.server_close()
+ self.server_thread.join()
+ self.server_thread = None
+ except AttributeError:
+ pass
+ self.started = False
+ self.server = None
+
+
+def start_ws_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
+ **kwargs):
+ return WebSocketDaemon(host,
+ str(port),
+ repo_root,
+ paths["ws_doc_root"],
+ "debug",
+ bind_hostname,
+                           ssl_config=None)
+
+
+def start_wss_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
+ **kwargs):
+ return WebSocketDaemon(host,
+ str(port),
+ repo_root,
+ paths["ws_doc_root"],
+ "debug",
+ bind_hostname,
+ ssl_config)
+
+
+def get_ports(config, ssl_environment):
+ rv = defaultdict(list)
+ for scheme, ports in config["ports"].iteritems():
+ for i, port in enumerate(ports):
+ if scheme in ["wss", "https"] and not ssl_environment.ssl_enabled:
+ port = None
+            if port == "auto":
+                port = get_port()
+ rv[scheme].append(port)
+ return rv
+
+
+
+def normalise_config(config, ports):
+ host = config["external_host"] if config["external_host"] else config["host"]
+ domains = get_subdomains(host)
+ ports_ = {}
+ for scheme, ports_used in ports.iteritems():
+ ports_[scheme] = ports_used
+
+ for key, value in domains.iteritems():
+ domains[key] = ".".join(value)
+
+ domains[""] = host
+
+ return {"host": host,
+ "domains": domains,
+ "ports": ports_}
+
+
+def get_ssl_config(config, external_domains, ssl_environment):
+ key_path, cert_path = ssl_environment.host_cert_path(external_domains)
+ return {"key_path": key_path,
+ "cert_path": cert_path,
+ "encrypt_after_connect": config["ssl"]["encrypt_after_connect"]}
+
+def start(config, ssl_environment, routes, **kwargs):
+ host = config["host"]
+ domains = get_subdomains(host)
+ ports = get_ports(config, ssl_environment)
+ bind_hostname = config["bind_hostname"]
+
+ paths = {"doc_root": config["doc_root"],
+ "ws_doc_root": config["ws_doc_root"]}
+
+ external_config = normalise_config(config, ports)
+
+ ssl_config = get_ssl_config(config, external_config["domains"].values(), ssl_environment)
+
+ if config["check_subdomains"]:
+ check_subdomains(host, paths, bind_hostname, ssl_config)
+
+ servers = start_servers(host, ports, paths, routes, bind_hostname, external_config,
+ ssl_config, **kwargs)
+
+ return external_config, servers
+
+
+def iter_procs(servers):
+ for servers in servers.values():
+ for port, server in servers:
+ yield server.proc
+
+
+def value_set(config, key):
+ return key in config and config[key] is not None
+
+
+def get_value_or_default(config, key, default=None):
+ return config[key] if value_set(config, key) else default
+
+
+def set_computed_defaults(config):
+ if not value_set(config, "doc_root"):
+ config["doc_root"] = repo_root
+
+ if not value_set(config, "ws_doc_root"):
+ root = get_value_or_default(config, "doc_root", default=repo_root)
+ config["ws_doc_root"] = os.path.join(root, "websockets", "handlers")
+
+
+def merge_json(base_obj, override_obj):
+ rv = {}
+ for key, value in base_obj.iteritems():
+ if key not in override_obj:
+ rv[key] = value
+ else:
+ if isinstance(value, dict):
+ rv[key] = merge_json(value, override_obj[key])
+ else:
+ rv[key] = override_obj[key]
+ return rv
+
+
+def get_ssl_environment(config):
+ implementation_type = config["ssl"]["type"]
+ cls = sslutils.environments[implementation_type]
+ try:
+ kwargs = config["ssl"][implementation_type].copy()
+ except KeyError:
+        raise ValueError("%s is not a valid ssl type." % implementation_type)
+ return cls(logger, **kwargs)
+
+
+def load_config(default_path, override_path=None, **kwargs):
+ if os.path.exists(default_path):
+ with open(default_path) as f:
+ base_obj = json.load(f)
+ else:
+ raise ValueError("Config path %s does not exist" % default_path)
+
+    if override_path is not None and os.path.exists(override_path):
+ with open(override_path) as f:
+ override_obj = json.load(f)
+ else:
+ override_obj = {}
+ rv = merge_json(base_obj, override_obj)
+
+ if kwargs.get("config_path"):
+ other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
+ if os.path.exists(other_path):
+ base_obj = rv
+ with open(other_path) as f:
+ override_obj = json.load(f)
+ rv = merge_json(base_obj, override_obj)
+ else:
+ raise ValueError("Config path %s does not exist" % other_path)
+
+ overriding_path_args = [("doc_root", "Document root"),
+ ("ws_doc_root", "WebSockets document root")]
+ for key, title in overriding_path_args:
+ value = kwargs.get(key)
+ if value is None:
+ continue
+ value = os.path.abspath(os.path.expanduser(value))
+ if not os.path.exists(value):
+ raise ValueError("%s path %s does not exist" % (title, value))
+ rv[key] = value
+
+ set_computed_defaults(rv)
+ return rv
+
+
+def get_parser():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--latency", type=int,
+ help="Artificial latency to add before sending http responses, in ms")
+ parser.add_argument("--config", action="store", dest="config_path",
+ help="Path to external config file")
+ parser.add_argument("--doc_root", action="store", dest="doc_root",
+ help="Path to document root. Overrides config.")
+ parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
+ help="Path to WebSockets document root. Overrides config.")
+ return parser
+
+
+def main():
+ kwargs = vars(get_parser().parse_args())
+ config = load_config("config.default.json",
+ "config.json",
+ **kwargs)
+
+ setup_logger(config["log_level"])
+
+ with stash.StashServer((config["host"], get_port()), authkey=str(uuid.uuid4())):
+ with get_ssl_environment(config) as ssl_env:
+ config_, servers = start(config, ssl_env, default_routes(), **kwargs)
+
+ try:
+ while any(item.is_alive() for item in iter_procs(servers)):
+ for item in iter_procs(servers):
+ item.join(1)
+ except KeyboardInterrupt:
+ logger.info("Shutting down")
diff --git a/testing/web-platform/tests/tools/six/.gitignore b/testing/web-platform/tests/tools/six/.gitignore
new file mode 100644
index 000000000..809b2e84e
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/.gitignore
@@ -0,0 +1,9 @@
+*#
+*.py[co]
+*.sw[po]
+*~
+MANIFEST
+documentation/_build
+\#*
+.tox
+six.egg-info
diff --git a/testing/web-platform/tests/tools/six/CHANGES b/testing/web-platform/tests/tools/six/CHANGES
new file mode 100644
index 000000000..25930bd9a
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/CHANGES
@@ -0,0 +1,246 @@
+Changelog for six
+=================
+
+This file lists the changes in each six version.
+
+Development version
+-------------------
+
+- Issue #105 and pull request #58: Ensure `six.wraps` respects the *updated* and
+ *assigned* arguments.
+
+- Issue #102: Add `raise_from` to abstract out Python 3's raise from syntax.
+
+- Issue #97: Optimize `six.iterbytes` on Python 2.
+
+- Issue #98: Fix `six.moves` race condition in multi-threaded code.
+
+- Pull request #51: Add `six.view(keys|values|items)`, which provide dictionary
+ views on Python 2.7+.
+
+1.8.0
+-----
+
+- Issue #90: Add `six.moves.shlex_quote`.
+
+- Issue #59: Add `six.moves.intern`.
+
+- Add `six.urllib.parse.uses_(fragment|netloc|params|query|relative)`.
+
+- Issue #88: Fix add_metaclass when the class has `__slots__` containing
+ `__weakref__` or `__dict__`.
+
+- Issue #89: Make six use absolute imports.
+
+- Issue #85: Always accept *updated* and *assigned* arguments for `wraps()`.
+
+- Issue #86: In `reraise()`, instantiate the exception if the second argument is
+ `None`.
+
+- Pull request #45: Add `six.moves.email_mime_nonmultipart`.
+
+- Issue #81: Add `six.urllib.request.splittag` mapping.
+
+- Issue #80: Add `six.urllib.request.splituser` mapping.
+
+1.7.3
+-----
+
+- Issue #77: Fix import six on Python 3.4 with a custom loader.
+
+- Issue #74: `six.moves.xmlrpc_server` should map to `SimpleXMLRPCServer` on Python
+  2 as documented, not `xmlrpclib`.
+
+1.7.2
+-----
+
+- Issue #72: Fix installing on Python 2.
+
+1.7.1
+-----
+
+- Issue #71: Make the six.moves meta path importer handle reloading of the six
+ module gracefully.
+
+1.7.0
+-----
+
+- Pull request #30: Implement six.moves with a PEP 302 meta path hook.
+
+- Pull request #32: Add six.wraps, which is like functools.wraps but always sets
+ the __wrapped__ attribute.
+
+- Pull request #35: Improve add_metaclass, so that it doesn't end up inserting
+ another class into the hierarchy.
+
+- Pull request #34: Add import mappings for dummy_thread.
+
+- Pull request #33: Add import mappings for UserDict and UserList.
+
+- Pull request #31: Select the implementations of dictionary iterator routines
+ at import time for a 20% speed boost.
+
+1.6.1
+-----
+
+- Raise an AttributeError for six.moves.X when X is a module not available in
+ the current interpreter.
+
+1.6.0
+-----
+
+- Raise an AttributeError for every attribute of unimportable modules.
+
+- Issue #56: Make the fake modules six.moves puts into sys.modules appear not to
+ have a __path__ unless they are loaded.
+
+- Pull request #28: Add support for SplitResult.
+
+- Issue #55: Add move mapping for xmlrpc.server.
+
+- Pull request #29: Add move for urllib.parse.splitquery.
+
+1.5.2
+-----
+
+- Issue #53: Make the fake modules six.moves puts into sys.modules appear not to
+ have a __name__ unless they are loaded.
+
+1.5.1
+-----
+
+- Issue #51: Hack around the Django autoreloader after recent six.moves changes.
+
+1.5.0
+-----
+
+- Removed support for Python 2.4. This is because py.test no longer supports
+ 2.4.
+
+- Fix various import problems including issues #19 and #41. six.moves modules
+ are now lazy wrappers over the underlying modules instead of the actual
+ modules themselves.
+
+- Issue #49: Add six.moves mapping for tkinter.ttk.
+
+- Pull request #24: Add __dir__ special method to six.moves modules.
+
+- Issue #47: Fix add_metaclass on classes with a string for the __slots__
+ variable.
+
+- Issue #44: Fix interpretation of backslashes on Python 2 in the u() function.
+
+- Pull request #21: Add import mapping for urllib's proxy_bypass function.
+
+- Issue #43: Add import mapping for the Python 2 xmlrpclib module.
+
+- Issue #39: Add import mapping for the Python 2 thread module.
+
+- Issue #40: Add import mapping for the Python 2 gdbm module.
+
+- Issue #35: On Python versions less than 2.7, print_ now encodes unicode
+  strings when outputting to standard streams. (Python 2.7 handles this
+ automatically.)
+
+1.4.1
+-----
+
+- Issue #32: urllib module wrappings don't work when six is not a toplevel file.
+
+1.4.0
+-----
+
+- Issue #31: Add six.moves mapping for UserString.
+
+- Pull request #12: Add six.add_metaclass, a decorator for adding a metaclass to
+ a class.
+
+- Add six.moves.zip_longest and six.moves.filterfalse, which correspond
+ respectively to itertools.izip_longest and itertools.ifilterfalse on Python 2
+ and itertools.zip_longest and itertools.filterfalse on Python 3.
+
+- Issue #25: Add the unichr function, which returns a string for a Unicode
+ codepoint.
+
+- Issue #26: Add byte2int function, which complements int2byte.
+
+- Add a PY2 constant with obvious semantics.
+
+- Add helpers for indexing and iterating over bytes: iterbytes and indexbytes.
+
+- Add create_bound_method() wrapper.
+
+- Issue #23: Allow multiple base classes to be passed to with_metaclass.
+
+- Issue #24: Add six.moves.range alias. This is exactly the same as the current
+ xrange alias.
+
+- Pull request #5: Create six.moves.urllib, which contains abstractions for a
+ bunch of things which are in urllib in Python 3 and spread out across urllib,
+ urllib2, and urlparse in Python 2.
+
+1.3.0
+-----
+
+- Issue #21: Add methods to access the closure and globals of a function.
+
+- In six.iter(items/keys/values/lists), passed keyword arguments through to the
+ underlying method.
+
+- Add six.iterlists().
+
+- Issue #20: Fix tests if tkinter is not available.
+
+- Issue #17: Define callable to be builtin callable when it is available again
+ in Python 3.2+.
+
+- Issue #16: Rename Python 2 exec_'s arguments, so casually calling exec_ with
+ keyword arguments will raise.
+
+- Issue #14: Put the six.moves package in sys.modules based on the name six is
+ imported under.
+
+- Fix Jython detection.
+
+- Pull request #4: Add email_mime_multipart, email_mime_text, and
+ email_mime_base to six.moves.
+
+1.2.0
+-----
+
+- Issue #13: Make iterkeys/itervalues/iteritems return iterators on Python 3
+ instead of iterables.
+
+- Issue #11: Fix maxsize support on Jython.
+
+- Add six.next() as an alias for six.advance_iterator().
+
+- Use the builtin next() function for advance_iterator() where it is available
+ (2.6+), not just Python 3.
+
+- Add the Iterator class for writing portable iterators.
+
+1.1.0
+-----
+
+- Add the int2byte function.
+
+- Add compatibility mappings for iterators over the keys, values, and items of a
+ dictionary.
+
+- Fix six.MAXSIZE on platforms where sizeof(long) != sizeof(Py_ssize_t).
+
+- Issue #3: Add six.moves mappings for filter, map, and zip.
+
+1.0.0
+-----
+
+- Issue #2: u() on Python 2.x now resolves unicode escapes.
+
+- Expose an API for adding mappings to six.moves.
+
+1.0 beta 1
+----------
+
+- Reworked six into one .py file. This breaks imports. Please tell me if you
+ are interested in an import compatibility layer.
diff --git a/testing/web-platform/tests/tools/six/CONTRIBUTORS b/testing/web-platform/tests/tools/six/CONTRIBUTORS
new file mode 100644
index 000000000..0cbd0a402
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/CONTRIBUTORS
@@ -0,0 +1,22 @@
+The primary author and maintainer of six is Benjamin Peterson. He would like to
+acknowledge the following people who submitted bug reports, pull requests, and
+otherwise worked to improve six:
+
+Marc Abramowitz
+Alexander Artemenko
+Aymeric Augustin
+Ned Batchelder
+Jason R. Coombs
+Julien Danjou
+Ben Darnell
+Ben Davis
+Joshua Harlow
+Anselm Kruis
+Alexander Lukanin
+James Mills
+Sridhar Ratnakumar
+Erik Rose
+Peter Ruibal
+Miroslav Shubernetskiy
+
+If you think you belong on this list, please let me know! --Benjamin
diff --git a/testing/web-platform/tests/tools/six/LICENSE b/testing/web-platform/tests/tools/six/LICENSE
new file mode 100644
index 000000000..d76e02426
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2010-2014 Benjamin Peterson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/testing/web-platform/tests/tools/six/MANIFEST.in b/testing/web-platform/tests/tools/six/MANIFEST.in
new file mode 100644
index 000000000..b924e068e
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/MANIFEST.in
@@ -0,0 +1,6 @@
+include CHANGES
+include LICENSE
+include test_six.py
+
+recursive-include documentation *
+prune documentation/_build
diff --git a/testing/web-platform/tests/tools/six/README b/testing/web-platform/tests/tools/six/README
new file mode 100644
index 000000000..32bab7cee
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/README
@@ -0,0 +1,16 @@
+Six is a Python 2 and 3 compatibility library. It provides utility functions
+for smoothing over the differences between the Python versions with the goal of
+writing Python code that is compatible on both Python versions. See the
+documentation for more information on what is provided.
+
+Six supports every Python version since 2.5. It is contained in only one Python
+file, so it can be easily copied into your project. (The copyright and license
+notice must be retained.)
+
+Online documentation is at http://pythonhosted.org/six/.
+
+Bugs can be reported to https://bitbucket.org/gutworth/six. The code can also
+be found there.
+
+For questions about six or porting in general, email the python-porting mailing
+list: http://mail.python.org/mailman/listinfo/python-porting
diff --git a/testing/web-platform/tests/tools/six/documentation/Makefile b/testing/web-platform/tests/tools/six/documentation/Makefile
new file mode 100644
index 000000000..eebafcd6d
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/documentation/Makefile
@@ -0,0 +1,130 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/six.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/six.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/six"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/six"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ make -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/testing/web-platform/tests/tools/six/documentation/conf.py b/testing/web-platform/tests/tools/six/documentation/conf.py
new file mode 100644
index 000000000..7e54287a6
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/documentation/conf.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+#
+# six documentation build configuration file
+
+import os
+import sys
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.0"
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ["sphinx.ext.intersphinx"]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# The suffix of source filenames.
+source_suffix = ".rst"
+
+# The encoding of source files.
+#source_encoding = "utf-8-sig"
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"six"
+copyright = u"2010-2014, Benjamin Peterson"
+
+sys.path.append(os.path.abspath(os.path.join(".", "..")))
+from six import __version__ as six_version
+sys.path.pop()
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = six_version[:-2]
+# The full version, including alpha/beta/rc tags.
+release = six_version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "default"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'sixdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ("index", "six.tex", u"six Documentation",
+ u"Benjamin Peterson", "manual"),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ("index", "six", u"six Documentation",
+ [u"Benjamin Peterson"], 1)
+]
+
+# -- Intersphinx ---------------------------------------------------------------
+
+intersphinx_mapping = {"py2" : ("https://docs.python.org/2/", None),
+ "py3" : ("https://docs.python.org/3/", None)}
diff --git a/testing/web-platform/tests/tools/six/documentation/index.rst b/testing/web-platform/tests/tools/six/documentation/index.rst
new file mode 100644
index 000000000..7851421a9
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/documentation/index.rst
@@ -0,0 +1,785 @@
+Six: Python 2 and 3 Compatibility Library
+=========================================
+
+.. module:: six
+ :synopsis: Python 2 and 3 compatibility
+
+.. moduleauthor:: Benjamin Peterson <benjamin@python.org>
+.. sectionauthor:: Benjamin Peterson <benjamin@python.org>
+
+
+Six provides simple utilities for wrapping over differences between Python 2 and
+Python 3. It is intended to support codebases that work on both Python 2 and 3
+without modification. six consists of only one Python file, so it is painless
+to copy into a project.
+
+Six can be downloaded from `PyPI <http://pypi.python.org/pypi/six/>`_. Its bug
+tracker and code hosting is on `BitBucket <http://bitbucket.org/gutworth/six>`_.
+
+The name, "six", comes from the fact that 2*3 equals 6. Why not addition?
+Multiplication is more powerful, and, anyway, "five" has already been snatched
+away by the Zope Five project.
+
+
+Indices and tables
+------------------
+
+* :ref:`genindex`
+* :ref:`search`
+
+
+Package contents
+----------------
+
+.. data:: PY2
+
+ A boolean indicating if the code is running on Python 2.
+
+.. data:: PY3
+
+ A boolean indicating if the code is running on Python 3.
+
+
+Constants
+>>>>>>>>>
+
+Six provides constants that may differ between Python versions. Ones ending
+``_types`` are mostly useful as the second argument to ``isinstance`` or
+``issubclass``.
+
+
+.. data:: class_types
+
+ Possible class types. In Python 2, this encompasses old-style and new-style
+   classes. In Python 3, this is just new-style classes.
+
+
+.. data:: integer_types
+
+ Possible integer types. In Python 2, this is :func:`py2:long` and
+ :func:`py2:int`, and in Python 3, just :func:`py3:int`.
+
+
+.. data:: string_types
+
+ Possible types for text data. This is :func:`py2:basestring` in Python 2 and
+ :func:`py3:str` in Python 3.
+
+
+.. data:: text_type
+
+ Type for representing (Unicode) textual data. This is :func:`py2:unicode` in
+ Python 2 and :func:`py3:str` in Python 3.
+
+
+.. data:: binary_type
+
+ Type for representing binary data. This is :func:`py2:str` in Python 2 and
+ :func:`py3:bytes` in Python 3.
+
+
+.. data:: MAXSIZE
+
+ The maximum size of a container like :func:`py3:list` or :func:`py3:dict`.
+ This is equivalent to :data:`py3:sys.maxsize` in Python 2.6 and later
+ (including 3.x). Note, this is temptingly similar to, but not the same as
+ :data:`py2:sys.maxint` in Python 2. There is no direct equivalent to
+ :data:`py2:sys.maxint` in Python 3 because its integer type has no limits
+ aside from memory.
+
+
+Here's example usage of the module::
+
+ import six
+
+ def dispatch_types(value):
+ if isinstance(value, six.integer_types):
+ handle_integer(value)
+ elif isinstance(value, six.class_types):
+ handle_class(value)
+ elif isinstance(value, six.string_types):
+ handle_string(value)
+
+
+Object model compatibility
+>>>>>>>>>>>>>>>>>>>>>>>>>>
+
+Python 3 renamed the attributes of several interpreter data structures. The
+following accessors are available. Note that the recommended way to inspect
+functions and methods is the stdlib :mod:`py3:inspect` module.
+
+
+.. function:: get_unbound_function(meth)
+
+ Get the function out of unbound method *meth*. In Python 3, unbound methods
+ don't exist, so this function just returns *meth* unchanged. Example
+ usage::
+
+ from six import get_unbound_function
+
+ class X(object):
+ def method(self):
+ pass
+ method_function = get_unbound_function(X.method)
+
+
+.. function:: get_method_function(meth)
+
+ Get the function out of method object *meth*.
+
+
+.. function:: get_method_self(meth)
+
+ Get the ``self`` of bound method *meth*.
+
+
+.. function:: get_function_closure(func)
+
+ Get the closure (list of cells) associated with *func*. This is equivalent
+ to ``func.__closure__`` on Python 2.6+ and ``func.func_closure`` on Python
+ 2.5.
+
+
+.. function:: get_function_code(func)
+
+ Get the code object associated with *func*. This is equivalent to
+ ``func.__code__`` on Python 2.6+ and ``func.func_code`` on Python 2.5.
+
+
+.. function:: get_function_defaults(func)
+
+ Get the defaults tuple associated with *func*. This is equivalent to
+ ``func.__defaults__`` on Python 2.6+ and ``func.func_defaults`` on Python
+ 2.5.
+
+
+.. function:: get_function_globals(func)
+
+ Get the globals of *func*. This is equivalent to ``func.__globals__`` on
+ Python 2.6+ and ``func.func_globals`` on Python 2.5.
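+
+   For illustration, a small sketch exercising these accessors (assuming
+   ``add`` is defined at module level)::
+
+       import six
+
+       def add(a, b=1):
+           return a + b
+
+       assert six.get_function_defaults(add) == (1,)
+       assert six.get_function_code(add).co_name == "add"
+       assert "add" in six.get_function_globals(add)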
+
+
+.. function:: next(it)
+ advance_iterator(it)
+
+ Get the next item of iterator *it*. :exc:`py3:StopIteration` is raised if
+ the iterator is exhausted. This is a replacement for calling ``it.next()``
+ in Python 2 and ``next(it)`` in Python 3.
+
+
+.. function:: callable(obj)
+
+ Check if *obj* can be called. Note ``callable`` has returned in Python 3.2,
+ so using six's version is only necessary when supporting Python 3.0 or 3.1.
+
+
+.. function:: iterkeys(dictionary, **kwargs)
+
+ Returns an iterator over *dictionary*\'s keys. This replaces
+ ``dictionary.iterkeys()`` on Python 2 and ``dictionary.keys()`` on
+ Python 3. *kwargs* are passed through to the underlying method.
+
+
+.. function:: itervalues(dictionary, **kwargs)
+
+ Returns an iterator over *dictionary*\'s values. This replaces
+ ``dictionary.itervalues()`` on Python 2 and ``dictionary.values()`` on
+ Python 3. *kwargs* are passed through to the underlying method.
+
+
+.. function:: iteritems(dictionary, **kwargs)
+
+ Returns an iterator over *dictionary*\'s items. This replaces
+ ``dictionary.iteritems()`` on Python 2 and ``dictionary.items()`` on
+ Python 3. *kwargs* are passed through to the underlying method.
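+
+   For example, a portable loop over a dictionary's items might look like
+   this (a short sketch)::
+
+       import six
+
+       counts = {"a": 1, "b": 2}
+       total = 0
+       for key, value in six.iteritems(counts):
+           total += value
+       assert total == 3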
+
+
+.. function:: iterlists(dictionary, **kwargs)
+
+ Calls ``dictionary.iterlists()`` on Python 2 and ``dictionary.lists()`` on
+ Python 3. No builtin Python mapping type has such a method; this method is
+ intended for use with multi-valued dictionaries like `Werkzeug's
+ <http://werkzeug.pocoo.org/docs/datastructures/#werkzeug.datastructures.MultiDict>`_.
+ *kwargs* are passed through to the underlying method.
+
+
+.. function:: viewkeys(dictionary)
+
+ Return a view over *dictionary*\'s keys. This replaces
+ :meth:`py2:dict.viewkeys` on Python 2.7 and :meth:`py3:dict.keys` on
+ Python 3.
+
+
+.. function:: viewvalues(dictionary)
+
+ Return a view over *dictionary*\'s values. This replaces
+ :meth:`py2:dict.viewvalues` on Python 2.7 and :meth:`py3:dict.values` on
+ Python 3.
+
+
+.. function:: viewitems(dictionary)
+
+ Return a view over *dictionary*\'s items. This replaces
+ :meth:`py2:dict.viewitems` on Python 2.7 and :meth:`py3:dict.items` on
+ Python 3.
+
+
+.. function:: create_bound_method(func, obj)
+
+ Return a method object wrapping *func* and bound to *obj*. On both Python 2
+ and 3, this will return a :func:`py3:types.MethodType` object. The reason
+ this wrapper exists is that on Python 2, the ``MethodType`` constructor
+ requires the *obj*'s class to be passed.
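+
+   A brief sketch of attaching a plain function to an instance::
+
+       from six import create_bound_method
+
+       class Greeter(object):
+           pass
+
+       def greet(self):
+           return "hello"
+
+       g = Greeter()
+       g.greet = create_bound_method(greet, g)
+       assert g.greet() == "hello"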
+
+
+.. class:: Iterator
+
+ A class for making portable iterators. The intention is that it be subclassed
+ and subclasses provide a ``__next__`` method. In Python 2, :class:`Iterator`
+ has one method: ``next``. It simply delegates to ``__next__``. An alternate
+ way to do this would be to simply alias ``next`` to ``__next__``. However,
+ this interacts badly with subclasses that override
+ ``__next__``. :class:`Iterator` is empty on Python 3. (In fact, it is just
+ aliased to :class:`py3:object`.)
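+
+   A minimal portable iterator might therefore look like::
+
+       from six import Iterator
+
+       class Countdown(Iterator):
+           def __init__(self, n):
+               self.n = n
+
+           def __iter__(self):
+               return self
+
+           def __next__(self):
+               if self.n <= 0:
+                   raise StopIteration
+               self.n -= 1
+               return self.n
+
+       assert list(Countdown(3)) == [2, 1, 0]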
+
+
+.. function:: wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES)
+
+ This is exactly the :func:`py3:functools.wraps` decorator, but it sets the
+ ``__wrapped__`` attribute on what it decorates as :func:`py3:functools.wraps`
+ does on Python versions after 3.2.
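+
+   For instance, a decorator written with it (a sketch)::
+
+       from six import wraps
+
+       def identity_decorator(func):
+           @wraps(func)
+           def wrapper(*args, **kwargs):
+               # The wrapper keeps func's __name__ and docstring.
+               return func(*args, **kwargs)
+           return wrapper
+
+       @identity_decorator
+       def greet():
+           "Say hello."
+           return "hello"
+
+       assert greet.__name__ == "greet"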
+
+
+Syntax compatibility
+>>>>>>>>>>>>>>>>>>>>
+
+These functions smooth over operations which have different syntaxes between
+Python 2 and 3.
+
+
+.. function:: exec_(code, globals=None, locals=None)
+
+ Execute *code* in the scope of *globals* and *locals*. *code* can be a
+ string or a code object. If *globals* or *locals* are not given, they will
+ default to the scope of the caller. If just *globals* is given, it will also
+ be used as *locals*.
+
+ .. note::
+
+ Python 3's :func:`py3:exec` doesn't take keyword arguments, so calling
+ :func:`exec` with them should be avoided.
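+
+   A short example::
+
+       from six import exec_
+
+       namespace = {}
+       exec_("answer = 40 + 2", namespace)
+       assert namespace["answer"] == 42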
+
+
+.. function:: print_(*args, *, file=sys.stdout, end="\\n", sep=" ")
+
+ Print *args* into *file*. Each argument will be separated with *sep* and
+ *end* will be written to the file after the last argument is printed.
+
+ .. note::
+
+ In Python 2, this function imitates Python 3's :func:`py3:print` by not
+ having softspace support. If you don't know what that is, you're probably
+ ok. :)
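+
+   For instance, printing to standard error on either Python version::
+
+       import sys
+
+       from six import print_
+
+       print_("warning:", "low disk space", file=sys.stderr)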
+
+
+.. function:: raise_from(exc_value, exc_value_from)
+
+ Raise an exception from a context. On Python 3, this is equivalent to
+ ``raise exc_value from exc_value_from``. On Python 2, which does not support
+ exception chaining, it is equivalent to ``raise exc_value``.
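+
+   A small sketch::
+
+       from six import raise_from
+
+       try:
+           {}["missing"]
+       except KeyError as exc:
+           raise_from(ValueError("no such setting"), exc)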
+
+
+.. function:: reraise(exc_type, exc_value, exc_traceback=None)
+
+ Reraise an exception, possibly with a different traceback. In the simple
+ case, ``reraise(*sys.exc_info())`` with an active exception (in an except
+ block) reraises the current exception with the last traceback. A different
+ traceback can be specified with the *exc_traceback* parameter. Note that
+ since the exception reraising is done within the :func:`reraise` function,
+ Python will attach the call frame of :func:`reraise` to whatever traceback is
+ raised.
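+
+   For example, re-raising the active exception unchanged::
+
+       import sys
+
+       from six import reraise
+
+       try:
+           1 / 0
+       except ZeroDivisionError:
+           reraise(*sys.exc_info())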
+
+
+.. function:: with_metaclass(metaclass, *bases)
+
+ Create a new class with base classes *bases* and metaclass *metaclass*. This
+ is designed to be used in class declarations like this: ::
+
+ from six import with_metaclass
+
+ class Meta(type):
+ pass
+
+ class Base(object):
+ pass
+
+ class MyClass(with_metaclass(Meta, Base)):
+ pass
+
+ Another way to set a metaclass on a class is with the :func:`add_metaclass`
+ decorator.
+
+
+.. function:: add_metaclass(metaclass)
+
+ Class decorator that replaces a normally-constructed class with a
+ metaclass-constructed one. Example usage: ::
+
+ @add_metaclass(Meta)
+ class MyClass(object):
+ pass
+
+ That code produces a class equivalent to ::
+
+ class MyClass(object, metaclass=Meta):
+ pass
+
+ on Python 3 or ::
+
+ class MyClass(object):
+           __metaclass__ = Meta
+
+ on Python 2.
+
+ Note that class decorators require Python 2.6. However, the effect of the
+ decorator can be emulated on Python 2.5 like so::
+
+ class MyClass(object):
+ pass
+ MyClass = add_metaclass(Meta)(MyClass)
+
+
+Binary and text data
+>>>>>>>>>>>>>>>>>>>>
+
+Python 3 enforces the distinction between byte strings and text strings far more
+rigorously than Python 2 does; binary data cannot be automatically coerced to
+or from text data. six provides several functions to assist in classifying
+string data in all Python versions.
+
+
+.. function:: b(data)
+
+ A "fake" bytes literal. *data* should always be a normal string literal. In
+   Python 2, :func:`b` returns an 8-bit string. In Python 3, *data* is encoded
+ with the latin-1 encoding to bytes.
+
+
+ .. note::
+
+      Since all Python versions 2.6 and after support the ``b`` prefix, code
+      that does not need to support Python 2.5 does not need :func:`b`.
+
+
+.. function:: u(text)
+
+ A "fake" unicode literal. *text* should always be a normal string literal.
+ In Python 2, :func:`u` returns unicode, and in Python 3, a string. Also, in
+ Python 2, the string is decoded with the ``unicode-escape`` codec, which
+ allows unicode escapes to be used in it.
+
+
+ .. note::
+
+ In Python 3.3, the ``u`` prefix has been reintroduced. Code that only
+ supports Python 3 versions greater than 3.3 thus does not need
+ :func:`u`.
+
+ .. note::
+
+ On Python 2, :func:`u` doesn't know what the encoding of the literal
+ is. Each byte is converted directly to the unicode codepoint of the same
+ value. Because of this, it's only safe to use :func:`u` with strings of
+ ASCII data.
+
+
+.. function:: unichr(c)
+
+ Return the (Unicode) string representing the codepoint *c*. This is
+ equivalent to :func:`py2:unichr` on Python 2 and :func:`py3:chr` on Python 3.
+
+
+.. function:: int2byte(i)
+
+ Converts *i* to a byte. *i* must be in ``range(0, 256)``. This is
+ equivalent to :func:`py2:chr` in Python 2 and ``bytes((i,))`` in Python 3.
+
+
+.. function:: byte2int(bs)
+
+ Converts the first byte of *bs* to an integer. This is equivalent to
+ ``ord(bs[0])`` on Python 2 and ``bs[0]`` on Python 3.
+
+
+.. function:: indexbytes(buf, i)
+
+ Return the byte at index *i* of *buf* as an integer. This is equivalent to
+ indexing a bytes object in Python 3.
+
+
+.. function:: iterbytes(buf)
+
+ Return an iterator over bytes in *buf* as integers. This is equivalent to
+ a bytes object iterator in Python 3.
+
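+Taken together, the byte helpers can be used like this (a short sketch)::
+
+    import six
+
+    data = six.b("abc")
+    assert six.byte2int(data) == 97
+    assert six.indexbytes(data, 1) == 98
+    assert list(six.iterbytes(data)) == [97, 98, 99]
+    assert six.int2byte(97) == six.b("a")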
+
+.. data:: StringIO
+
+   This is a fake file object for textual data. It's an alias for
+ :class:`py2:StringIO.StringIO` in Python 2 and :class:`py3:io.StringIO` in
+ Python 3.
+
+
+.. data:: BytesIO
+
+ This is a fake file object for binary data. In Python 2, it's an alias for
+ :class:`py2:StringIO.StringIO`, but in Python 3, it's an alias for
+ :class:`py3:io.BytesIO`.
+
+
+Renamed modules and attributes compatibility
+>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+
+.. module:: six.moves
+ :synopsis: Renamed modules and attributes compatibility
+
+Python 3 reorganized the standard library and moved several functions to
+different modules. Six provides a consistent interface to them through the fake
+:mod:`six.moves` module. For example, to load the module for parsing HTML on
+Python 2 or 3, write::
+
+ from six.moves import html_parser
+
+Similarly, to get the function to reload modules, which was moved from the
+builtin module to the ``imp`` module, use::
+
+ from six.moves import reload_module
+
+For the most part, :mod:`six.moves` aliases are the names of the modules in
+Python 3. When the new Python 3 name is a package, the components of the name
+are separated by underscores. For example, ``html.parser`` becomes
+``html_parser``. In some cases where several modules have been combined, the
+Python 2 name is retained. This is so the appropriate modules can be found when
+running on Python 2. For example, ``BaseHTTPServer``, which is in
+``http.server`` in Python 3, is aliased as ``BaseHTTPServer``.
+
+Some modules which had two implementations have been merged in Python 3. For
+example, ``cPickle`` no longer exists in Python 3; it was merged with
+``pickle``. In these cases, fetching the fast version will load the fast one on
+Python 2 and the merged module in Python 3.
+
+The :mod:`py2:urllib`, :mod:`py2:urllib2`, and :mod:`py2:urlparse` modules have
+been combined in the :mod:`py3:urllib` package in Python 3. The
+:mod:`six.moves.urllib` package is a version-independent location for this
+functionality; its structure mimics the structure of the Python 3
+:mod:`py3:urllib` package.
+
+.. note::
+
+ In order to make imports of the form::
+
+ from six.moves.cPickle import loads
+
+   work, six places special proxy objects in :data:`py3:sys.modules`. These
+ proxies lazily load the underlying module when an attribute is fetched. This
+ will fail if the underlying module is not available in the Python
+ interpreter. For example, ``sys.modules["six.moves.winreg"].LoadKey`` would
+ fail on any non-Windows platform. Unfortunately, some applications try to
+ load attributes on every module in :data:`py3:sys.modules`. six mitigates
+ this problem for some applications by pretending attributes on unimportable
+ modules don't exist. This hack doesn't work in every case, though. If you are
+ encountering problems with the lazy modules and don't use any from imports
+   directly from ``six.moves`` modules, you can work around the issue by removing
+ the six proxy modules::
+
+ d = [name for name in sys.modules if name.startswith("six.moves.")]
+ for name in d:
+ del sys.modules[name]
+
+Supported renames:
+
++------------------------------+-------------------------------------+-------------------------------------+
+| Name | Python 2 name | Python 3 name |
++==============================+=====================================+=====================================+
+| ``builtins`` | :mod:`py2:__builtin__` | :mod:`py3:builtins` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``configparser`` | :mod:`py2:ConfigParser` | :mod:`py3:configparser` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``copyreg`` | :mod:`py2:copy_reg` | :mod:`py3:copyreg` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``cPickle`` | :mod:`py2:cPickle` | :mod:`py3:pickle` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``cStringIO`` | :func:`py2:cStringIO.StringIO` | :class:`py3:io.StringIO` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``dbm_gnu`` | :func:`py2:gdbm` | :class:`py3:dbm.gnu` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``_dummy_thread`` | :mod:`py2:dummy_thread` | :mod:`py3:_dummy_thread` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``email_mime_multipart`` | :mod:`py2:email.MIMEMultipart` | :mod:`py3:email.mime.multipart` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``email_mime_nonmultipart`` | :mod:`py2:email.MIMENonMultipart` | :mod:`py3:email.mime.nonmultipart` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``email_mime_text`` | :mod:`py2:email.MIMEText` | :mod:`py3:email.mime.text` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``email_mime_base`` | :mod:`py2:email.MIMEBase` | :mod:`py3:email.mime.base` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``filter`` | :func:`py2:itertools.ifilter` | :func:`py3:filter` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``filterfalse`` | :func:`py2:itertools.ifilterfalse` | :func:`py3:itertools.filterfalse` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``http_cookiejar`` | :mod:`py2:cookielib` | :mod:`py3:http.cookiejar` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``http_cookies`` | :mod:`py2:Cookie` | :mod:`py3:http.cookies` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``html_entities`` | :mod:`py2:htmlentitydefs` | :mod:`py3:html.entities` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``html_parser`` | :mod:`py2:HTMLParser` | :mod:`py3:html.parser` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``http_client`` | :mod:`py2:httplib` | :mod:`py3:http.client` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``BaseHTTPServer`` | :mod:`py2:BaseHTTPServer` | :mod:`py3:http.server` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``CGIHTTPServer`` | :mod:`py2:CGIHTTPServer` | :mod:`py3:http.server` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``SimpleHTTPServer`` | :mod:`py2:SimpleHTTPServer` | :mod:`py3:http.server` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``input`` | :func:`py2:raw_input` | :func:`py3:input` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``intern`` | :func:`py2:intern` | :func:`py3:sys.intern` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``map`` | :func:`py2:itertools.imap` | :func:`py3:map` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``queue`` | :mod:`py2:Queue` | :mod:`py3:queue` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``range`` | :func:`py2:xrange` | :func:`py3:range` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``reduce`` | :func:`py2:reduce` | :func:`py3:functools.reduce` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``reload_module`` | :func:`py2:reload` | :func:`py3:imp.reload` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``reprlib`` | :mod:`py2:repr` | :mod:`py3:reprlib` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``shlex_quote`` | :mod:`py2:pipes.quote` | :mod:`py3:shlex.quote` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``socketserver`` | :mod:`py2:SocketServer` | :mod:`py3:socketserver` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``_thread`` | :mod:`py2:thread` | :mod:`py3:_thread` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter`` | :mod:`py2:Tkinter` | :mod:`py3:tkinter` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_dialog`` | :mod:`py2:Dialog` | :mod:`py3:tkinter.dialog` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_filedialog`` | :mod:`py2:FileDialog` | :mod:`py3:tkinter.FileDialog` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_scrolledtext`` | :mod:`py2:ScrolledText` | :mod:`py3:tkinter.scrolledtext` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_simpledialog`` | :mod:`py2:SimpleDialog` | :mod:`py3:tkinter.simpledialog` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_ttk`` | :mod:`py2:ttk` | :mod:`py3:tkinter.ttk` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_tix`` | :mod:`py2:Tix` | :mod:`py3:tkinter.tix` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_constants`` | :mod:`py2:Tkconstants` | :mod:`py3:tkinter.constants` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_dnd`` | :mod:`py2:Tkdnd` | :mod:`py3:tkinter.dnd` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_colorchooser`` | :mod:`py2:tkColorChooser` | :mod:`py3:tkinter.colorchooser` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_commondialog`` | :mod:`py2:tkCommonDialog` | :mod:`py3:tkinter.commondialog` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_tkfiledialog`` | :mod:`py2:tkFileDialog` | :mod:`py3:tkinter.filedialog` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_font`` | :mod:`py2:tkFont` | :mod:`py3:tkinter.font` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_messagebox`` | :mod:`py2:tkMessageBox` | :mod:`py3:tkinter.messagebox` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``tkinter_tksimpledialog`` | :mod:`py2:tkSimpleDialog` | :mod:`py3:tkinter.simpledialog` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``urllib.parse`` | See :mod:`six.moves.urllib.parse` | :mod:`py3:urllib.parse` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``urllib.error`` | See :mod:`six.moves.urllib.error` | :mod:`py3:urllib.error` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``urllib.request`` | See :mod:`six.moves.urllib.request` | :mod:`py3:urllib.request` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``urllib.response`` | See :mod:`six.moves.urllib.response`| :mod:`py3:urllib.response` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``urllib.robotparser`` | :mod:`py2:robotparser` | :mod:`py3:urllib.robotparser` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``urllib_robotparser`` | :mod:`py2:robotparser` | :mod:`py3:urllib.robotparser` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``UserDict`` | :class:`py2:UserDict.UserDict` | :class:`py3:collections.UserDict` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``UserList`` | :class:`py2:UserList.UserList` | :class:`py3:collections.UserList` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``UserString`` | :class:`py2:UserString.UserString` | :class:`py3:collections.UserString` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``winreg`` | :mod:`py2:_winreg` | :mod:`py3:winreg` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``xmlrpc_client`` | :mod:`py2:xmlrpclib` | :mod:`py3:xmlrpc.client` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``xmlrpc_server`` | :mod:`py2:SimpleXMLRPCServer` | :mod:`py3:xmlrpc.server` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``xrange`` | :func:`py2:xrange` | :func:`py3:range` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``zip`` | :func:`py2:itertools.izip` | :func:`py3:zip` |
++------------------------------+-------------------------------------+-------------------------------------+
+| ``zip_longest`` | :func:`py2:itertools.izip_longest` | :func:`py3:itertools.zip_longest` |
++------------------------------+-------------------------------------+-------------------------------------+
+
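+For example, code that needs the renamed ``queue`` module or the iterator
+``range`` listed above can import both names from :mod:`six.moves` and run
+unchanged on Python 2 and 3 (a minimal usage sketch)::
+
+    from six.moves import queue, range
+
+    work = queue.Queue()
+    for item in range(3):
+        work.put(item)
+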
+urllib parse
+<<<<<<<<<<<<
+
+.. module:: six.moves.urllib.parse
+ :synopsis: Stuff from :mod:`py2:urlparse` and :mod:`py2:urllib` in Python 2 and :mod:`py3:urllib.parse` in Python 3
+
+Contains functions from Python 3's :mod:`py3:urllib.parse` and Python 2's:
+
+:mod:`py2:urlparse`:
+
+* :func:`py2:urlparse.ParseResult`
+* :func:`py2:urlparse.SplitResult`
+* :func:`py2:urlparse.urlparse`
+* :func:`py2:urlparse.urlunparse`
+* :func:`py2:urlparse.parse_qs`
+* :func:`py2:urlparse.parse_qsl`
+* :func:`py2:urlparse.urljoin`
+* :func:`py2:urlparse.urldefrag`
+* :func:`py2:urlparse.urlsplit`
+* :func:`py2:urlparse.urlunsplit`
+* :func:`py2:urlparse.splitquery`
+* :func:`py2:urlparse.uses_fragment`
+* :func:`py2:urlparse.uses_netloc`
+* :func:`py2:urlparse.uses_params`
+* :func:`py2:urlparse.uses_query`
+* :func:`py2:urlparse.uses_relative`
+
+and :mod:`py2:urllib`:
+
+* :func:`py2:urllib.quote`
+* :func:`py2:urllib.quote_plus`
+* :func:`py2:urllib.splittag`
+* :func:`py2:urllib.splituser`
+* :func:`py2:urllib.unquote`
+* :func:`py2:urllib.unquote_plus`
+* :func:`py2:urllib.urlencode`
+
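+Because both sets of names live under the single :mod:`six.moves.urllib.parse`
+module, URL handling code can be written once; a minimal sketch using names
+from the lists above::
+
+    from six.moves.urllib.parse import urlencode, urlparse
+
+    parts = urlparse("http://example.com/search?q=six")
+    query = urlencode({"q": "six", "page": 2})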
+
+urllib error
+<<<<<<<<<<<<
+
+.. module:: six.moves.urllib.error
+ :synopsis: Stuff from :mod:`py2:urllib` and :mod:`py2:urllib2` in Python 2 and :mod:`py3:urllib.error` in Python 3
+
+Contains exceptions from Python 3's :mod:`py3:urllib.error` and Python 2's:
+
+:mod:`py2:urllib`:
+
+* :exc:`py2:urllib.ContentTooShortError`
+
+and :mod:`py2:urllib2`:
+
+* :exc:`py2:urllib2.URLError`
+* :exc:`py2:urllib2.HTTPError`
+
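+Failures raised while opening a URL can therefore be handled identically on
+both versions; a minimal sketch (the URL is illustrative, and ``urlopen`` comes
+from :mod:`six.moves.urllib.request`, described below)::
+
+    from six.moves.urllib.error import HTTPError, URLError
+    from six.moves.urllib.request import urlopen
+
+    try:
+        urlopen("http://example.invalid/")
+    except HTTPError as err:
+        print(err.code)
+    except URLError as err:
+        print(err.reason)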
+
+urllib request
+<<<<<<<<<<<<<<
+
+.. module:: six.moves.urllib.request
+ :synopsis: Stuff from :mod:`py2:urllib` and :mod:`py2:urllib2` in Python 2 and :mod:`py3:urllib.request` in Python 3
+
+Contains items from Python 3's :mod:`py3:urllib.request` and Python 2's:
+
+:mod:`py2:urllib`:
+
+* :func:`py2:urllib.pathname2url`
+* :func:`py2:urllib.url2pathname`
+* :func:`py2:urllib.getproxies`
+* :func:`py2:urllib.urlretrieve`
+* :func:`py2:urllib.urlcleanup`
+* :class:`py2:urllib.URLopener`
+* :class:`py2:urllib.FancyURLopener`
+* :func:`py2:urllib.proxy_bypass`
+
+and :mod:`py2:urllib2`:
+
+* :func:`py2:urllib2.urlopen`
+* :func:`py2:urllib2.install_opener`
+* :func:`py2:urllib2.build_opener`
+* :class:`py2:urllib2.Request`
+* :class:`py2:urllib2.OpenerDirector`
+* :class:`py2:urllib2.HTTPDefaultErrorHandler`
+* :class:`py2:urllib2.HTTPRedirectHandler`
+* :class:`py2:urllib2.HTTPCookieProcessor`
+* :class:`py2:urllib2.ProxyHandler`
+* :class:`py2:urllib2.BaseHandler`
+* :class:`py2:urllib2.HTTPPasswordMgr`
+* :class:`py2:urllib2.HTTPPasswordMgrWithDefaultRealm`
+* :class:`py2:urllib2.AbstractBasicAuthHandler`
+* :class:`py2:urllib2.HTTPBasicAuthHandler`
+* :class:`py2:urllib2.ProxyBasicAuthHandler`
+* :class:`py2:urllib2.AbstractDigestAuthHandler`
+* :class:`py2:urllib2.HTTPDigestAuthHandler`
+* :class:`py2:urllib2.ProxyDigestAuthHandler`
+* :class:`py2:urllib2.HTTPHandler`
+* :class:`py2:urllib2.HTTPSHandler`
+* :class:`py2:urllib2.FileHandler`
+* :class:`py2:urllib2.FTPHandler`
+* :class:`py2:urllib2.CacheFTPHandler`
+* :class:`py2:urllib2.UnknownHandler`
+* :class:`py2:urllib2.HTTPErrorProcessor`
+
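+A minimal usage sketch (the URL and header value are purely illustrative)::
+
+    from six.moves.urllib.request import Request, urlopen
+
+    req = Request("http://example.com/", headers={"User-Agent": "six-example"})
+    body = urlopen(req).read()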
+
+urllib response
+<<<<<<<<<<<<<<<
+
+.. module:: six.moves.urllib.response
+ :synopsis: Stuff from :mod:`py2:urllib` in Python 2 and :mod:`py3:urllib.response` in Python 3
+
+Contains classes from Python 3's :mod:`py3:urllib.response` and Python 2's:
+
+:mod:`py2:urllib`:
+
+* :class:`py2:urllib.addbase`
+* :class:`py2:urllib.addclosehook`
+* :class:`py2:urllib.addinfo`
+* :class:`py2:urllib.addinfourl`
+
+
+Advanced - Customizing renames
+<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
+
+.. currentmodule:: six
+
+It is possible to add additional names to the :mod:`six.moves` namespace.
+
+
+.. function:: add_move(item)
+
+ Add *item* to the :mod:`six.moves` mapping. *item* should be a
+ :class:`MovedAttribute` or :class:`MovedModule` instance.
+
+
+.. function:: remove_move(name)
+
+ Remove the :mod:`six.moves` mapping called *name*. *name* should be a
+ string.
+
+
+Instances of the following classes can be passed to :func:`add_move`. Neither
+has any public members.
+
+
+.. class:: MovedModule(name, old_mod, new_mod)
+
+ Create a mapping for :mod:`six.moves` called *name* that references different
+ modules in Python 2 and 3. *old_mod* is the name of the Python 2 module.
+ *new_mod* is the name of the Python 3 module.
+
+
+.. class:: MovedAttribute(name, old_mod, new_mod, old_attr=None, new_attr=None)
+
+ Create a mapping for :mod:`six.moves` called *name* that references different
+ attributes in Python 2 and 3. *old_mod* is the name of the Python 2 module.
+ *new_mod* is the name of the Python 3 module. If *new_attr* is not given, it
+ defaults to *old_attr*. If neither is given, they both default to *name*.
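+
+For example, a move that six does not define by default can be registered and
+then imported like any built-in one (an illustrative sketch; the ``getcwd``
+mapping below pairs :func:`py2:os.getcwdu` with :func:`py3:os.getcwd` and is
+not shipped with six)::
+
+    import six
+
+    six.add_move(six.MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"))
+    from six.moves import getcwd
+
+    print(getcwd())
+
+    six.remove_move("getcwd")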
diff --git a/testing/web-platform/tests/tools/six/setup.cfg b/testing/web-platform/tests/tools/six/setup.cfg
new file mode 100644
index 000000000..5e4090017
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/setup.cfg
@@ -0,0 +1,2 @@
+[wheel]
+universal = 1
diff --git a/testing/web-platform/tests/tools/six/setup.py b/testing/web-platform/tests/tools/six/setup.py
new file mode 100644
index 000000000..b0cca52d4
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/setup.py
@@ -0,0 +1,32 @@
+from __future__ import with_statement
+
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+import six
+
+six_classifiers = [
+ "Programming Language :: Python :: 2",
+ "Programming Language :: Python :: 3",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License",
+ "Topic :: Software Development :: Libraries",
+ "Topic :: Utilities",
+]
+
+with open("README", "r") as fp:
+ six_long_description = fp.read()
+
+setup(name="six",
+ version=six.__version__,
+ author="Benjamin Peterson",
+ author_email="benjamin@python.org",
+ url="http://pypi.python.org/pypi/six/",
+ py_modules=["six"],
+ description="Python 2 and 3 compatibility utilities",
+ long_description=six_long_description,
+ license="MIT",
+ classifiers=six_classifiers
+ )
diff --git a/testing/web-platform/tests/tools/six/six.py b/testing/web-platform/tests/tools/six/six.py
new file mode 100644
index 000000000..686c20a15
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/six.py
@@ -0,0 +1,787 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2014 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.8.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3
+ """
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+ MovedModule("winreg", "_winreg"),
+]
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return iter(d.iterkeys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.itervalues(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.iteritems(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.iterlists(**kw))
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+ def u(s):
+ return s
+ unichr = chr
+ if sys.version_info[1] <= 1:
+ def int2byte(i):
+ return bytes((i,))
+ else:
+ # This is about 2x faster than the implementation above on 3.2+
+ int2byte = operator.methodcaller("to_bytes", 1, "big")
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+ def byte2int(bs):
+ return ord(bs[0])
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+
+ def reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+if sys.version_info > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ raise value from from_value
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/testing/web-platform/tests/tools/six/test_six.py b/testing/web-platform/tests/tools/six/test_six.py
new file mode 100644
index 000000000..d5555e8ac
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/test_six.py
@@ -0,0 +1,787 @@
+import operator
+import sys
+import types
+
+import py
+
+import six
+
+
+def test_add_doc():
+ def f():
+ """Icky doc"""
+ pass
+ six._add_doc(f, """New doc""")
+ assert f.__doc__ == "New doc"
+
+
+def test_import_module():
+ from logging import handlers
+ m = six._import_module("logging.handlers")
+ assert m is handlers
+
+
+def test_integer_types():
+ assert isinstance(1, six.integer_types)
+ assert isinstance(-1, six.integer_types)
+ assert isinstance(six.MAXSIZE + 23, six.integer_types)
+ assert not isinstance(.1, six.integer_types)
+
+
+def test_string_types():
+ assert isinstance("hi", six.string_types)
+ assert isinstance(six.u("hi"), six.string_types)
+ assert issubclass(six.text_type, six.string_types)
+
+
+def test_class_types():
+ class X:
+ pass
+ class Y(object):
+ pass
+ assert isinstance(X, six.class_types)
+ assert isinstance(Y, six.class_types)
+ assert not isinstance(X(), six.class_types)
+
+
+def test_text_type():
+ assert type(six.u("hi")) is six.text_type
+
+
+def test_binary_type():
+ assert type(six.b("hi")) is six.binary_type
+
+
+def test_MAXSIZE():
+ try:
+ # This shouldn't raise an overflow error.
+ six.MAXSIZE.__index__()
+ except AttributeError:
+ # Before Python 2.6.
+ pass
+ py.test.raises(
+ (ValueError, OverflowError),
+ operator.mul, [None], six.MAXSIZE + 1)
+
+
+def test_lazy():
+ if six.PY3:
+ html_name = "html.parser"
+ else:
+ html_name = "HTMLParser"
+ assert html_name not in sys.modules
+ mod = six.moves.html_parser
+ assert sys.modules[html_name] is mod
+ assert "htmlparser" not in six._MovedItems.__dict__
+
+
+try:
+ import _tkinter
+except ImportError:
+ have_tkinter = False
+else:
+ have_tkinter = True
+
+have_gdbm = True
+try:
+ import gdbm
+except ImportError:
+ try:
+ import dbm.gnu
+ except ImportError:
+ have_gdbm = False
+
+@py.test.mark.parametrize("item_name",
+ [item.name for item in six._moved_attributes])
+def test_move_items(item_name):
+ """Ensure that everything loads correctly."""
+ try:
+ item = getattr(six.moves, item_name)
+ if isinstance(item, types.ModuleType):
+ __import__("six.moves." + item_name)
+ except AttributeError:
+ if item_name == "zip_longest" and sys.version_info < (2, 6):
+ py.test.skip("zip_longest only available on 2.6+")
+ except ImportError:
+ if item_name == "winreg" and not sys.platform.startswith("win"):
+ py.test.skip("Windows only module")
+ if item_name.startswith("tkinter"):
+ if not have_tkinter:
+ py.test.skip("requires tkinter")
+ if item_name == "tkinter_ttk" and sys.version_info[:2] <= (2, 6):
+ py.test.skip("ttk only available on 2.7+")
+ if item_name.startswith("dbm_gnu") and not have_gdbm:
+ py.test.skip("requires gdbm")
+ raise
+ if sys.version_info[:2] >= (2, 6):
+ assert item_name in dir(six.moves)
+
+
+@py.test.mark.parametrize("item_name",
+ [item.name for item in six._urllib_parse_moved_attributes])
+def test_move_items_urllib_parse(item_name):
+ """Ensure that everything loads correctly."""
+ if item_name == "ParseResult" and sys.version_info < (2, 5):
+ py.test.skip("ParseResult is only found on 2.5+")
+ if item_name in ("parse_qs", "parse_qsl") and sys.version_info < (2, 6):
+ py.test.skip("parse_qs[l] is new in 2.6")
+ if sys.version_info[:2] >= (2, 6):
+ assert item_name in dir(six.moves.urllib.parse)
+ getattr(six.moves.urllib.parse, item_name)
+
+
+@py.test.mark.parametrize("item_name",
+ [item.name for item in six._urllib_error_moved_attributes])
+def test_move_items_urllib_error(item_name):
+ """Ensure that everything loads correctly."""
+ if sys.version_info[:2] >= (2, 6):
+ assert item_name in dir(six.moves.urllib.error)
+ getattr(six.moves.urllib.error, item_name)
+
+
+@py.test.mark.parametrize("item_name",
+ [item.name for item in six._urllib_request_moved_attributes])
+def test_move_items_urllib_request(item_name):
+ """Ensure that everything loads correctly."""
+ if sys.version_info[:2] >= (2, 6):
+ assert item_name in dir(six.moves.urllib.request)
+ getattr(six.moves.urllib.request, item_name)
+
+
+@py.test.mark.parametrize("item_name",
+ [item.name for item in six._urllib_response_moved_attributes])
+def test_move_items_urllib_response(item_name):
+ """Ensure that everything loads correctly."""
+ if sys.version_info[:2] >= (2, 6):
+ assert item_name in dir(six.moves.urllib.response)
+ getattr(six.moves.urllib.response, item_name)
+
+
+@py.test.mark.parametrize("item_name",
+ [item.name for item in six._urllib_robotparser_moved_attributes])
+def test_move_items_urllib_robotparser(item_name):
+ """Ensure that everything loads correctly."""
+ if sys.version_info[:2] >= (2, 6):
+ assert item_name in dir(six.moves.urllib.robotparser)
+ getattr(six.moves.urllib.robotparser, item_name)
+
+
+def test_import_moves_error_1():
+ from six.moves.urllib.parse import urljoin
+ from six import moves
+ # In 1.4.1: AttributeError: 'Module_six_moves_urllib_parse' object has no attribute 'urljoin'
+ assert moves.urllib.parse.urljoin
+
+
+def test_import_moves_error_2():
+ from six import moves
+ assert moves.urllib.parse.urljoin
+ # In 1.4.1: ImportError: cannot import name urljoin
+ from six.moves.urllib.parse import urljoin
+
+
+def test_import_moves_error_3():
+ from six.moves.urllib.parse import urljoin
+ # In 1.4.1: ImportError: cannot import name urljoin
+ from six.moves.urllib_parse import urljoin
+
+
+def test_from_imports():
+ from six.moves.queue import Queue
+ assert isinstance(Queue, six.class_types)
+ from six.moves.configparser import ConfigParser
+ assert isinstance(ConfigParser, six.class_types)
+
+
+def test_filter():
+ from six.moves import filter
+ f = filter(lambda x: x % 2, range(10))
+ assert six.advance_iterator(f) == 1
+
+
+def test_filter_false():
+ from six.moves import filterfalse
+ f = filterfalse(lambda x: x % 3, range(10))
+ assert six.advance_iterator(f) == 0
+ assert six.advance_iterator(f) == 3
+ assert six.advance_iterator(f) == 6
+
+def test_map():
+ from six.moves import map
+ assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
+
+
+def test_zip():
+ from six.moves import zip
+ assert six.advance_iterator(zip(range(2), range(2))) == (0, 0)
+
+
+@py.test.mark.skipif("sys.version_info < (2, 6)")
+def test_zip_longest():
+ from six.moves import zip_longest
+ it = zip_longest(range(2), range(1))
+
+ assert six.advance_iterator(it) == (0, 0)
+ assert six.advance_iterator(it) == (1, None)
+
+
+class TestCustomizedMoves:
+
+ def teardown_method(self, meth):
+ try:
+ del six._MovedItems.spam
+ except AttributeError:
+ pass
+ try:
+ del six.moves.__dict__["spam"]
+ except KeyError:
+ pass
+
+
+ def test_moved_attribute(self):
+ attr = six.MovedAttribute("spam", "foo", "bar")
+ if six.PY3:
+ assert attr.mod == "bar"
+ else:
+ assert attr.mod == "foo"
+ assert attr.attr == "spam"
+ attr = six.MovedAttribute("spam", "foo", "bar", "lemma")
+ assert attr.attr == "lemma"
+ attr = six.MovedAttribute("spam", "foo", "bar", "lemma", "theorm")
+ if six.PY3:
+ assert attr.attr == "theorm"
+ else:
+ assert attr.attr == "lemma"
+
+
+ def test_moved_module(self):
+ attr = six.MovedModule("spam", "foo")
+ if six.PY3:
+ assert attr.mod == "spam"
+ else:
+ assert attr.mod == "foo"
+ attr = six.MovedModule("spam", "foo", "bar")
+ if six.PY3:
+ assert attr.mod == "bar"
+ else:
+ assert attr.mod == "foo"
+
+
+ def test_custom_move_module(self):
+ attr = six.MovedModule("spam", "six", "six")
+ six.add_move(attr)
+ six.remove_move("spam")
+ assert not hasattr(six.moves, "spam")
+ attr = six.MovedModule("spam", "six", "six")
+ six.add_move(attr)
+ from six.moves import spam
+ assert spam is six
+ six.remove_move("spam")
+ assert not hasattr(six.moves, "spam")
+
+
+ def test_custom_move_attribute(self):
+ attr = six.MovedAttribute("spam", "six", "six", "u", "u")
+ six.add_move(attr)
+ six.remove_move("spam")
+ assert not hasattr(six.moves, "spam")
+ attr = six.MovedAttribute("spam", "six", "six", "u", "u")
+ six.add_move(attr)
+ from six.moves import spam
+ assert spam is six.u
+ six.remove_move("spam")
+ assert not hasattr(six.moves, "spam")
+
+
+ def test_empty_remove(self):
+ py.test.raises(AttributeError, six.remove_move, "eggs")
+
+
+def test_get_unbound_function():
+ class X(object):
+ def m(self):
+ pass
+ assert six.get_unbound_function(X.m) is X.__dict__["m"]
+
+
+def test_get_method_self():
+ class X(object):
+ def m(self):
+ pass
+ x = X()
+ assert six.get_method_self(x.m) is x
+ py.test.raises(AttributeError, six.get_method_self, 42)
+
+
+def test_get_method_function():
+ class X(object):
+ def m(self):
+ pass
+ x = X()
+ assert six.get_method_function(x.m) is X.__dict__["m"]
+ py.test.raises(AttributeError, six.get_method_function, hasattr)
+
+
+def test_get_function_closure():
+ def f():
+ x = 42
+ def g():
+ return x
+ return g
+ cell = six.get_function_closure(f())[0]
+ assert type(cell).__name__ == "cell"
+
+
+def test_get_function_code():
+ def f():
+ pass
+ assert isinstance(six.get_function_code(f), types.CodeType)
+ if not hasattr(sys, "pypy_version_info"):
+ py.test.raises(AttributeError, six.get_function_code, hasattr)
+
+
+def test_get_function_defaults():
+ def f(x, y=3, b=4):
+ pass
+ assert six.get_function_defaults(f) == (3, 4)
+
+
+def test_get_function_globals():
+ def f():
+ pass
+ assert six.get_function_globals(f) is globals()
+
+
+def test_dictionary_iterators(monkeypatch):
+ def stock_method_name(iterwhat):
+ """Given a method suffix like "lists" or "values", return the name
+ of the dict method that delivers those on the version of Python
+ we're running in."""
+ if six.PY3:
+ return iterwhat
+ return 'iter' + iterwhat
+
+ class MyDict(dict):
+ if not six.PY3:
+ def lists(self, **kw):
+ return [1, 2, 3]
+ def iterlists(self, **kw):
+ return iter([1, 2, 3])
+ f = MyDict.iterlists
+ del MyDict.iterlists
+ setattr(MyDict, stock_method_name('lists'), f)
+
+ d = MyDict(zip(range(10), reversed(range(10))))
+ for name in "keys", "values", "items", "lists":
+ meth = getattr(six, "iter" + name)
+ it = meth(d)
+ assert not isinstance(it, list)
+ assert list(it) == list(getattr(d, name)())
+ py.test.raises(StopIteration, six.advance_iterator, it)
+ record = []
+ def with_kw(*args, **kw):
+ record.append(kw["kw"])
+ return old(*args)
+ old = getattr(MyDict, stock_method_name(name))
+ monkeypatch.setattr(MyDict, stock_method_name(name), with_kw)
+ meth(d, kw=42)
+ assert record == [42]
+ monkeypatch.undo()
+
+
+@py.test.mark.skipif(sys.version_info[:2] < (2, 7),
+ reason="view methods on dictionaries only available on 2.7+")
+def test_dictionary_views():
+ def stock_method_name(viewwhat):
+ """Given a method suffix like "keys" or "values", return the name
+ of the dict method that delivers those on the version of Python
+ we're running in."""
+ if six.PY3:
+ return viewwhat
+ return 'view' + viewwhat
+
+ d = dict(zip(range(10), (range(11, 20))))
+ for name in "keys", "values", "items":
+ meth = getattr(six, "view" + name)
+ view = meth(d)
+ assert set(view) == set(getattr(d, name)())
+
+
+def test_advance_iterator():
+ assert six.next is six.advance_iterator
+ l = [1, 2]
+ it = iter(l)
+ assert six.next(it) == 1
+ assert six.next(it) == 2
+ py.test.raises(StopIteration, six.next, it)
+ py.test.raises(StopIteration, six.next, it)
+
+
+def test_iterator():
+ class myiter(six.Iterator):
+ def __next__(self):
+ return 13
+ assert six.advance_iterator(myiter()) == 13
+ class myitersub(myiter):
+ def __next__(self):
+ return 14
+ assert six.advance_iterator(myitersub()) == 14
+
+
+def test_callable():
+ class X:
+ def __call__(self):
+ pass
+ def method(self):
+ pass
+ assert six.callable(X)
+ assert six.callable(X())
+ assert six.callable(test_callable)
+ assert six.callable(hasattr)
+ assert six.callable(X.method)
+ assert six.callable(X().method)
+ assert not six.callable(4)
+ assert not six.callable("string")
+
+
+def test_create_bound_method():
+ class X(object):
+ pass
+ def f(self):
+ return self
+ x = X()
+ b = six.create_bound_method(f, x)
+ assert isinstance(b, types.MethodType)
+ assert b() is x
+
+
+if six.PY3:
+
+ def test_b():
+ data = six.b("\xff")
+ assert isinstance(data, bytes)
+ assert len(data) == 1
+ assert data == bytes([255])
+
+
+ def test_u():
+ s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
+ assert isinstance(s, str)
+ assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
+
+else:
+
+ def test_b():
+ data = six.b("\xff")
+ assert isinstance(data, str)
+ assert len(data) == 1
+ assert data == "\xff"
+
+
+ def test_u():
+ s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
+ assert isinstance(s, unicode)
+ assert s == "hi \xd0\xb9 \xd0\xb9 \\ \\\\ \n".decode("utf8")
+
+
+def test_u_escapes():
+ s = six.u("\u1234")
+ assert len(s) == 1
+
+
+def test_unichr():
+ assert six.u("\u1234") == six.unichr(0x1234)
+ assert type(six.u("\u1234")) is type(six.unichr(0x1234))
+
+
+def test_int2byte():
+ assert six.int2byte(3) == six.b("\x03")
+ py.test.raises((OverflowError, ValueError), six.int2byte, 256)
+
+
+def test_byte2int():
+ assert six.byte2int(six.b("\x03")) == 3
+ assert six.byte2int(six.b("\x03\x04")) == 3
+ py.test.raises(IndexError, six.byte2int, six.b(""))
+
+
+def test_bytesindex():
+ assert six.indexbytes(six.b("hello"), 3) == ord("l")
+
+
+def test_bytesiter():
+ it = six.iterbytes(six.b("hi"))
+ assert six.next(it) == ord("h")
+ assert six.next(it) == ord("i")
+ py.test.raises(StopIteration, six.next, it)
+
+
+def test_StringIO():
+ fp = six.StringIO()
+ fp.write(six.u("hello"))
+ assert fp.getvalue() == six.u("hello")
+
+
+def test_BytesIO():
+ fp = six.BytesIO()
+ fp.write(six.b("hello"))
+ assert fp.getvalue() == six.b("hello")
+
+
+def test_exec_():
+ def f():
+ l = []
+ six.exec_("l.append(1)")
+ assert l == [1]
+ f()
+ ns = {}
+ six.exec_("x = 42", ns)
+ assert ns["x"] == 42
+ glob = {}
+ loc = {}
+ six.exec_("global y; y = 42; x = 12", glob, loc)
+ assert glob["y"] == 42
+ assert "x" not in glob
+ assert loc["x"] == 12
+ assert "y" not in loc
+
+
+def test_reraise():
+ def get_next(tb):
+ if six.PY3:
+ return tb.tb_next.tb_next
+ else:
+ return tb.tb_next
+ e = Exception("blah")
+ try:
+ raise e
+ except Exception:
+ tp, val, tb = sys.exc_info()
+ try:
+ six.reraise(tp, val, tb)
+ except Exception:
+ tp2, value2, tb2 = sys.exc_info()
+ assert tp2 is Exception
+ assert value2 is e
+ assert tb is get_next(tb2)
+ try:
+ six.reraise(tp, val)
+ except Exception:
+ tp2, value2, tb2 = sys.exc_info()
+ assert tp2 is Exception
+ assert value2 is e
+ assert tb2 is not tb
+ try:
+ six.reraise(tp, val, tb2)
+ except Exception:
+ tp2, value2, tb3 = sys.exc_info()
+ assert tp2 is Exception
+ assert value2 is e
+ assert get_next(tb3) is tb2
+ try:
+ six.reraise(tp, None, tb)
+ except Exception:
+ tp2, value2, tb2 = sys.exc_info()
+ assert tp2 is Exception
+ assert value2 is not val
+ assert isinstance(value2, Exception)
+ assert tb is get_next(tb2)
+
+
+def test_raise_from():
+ try:
+ try:
+ raise Exception("blah")
+ except Exception:
+ ctx = sys.exc_info()[1]
+ f = Exception("foo")
+ six.raise_from(f, None)
+ except Exception:
+ tp, val, tb = sys.exc_info()
+ if sys.version_info[:2] > (3, 0):
+ # We should have done a raise f from None equivalent.
+ assert val.__cause__ is None
+ assert val.__context__ is ctx
+ if sys.version_info[:2] >= (3, 3):
+ # And that should suppress the context on the exception.
+ assert val.__suppress_context__
+ # For all versions the outer exception should have raised successfully.
+ assert str(val) == "foo"
+
+
+def test_print_():
+ save = sys.stdout
+ out = sys.stdout = six.moves.StringIO()
+ try:
+ six.print_("Hello,", "person!")
+ finally:
+ sys.stdout = save
+ assert out.getvalue() == "Hello, person!\n"
+ out = six.StringIO()
+ six.print_("Hello,", "person!", file=out)
+ assert out.getvalue() == "Hello, person!\n"
+ out = six.StringIO()
+ six.print_("Hello,", "person!", file=out, end="")
+ assert out.getvalue() == "Hello, person!"
+ out = six.StringIO()
+ six.print_("Hello,", "person!", file=out, sep="X")
+ assert out.getvalue() == "Hello,Xperson!\n"
+ out = six.StringIO()
+ six.print_(six.u("Hello,"), six.u("person!"), file=out)
+ result = out.getvalue()
+ assert isinstance(result, six.text_type)
+ assert result == six.u("Hello, person!\n")
+ six.print_("Hello", file=None) # This works.
+ out = six.StringIO()
+ six.print_(None, file=out)
+ assert out.getvalue() == "None\n"
+
+
+@py.test.mark.skipif("sys.version_info[:2] >= (2, 6)")
+def test_print_encoding(monkeypatch):
+ # Fool the type checking in print_.
+ monkeypatch.setattr(six, "file", six.BytesIO, raising=False)
+ out = six.BytesIO()
+ out.encoding = "utf-8"
+ out.errors = None
+ six.print_(six.u("\u053c"), end="", file=out)
+ assert out.getvalue() == six.b("\xd4\xbc")
+ out = six.BytesIO()
+ out.encoding = "ascii"
+ out.errors = "strict"
+ py.test.raises(UnicodeEncodeError, six.print_, six.u("\u053c"), file=out)
+ out.errors = "backslashreplace"
+ six.print_(six.u("\u053c"), end="", file=out)
+ assert out.getvalue() == six.b("\\u053c")
+
+
+def test_print_exceptions():
+ py.test.raises(TypeError, six.print_, x=3)
+ py.test.raises(TypeError, six.print_, end=3)
+ py.test.raises(TypeError, six.print_, sep=42)
+
+
+def test_with_metaclass():
+ class Meta(type):
+ pass
+ class X(six.with_metaclass(Meta)):
+ pass
+ assert type(X) is Meta
+ assert issubclass(X, object)
+ class Base(object):
+ pass
+ class X(six.with_metaclass(Meta, Base)):
+ pass
+ assert type(X) is Meta
+ assert issubclass(X, Base)
+ class Base2(object):
+ pass
+ class X(six.with_metaclass(Meta, Base, Base2)):
+ pass
+ assert type(X) is Meta
+ assert issubclass(X, Base)
+ assert issubclass(X, Base2)
+ assert X.__mro__ == (X, Base, Base2, object)
+
+
+def test_wraps():
+ def f(g):
+ @six.wraps(g)
+ def w():
+ return 42
+ return w
+ def k():
+ pass
+ original_k = k
+ k = f(f(k))
+ assert hasattr(k, '__wrapped__')
+ k = k.__wrapped__
+ assert hasattr(k, '__wrapped__')
+ k = k.__wrapped__
+ assert k is original_k
+ assert not hasattr(k, '__wrapped__')
+
+ def f(g, assign, update):
+ def w():
+ return 42
+ w.glue = {"foo" : "bar"}
+ return six.wraps(g, assign, update)(w)
+ k.glue = {"melon" : "egg"}
+ k.turnip = 43
+ k = f(k, ["turnip"], ["glue"])
+ assert k.__name__ == "w"
+ assert k.turnip == 43
+ assert k.glue == {"melon" : "egg", "foo" : "bar"}
+
+
+def test_add_metaclass():
+ class Meta(type):
+ pass
+ class X:
+ "success"
+ X = six.add_metaclass(Meta)(X)
+ assert type(X) is Meta
+ assert issubclass(X, object)
+ assert X.__module__ == __name__
+ assert X.__doc__ == "success"
+ class Base(object):
+ pass
+ class X(Base):
+ pass
+ X = six.add_metaclass(Meta)(X)
+ assert type(X) is Meta
+ assert issubclass(X, Base)
+ class Base2(object):
+ pass
+ class X(Base, Base2):
+ pass
+ X = six.add_metaclass(Meta)(X)
+ assert type(X) is Meta
+ assert issubclass(X, Base)
+ assert issubclass(X, Base2)
+
+ # Test a second-generation subclass of a type.
+ class Meta1(type):
+ m1 = "m1"
+ class Meta2(Meta1):
+ m2 = "m2"
+ class Base:
+ b = "b"
+ Base = six.add_metaclass(Meta1)(Base)
+ class X(Base):
+ x = "x"
+ X = six.add_metaclass(Meta2)(X)
+ assert type(X) is Meta2
+ assert issubclass(X, Base)
+ assert type(Base) is Meta1
+ assert "__dict__" not in vars(X)
+ instance = X()
+ instance.attr = "test"
+ assert vars(instance) == {"attr": "test"}
+ assert instance.b == Base.b
+ assert instance.x == X.x
+
+ # Test a class with slots.
+ class MySlots(object):
+ __slots__ = ["a", "b"]
+ MySlots = six.add_metaclass(Meta1)(MySlots)
+
+ assert MySlots.__slots__ == ["a", "b"]
+ instance = MySlots()
+ instance.a = "foo"
+ py.test.raises(AttributeError, setattr, instance, "c", "baz")
+
+ # Test a class with string for slots.
+ class MyStringSlots(object):
+ __slots__ = "ab"
+ MyStringSlots = six.add_metaclass(Meta1)(MyStringSlots)
+ assert MyStringSlots.__slots__ == "ab"
+ instance = MyStringSlots()
+ instance.ab = "foo"
+ py.test.raises(AttributeError, setattr, instance, "a", "baz")
+ py.test.raises(AttributeError, setattr, instance, "b", "baz")
+
+ class MySlotsWeakref(object):
+ __slots__ = "__weakref__",
+ MySlotsWeakref = six.add_metaclass(Meta)(MySlotsWeakref)
+ assert type(MySlotsWeakref) is Meta
diff --git a/testing/web-platform/tests/tools/six/tox.ini b/testing/web-platform/tests/tools/six/tox.ini
new file mode 100644
index 000000000..b29b31a69
--- /dev/null
+++ b/testing/web-platform/tests/tools/six/tox.ini
@@ -0,0 +1,12 @@
+[tox]
+envlist=py25,py26,py27,py31,py32,py33,py34,pypy
+indexserver=
+ default = http://pypi.python.org/simple
+ testrun = http://pypi.testrun.org
+
+[testenv]
+deps=pytest
+commands= py.test -rfsxX {posargs}
+
+[pytest]
+minversion=2.2.0
diff --git a/testing/web-platform/tests/tools/sslutils/__init__.py b/testing/web-platform/tests/tools/sslutils/__init__.py
new file mode 100644
index 000000000..e46be851d
--- /dev/null
+++ b/testing/web-platform/tests/tools/sslutils/__init__.py
@@ -0,0 +1,9 @@
+import openssl
+import pregenerated
+from base import NoSSLEnvironment
+from openssl import OpenSSLEnvironment
+from pregenerated import PregeneratedSSLEnvironment
+
+environments = {"none": NoSSLEnvironment,
+ "openssl": OpenSSLEnvironment,
+ "pregenerated": PregeneratedSSLEnvironment}
diff --git a/testing/web-platform/tests/tools/sslutils/base.py b/testing/web-platform/tests/tools/sslutils/base.py
new file mode 100644
index 000000000..e78e1385c
--- /dev/null
+++ b/testing/web-platform/tests/tools/sslutils/base.py
@@ -0,0 +1,25 @@
+from mozlog import structured
+
+def get_logger(name="ssl"):
+ logger = structured.get_default_logger(name)
+ if logger is None:
+ logger = structured.structuredlog.StructuredLogger(name)
+ return logger
+
+class NoSSLEnvironment(object):
+ ssl_enabled = False
+
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ pass
+
+    def host_cert_path(self, hosts):
+ return None, None
+
+ def ca_cert_path(self):
+ return None
diff --git a/testing/web-platform/tests/tools/sslutils/openssl.py b/testing/web-platform/tests/tools/sslutils/openssl.py
new file mode 100644
index 000000000..26ed71135
--- /dev/null
+++ b/testing/web-platform/tests/tools/sslutils/openssl.py
@@ -0,0 +1,418 @@
+import functools
+import os
+import random
+import shutil
+import subprocess
+import tempfile
+from datetime import datetime
+
+class OpenSSL(object):
+ def __init__(self, logger, binary, base_path, conf_path, hosts, duration,
+ base_conf_path=None):
+ """Context manager for interacting with OpenSSL.
+ Creates a config file for the duration of the context.
+
+ :param logger: stdlib logger or python structured logger
+ :param binary: path to openssl binary
+ :param base_path: path to directory for storing certificates
+        :param conf_path: path at which the OpenSSL configuration file will be written
+ :param hosts: list of hosts to include in configuration (or None if not
+ generating host certificates)
+ :param duration: Certificate duration in days"""
+
+ self.base_path = base_path
+ self.binary = binary
+ self.conf_path = conf_path
+ self.base_conf_path = base_conf_path
+ self.logger = logger
+ self.proc = None
+ self.cmd = []
+ self.hosts = hosts
+ self.duration = duration
+
+ def __enter__(self):
+ with open(self.conf_path, "w") as f:
+ f.write(get_config(self.base_path, self.hosts, self.duration))
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ os.unlink(self.conf_path)
+
+ def log(self, line):
+ if hasattr(self.logger, "process_output"):
+ self.logger.process_output(self.proc.pid if self.proc is not None else None,
+ line.decode("utf8", "replace"),
+ command=" ".join(self.cmd))
+ else:
+ self.logger.debug(line)
+
+ def __call__(self, cmd, *args, **kwargs):
+ """Run a command using OpenSSL in the current context.
+
+ :param cmd: The openssl subcommand to run
+ :param *args: Additional arguments to pass to the command
+ """
+ self.cmd = [self.binary, cmd]
+ if cmd != "x509":
+ self.cmd += ["-config", self.conf_path]
+ self.cmd += list(args)
+
+ env = os.environ.copy()
+ if self.base_conf_path is not None:
+ env["OPENSSL_CONF"] = self.base_conf_path.encode("utf8")
+
+ self.proc = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ env=env)
+ stdout, stderr = self.proc.communicate()
+ self.log(stdout)
+ if self.proc.returncode != 0:
+ raise subprocess.CalledProcessError(self.proc.returncode, self.cmd,
+ output=stdout)
+
+ self.cmd = []
+ self.proc = None
+ return stdout
+
+
+def make_subject(common_name,
+ country=None,
+ state=None,
+ locality=None,
+ organization=None,
+ organization_unit=None):
+ args = [("country", "C"),
+ ("state", "ST"),
+ ("locality", "L"),
+ ("organization", "O"),
+ ("organization_unit", "OU"),
+ ("common_name", "CN")]
+
+ rv = []
+
+ for var, key in args:
+ value = locals()[var]
+ if value is not None:
+ rv.append("/%s=%s" % (key, value.replace("/", "\\/")))
+
+ return "".join(rv)
+
+def make_alt_names(hosts):
+ rv = []
+ for name in hosts:
+ rv.append("DNS:%s" % name)
+ return ",".join(rv)
+
+def get_config(root_dir, hosts, duration=30):
+ if hosts is None:
+ san_line = ""
+ else:
+ san_line = "subjectAltName = %s" % make_alt_names(hosts)
+
+ if os.path.sep == "\\":
+ # This seems to be needed for the Shining Light OpenSSL on
+ # Windows, at least.
+ root_dir = root_dir.replace("\\", "\\\\")
+
+ rv = """[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+dir = %(root_dir)s
+certs = $dir
+new_certs_dir = $certs
+crl_dir = $dir%(sep)scrl
+database = $dir%(sep)sindex.txt
+private_key = $dir%(sep)scakey.pem
+certificate = $dir%(sep)scacert.pem
+serial = $dir%(sep)sserial
+crldir = $dir%(sep)scrl
+crlnumber = $dir%(sep)scrlnumber
+crl = $crldir%(sep)scrl.pem
+RANDFILE = $dir%(sep)sprivate%(sep)s.rand
+x509_extensions = usr_cert
+name_opt = ca_default
+cert_opt = ca_default
+default_days = %(duration)d
+default_crl_days = %(duration)d
+default_md = sha256
+preserve = no
+policy = policy_anything
+copy_extensions = copy
+
+[ policy_anything ]
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+[ req ]
+default_bits = 2048
+default_keyfile = privkey.pem
+distinguished_name = req_distinguished_name
+attributes = req_attributes
+x509_extensions = v3_ca
+
+# Passwords for private keys; if not present they will be prompted for
+# input_password = secret
+# output_password = secret
+string_mask = utf8only
+req_extensions = v3_req
+
+[ req_distinguished_name ]
+countryName = Country Name (2 letter code)
+countryName_default = AU
+countryName_min = 2
+countryName_max = 2
+stateOrProvinceName = State or Province Name (full name)
+stateOrProvinceName_default =
+localityName = Locality Name (eg, city)
+0.organizationName = Organization Name
+0.organizationName_default = Web Platform Tests
+organizationalUnitName = Organizational Unit Name (eg, section)
+#organizationalUnitName_default =
+commonName = Common Name (e.g. server FQDN or YOUR name)
+commonName_max = 64
+emailAddress = Email Address
+emailAddress_max = 64
+
+[ req_attributes ]
+
+[ usr_cert ]
+basicConstraints=CA:false
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid,issuer
+
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+%(san_line)s
+
+[ v3_ca ]
+basicConstraints = CA:true
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid:always,issuer:always
+keyUsage = keyCertSign
+""" % {"root_dir": root_dir,
+ "san_line": san_line,
+ "duration": duration,
+ "sep": os.path.sep.replace("\\", "\\\\")}
+
+ return rv
+
+class OpenSSLEnvironment(object):
+ ssl_enabled = True
+
+ def __init__(self, logger, openssl_binary="openssl", base_path=None,
+ password="web-platform-tests", force_regenerate=False,
+ duration=30, base_conf_path=None):
+ """SSL environment that creates a local CA and host certificate using OpenSSL.
+
+ By default this will look in base_path for existing certificates that are still
+ valid and only create new certificates if there aren't any. This behaviour can
+ be adjusted using the force_regenerate option.
+
+ :param logger: a stdlib logging compatible logger or mozlog structured logger
+ :param openssl_binary: Path to the OpenSSL binary
+ :param base_path: Path in which certificates will be stored. If None, a temporary
+ directory will be used and removed when the server shuts down
+ :param password: Password to use
+ :param force_regenerate: Always create a new certificate even if one already exists.
+ """
+ self.logger = logger
+
+ self.temporary = False
+ if base_path is None:
+ base_path = tempfile.mkdtemp()
+ self.temporary = True
+
+ self.base_path = os.path.abspath(base_path)
+ self.password = password
+ self.force_regenerate = force_regenerate
+ self.duration = duration
+ self.base_conf_path = base_conf_path
+
+ self.path = None
+ self.binary = openssl_binary
+ self.openssl = None
+
+ self._ca_cert_path = None
+ self._ca_key_path = None
+ self.host_certificates = {}
+
+ def __enter__(self):
+ if not os.path.exists(self.base_path):
+ os.makedirs(self.base_path)
+
+ path = functools.partial(os.path.join, self.base_path)
+
+ with open(path("index.txt"), "w"):
+ pass
+ with open(path("serial"), "w") as f:
+ serial = "%x" % random.randint(0, 1000000)
+ if len(serial) % 2:
+ serial = "0" + serial
+ f.write(serial)
+
+ self.path = path
+
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ if self.temporary:
+ shutil.rmtree(self.base_path)
+
+ def _config_openssl(self, hosts):
+ conf_path = self.path("openssl.cfg")
+ return OpenSSL(self.logger, self.binary, self.base_path, conf_path, hosts,
+ self.duration, self.base_conf_path)
+
+ def ca_cert_path(self):
+ """Get the path to the CA certificate file, generating a
+ new one if needed"""
+ if self._ca_cert_path is None and not self.force_regenerate:
+ self._load_ca_cert()
+ if self._ca_cert_path is None:
+ self._generate_ca()
+ return self._ca_cert_path
+
+ def _load_ca_cert(self):
+ key_path = self.path("cakey.pem")
+ cert_path = self.path("cacert.pem")
+
+ if self.check_key_cert(key_path, cert_path, None):
+ self.logger.info("Using existing CA cert")
+ self._ca_key_path, self._ca_cert_path = key_path, cert_path
+
+ def check_key_cert(self, key_path, cert_path, hosts):
+ """Check that a key and cert file exist and are valid"""
+ if not os.path.exists(key_path) or not os.path.exists(cert_path):
+ return False
+
+ with self._config_openssl(hosts) as openssl:
+ end_date_str = openssl("x509",
+ "-noout",
+ "-enddate",
+ "-in", cert_path).split("=", 1)[1].strip()
+ # Not sure if this works in other locales
+ end_date = datetime.strptime(end_date_str, "%b %d %H:%M:%S %Y %Z")
+ # Should have some buffer here e.g. 1 hr
+ if end_date < datetime.now():
+ return False
+
+ #TODO: check the key actually signed the cert.
+ return True
+
+ def _generate_ca(self):
+ path = self.path
+ self.logger.info("Generating new CA in %s" % self.base_path)
+
+ key_path = path("cakey.pem")
+ req_path = path("careq.pem")
+ cert_path = path("cacert.pem")
+
+ with self._config_openssl(None) as openssl:
+ openssl("req",
+ "-batch",
+ "-new",
+ "-newkey", "rsa:2048",
+ "-keyout", key_path,
+ "-out", req_path,
+ "-subj", make_subject("web-platform-tests"),
+ "-passout", "pass:%s" % self.password)
+
+ openssl("ca",
+ "-batch",
+ "-create_serial",
+ "-keyfile", key_path,
+ "-passin", "pass:%s" % self.password,
+ "-selfsign",
+ "-extensions", "v3_ca",
+ "-in", req_path,
+ "-out", cert_path)
+
+ os.unlink(req_path)
+
+ self._ca_key_path, self._ca_cert_path = key_path, cert_path
+
+ def host_cert_path(self, hosts):
+ """Get a tuple of (private key path, certificate path) for a host,
+ generating new ones if necessary.
+
+ hosts must be a list of all hosts to appear on the certificate, with
+ the primary hostname first."""
+ hosts = tuple(hosts)
+ if hosts not in self.host_certificates:
+ if not self.force_regenerate:
+ key_cert = self._load_host_cert(hosts)
+ else:
+ key_cert = None
+ if key_cert is None:
+ key, cert = self._generate_host_cert(hosts)
+ else:
+ key, cert = key_cert
+ self.host_certificates[hosts] = key, cert
+
+ return self.host_certificates[hosts]
+
+ def _load_host_cert(self, hosts):
+ host = hosts[0]
+ key_path = self.path("%s.key" % host)
+ cert_path = self.path("%s.pem" % host)
+
+ # TODO: check that this cert was signed by the CA cert
+ if self.check_key_cert(key_path, cert_path, hosts):
+ self.logger.info("Using existing host cert")
+ return key_path, cert_path
+
+ def _generate_host_cert(self, hosts):
+ host = hosts[0]
+ if self._ca_key_path is None:
+ self._generate_ca()
+ ca_key_path = self._ca_key_path
+
+ assert os.path.exists(ca_key_path)
+
+ path = self.path
+
+ req_path = path("wpt.req")
+ cert_path = path("%s.pem" % host)
+ key_path = path("%s.key" % host)
+
+ self.logger.info("Generating new host cert")
+
+ with self._config_openssl(hosts) as openssl:
+ openssl("req",
+ "-batch",
+ "-newkey", "rsa:2048",
+ "-keyout", key_path,
+ "-in", ca_key_path,
+ "-nodes",
+ "-out", req_path)
+
+ openssl("ca",
+ "-batch",
+ "-in", req_path,
+ "-passin", "pass:%s" % self.password,
+ "-subj", make_subject(host),
+ "-out", cert_path)
+
+ os.unlink(req_path)
+
+ return key_path, cert_path
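+
+if __name__ == "__main__":
+    # Minimal usage sketch, assuming the openssl binary is on PATH; the
+    # host name below is illustrative. A stdlib logger satisfies the logger
+    # interface this module uses (info/debug calls only).
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    env_logger = logging.getLogger("ssl")
+    with OpenSSLEnvironment(env_logger) as env:
+        ca_cert = env.ca_cert_path()
+        key_path, cert_path = env.host_cert_path(["web-platform.test"])
+        env_logger.info("CA cert: %s, host key: %s, host cert: %s"
+                        % (ca_cert, key_path, cert_path))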
diff --git a/testing/web-platform/tests/tools/sslutils/pregenerated.py b/testing/web-platform/tests/tools/sslutils/pregenerated.py
new file mode 100644
index 000000000..fc487df3a
--- /dev/null
+++ b/testing/web-platform/tests/tools/sslutils/pregenerated.py
@@ -0,0 +1,36 @@
+class PregeneratedSSLEnvironment(object):
+ """SSL environment to use with existing key/certificate files
+ e.g. when running on a server with a public domain name
+ """
+ ssl_enabled = True
+
+ def __init__(self, logger, host_key_path, host_cert_path,
+ ca_cert_path=None):
+ self._ca_cert_path = ca_cert_path
+ self._host_key_path = host_key_path
+ self._host_cert_path = host_cert_path
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ pass
+
+ def host_cert_path(self, hosts):
+ """Return the key and certificate paths for the host"""
+ return self._host_key_path, self._host_cert_path
+
+ def ca_cert_path(self):
+ """Return the certificate path of the CA that signed the
+ host certificates, or None if that isn't known"""
+ return self._ca_cert_path
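+
+
+if __name__ == "__main__":
+    # Minimal sketch: the class only records the paths it is given, so the
+    # placeholder paths below are never opened; the logger argument is not
+    # used by this class.
+    with PregeneratedSSLEnvironment(None, "/path/to/host.key",
+                                    "/path/to/host.pem") as env:
+        print env.host_cert_path(["example.test"])
+        print env.ca_cert_path()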
diff --git a/testing/web-platform/tests/tools/tox.ini b/testing/web-platform/tests/tools/tox.ini
new file mode 100644
index 000000000..a5edd70ad
--- /dev/null
+++ b/testing/web-platform/tests/tools/tox.ini
@@ -0,0 +1,19 @@
+[tox]
+envlist = py27,py35,pypy
+skipsdist=True
+
+[testenv]
+deps =
+ flake8
+ pytest
+ coverage
+ mock
+
+commands =
+ coverage run -m pytest
+ flake8
+
+[flake8]
+ignore = E128,E129,E221,E226,E231,E251,E265,E302,E303,E402,E901,F401,F821,F841
+max-line-length = 141
+exclude = .tox,html5lib,py,pytest,pywebsocket,six,webdriver,wptserve
diff --git a/testing/web-platform/tests/tools/webdriver/.gitignore b/testing/web-platform/tests/tools/webdriver/.gitignore
new file mode 100644
index 000000000..c99747ced
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/.gitignore
@@ -0,0 +1 @@
+webdriver.egg-info/
diff --git a/testing/web-platform/tests/tools/webdriver/COPYING b/testing/web-platform/tests/tools/webdriver/COPYING
new file mode 100644
index 000000000..14e2f777f
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/COPYING
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/testing/web-platform/tests/tools/webdriver/README.md b/testing/web-platform/tests/tools/webdriver/README.md
new file mode 100644
index 000000000..194009f8a
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/README.md
@@ -0,0 +1,90 @@
+# WebDriver client for Python
+
+This package provides Python bindings
+that conform to the [W3C WebDriver standard](https://w3c.github.io/webdriver/webdriver-spec.html),
+which specifies a remote control protocol for web browsers.
+
+These bindings are written with testing
+implementation compliance with the specification in mind,
+so that different remote end drivers
+can be checked against the recognised standard.
+The client is used for the WebDriver specification tests
+in the [Web Platform Tests](https://github.com/w3c/web-platform-tests).
+
+## Installation
+
+To install the package individually
+in your virtualenv or system-wide:
+
+ % python setup.py install
+
+Since this package does not have any external dependencies,
+you can also use the client directly from the checkout directory,
+which is useful if you want to contribute patches back:
+
+ % cd /path/to/wdclient
+ % python
+ Python 2.7.12+ (default, Aug 4 2016, 20:04:34)
+ [GCC 6.1.1 20160724] on linux2
+ Type "help", "copyright", "credits" or "license" for more information.
+ >>> import webdriver
+ >>>
+
+If you are writing WebDriver specification tests for
+[WPT](https://github.com/w3c/web-platform-tests),
+there is no need to install the client manually
+as it is picked up as a submodule of
+[wpt-tools](https://github.com/w3c/wpt-tools),
+which is checked out in `./tools`.
+
+## Usage
+
+You can use the built-in
+[context manager](https://docs.python.org/2/reference/compound_stmts.html#the-with-statement)
+to manage the lifetime of the session.
+The session is started implicitly
+at the first call to a command if it has not already been started,
+and will implicitly be ended when exiting the context:
+
+```py
+import webdriver
+
+with webdriver.Session("127.0.0.1", 4444) as session:
+ session.url = "https://mozilla.org"
+ print "The current URL is %s" % session.url
+```
+
+The following is functionally equivalent to the above,
+but giving you manual control of the session:
+
+```py
+import webdriver
+
+session = webdriver.Session("127.0.0.1", 4444)
+session.start()
+
+session.url = "https://mozilla.org"
+print "The current URL is %s" % session.url
+
+session.end()
+```
+
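+Element handles returned from the find API expose further commands.
+A sketch of element interaction
+(the URL and selector here are illustrative):
+
+```py
+import webdriver
+
+with webdriver.Session("127.0.0.1", 4444) as session:
+    session.url = "https://mozilla.org"
+    # Find.css returns a list of Element objects when all=True (the default)
+    links = session.find.css("a", all=True)
+    if links:
+        links[0].click()
+```
+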
+## Dependencies
+
+This client has the benefit of only using standard library dependencies.
+No external PyPI dependencies are needed.
diff --git a/testing/web-platform/tests/tools/webdriver/setup.py b/testing/web-platform/tests/tools/webdriver/setup.py
new file mode 100644
index 000000000..720fcf05c
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/setup.py
@@ -0,0 +1,18 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from setuptools import setup, find_packages
+
+setup(name="webdriver",
+ version="1.0",
+ description="WebDriver client compatible with "
+ "the W3C browser automation specification.",
+ author="Mozilla Engineering Productivity",
+ author_email="tools@lists.mozilla.org",
+ license="MPL 2.0",
+ packages=find_packages(),
+ classifiers=["Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
+ "Operating System :: OS Independent"])
diff --git a/testing/web-platform/tests/tools/webdriver/webdriver/__init__.py b/testing/web-platform/tests/tools/webdriver/webdriver/__init__.py
new file mode 100644
index 000000000..c827f59f1
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/webdriver/__init__.py
@@ -0,0 +1,31 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from client import Cookies, Element, Find, Session, Timeouts, Window
+from error import (
+ ElementNotSelectableException,
+ ElementNotVisibleException,
+ InvalidArgumentException,
+ InvalidCookieDomainException,
+ InvalidElementCoordinatesException,
+ InvalidElementStateException,
+ InvalidSelectorException,
+ InvalidSessionIdException,
+ JavascriptErrorException,
+ MoveTargetOutOfBoundsException,
+ NoSuchAlertException,
+ NoSuchElementException,
+ NoSuchFrameException,
+ NoSuchWindowException,
+ ScriptTimeoutException,
+ SessionNotCreatedException,
+ StaleElementReferenceException,
+ TimeoutException,
+ UnableToSetCookieException,
+ UnexpectedAlertOpenException,
+ UnknownCommandException,
+ UnknownErrorException,
+ UnknownMethodException,
+ UnsupportedOperationException,
+ WebDriverException)
diff --git a/testing/web-platform/tests/tools/webdriver/webdriver/client.py b/testing/web-platform/tests/tools/webdriver/webdriver/client.py
new file mode 100644
index 000000000..8e1813f25
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/webdriver/client.py
@@ -0,0 +1,595 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import urlparse
+
+import error
+import transport
+
+
+element_key = "element-6066-11e4-a52e-4f735466cecf"
+
+
+def command(func):
+ def inner(self, *args, **kwargs):
+ if hasattr(self, "session"):
+ session = self.session
+ else:
+ session = self
+
+ if session.session_id is None:
+ session.start()
+        assert session.session_id is not None
+
+ return func(self, *args, **kwargs)
+
+ inner.__name__ = func.__name__
+ inner.__doc__ = func.__doc__
+
+ return inner
+
+
+class Timeouts(object):
+ def __init__(self, session):
+ self.session = session
+ self._script = 30
+ self._load = 0
+ self._implicit_wait = 0
+
+ def _set_timeouts(self, name, value):
+ body = {"type": name,
+ "ms": value * 1000}
+ return self.session.send_command("POST", "timeouts", body)
+
+ @property
+ def script(self):
+ return self._script
+
+ @script.setter
+ def script(self, value):
+ self._set_timeouts("script", value)
+ self._script = value
+
+ @property
+ def load(self):
+ return self._load
+
+ @load.setter
+    def load(self, value):
+        self._set_timeouts("page load", value)
+        self._load = value
+
+ @property
+ def implicit_wait(self):
+ return self._implicit_wait
+
+ @implicit_wait.setter
+ def implicit_wait(self, value):
+ self._set_timeouts("implicit wait", value)
+ self._implicit_wait = value
+
+
+class ActionSequence(object):
+ """API for creating and performing action sequences.
+
+ Each action method adds one or more actions to a queue. When perform()
+ is called, the queued actions fire in order.
+
+ May be chained together as in::
+
+ ActionSequence(session, "key", id) \
+ .key_down("a") \
+ .key_up("a") \
+ .perform()
+ """
+ def __init__(self, session, action_type, input_id, pointer_params=None):
+ """Represents a sequence of actions of one type for one input source.
+
+ :param session: WebDriver session.
+ :param action_type: Action type; may be "none", "key", or "pointer".
+ :param input_id: ID of input source.
+ :param pointer_params: Optional dictionary of pointer parameters.
+ """
+ self.session = session
+ self._id = input_id
+ self._type = action_type
+ self._actions = []
+ self._pointer_params = pointer_params
+
+ @property
+ def dict(self):
+ d = {
+ "type": self._type,
+ "id": self._id,
+ "actions": self._actions,
+ }
+ if self._pointer_params is not None:
+ d["parameters"] = self._pointer_params
+ return d
+
+ @command
+ def perform(self):
+ """Perform all queued actions."""
+ self.session.actions.perform([self.dict])
+
+ def _key_action(self, subtype, value):
+ self._actions.append({"type": subtype, "value": value})
+
+ def _pointer_action(self, subtype, button):
+ self._actions.append({"type": subtype, "button": button})
+
+ def pointer_move(self, x, y, duration=None, origin=None):
+ """Queue a pointerMove action.
+
+ :param x: Destination x-axis coordinate of pointer in CSS pixels.
+ :param y: Destination y-axis coordinate of pointer in CSS pixels.
+ :param duration: Number of milliseconds over which to distribute the
+ move. If None, remote end defaults to 0.
+ :param origin: Origin of coordinates, either "viewport", "pointer" or
+ an Element. If None, remote end defaults to "viewport".
+ """
+ # TODO change to pointerMove once geckodriver > 0.14 is available on mozilla-central
+ action = {
+ "type": "move",
+ "x": x,
+ "y": y
+ }
+ if duration is not None:
+ action["duration"] = duration
+ if origin is not None:
+ action["origin"] = origin if isinstance(origin, basestring) else origin.json()
+ self._actions.append(action)
+ return self
+
+ def pointer_up(self, button):
+ """Queue a pointerUp action for `button`.
+
+ :param button: Pointer button to perform action with.
+ """
+ self._pointer_action("pointerUp", button)
+ return self
+
+ def pointer_down(self, button):
+ """Queue a pointerDown action for `button`.
+
+ :param button: Pointer button to perform action with.
+ """
+ self._pointer_action("pointerDown", button)
+ return self
+
+ def key_up(self, value):
+ """Queue a keyUp action for `value`.
+
+ :param value: Character to perform key action with.
+ """
+ self._key_action("keyUp", value)
+ return self
+
+ def key_down(self, value):
+ """Queue a keyDown action for `value`.
+
+ :param value: Character to perform key action with.
+ """
+ self._key_action("keyDown", value)
+ return self
+
+ def send_keys(self, keys):
+ """Queue a keyDown and keyUp action for each character in `keys`.
+
+ :param keys: String of keys to perform key actions with.
+ """
+ for c in keys:
+ self.key_down(c)
+ self.key_up(c)
+ return self
+
+
+class Actions(object):
+ def __init__(self, session):
+ self.session = session
+
+ @command
+ def perform(self, actions=None):
+ """Performs actions by tick from each action sequence in `actions`.
+
+ :param actions: List of input source action sequences. A single action
+ sequence may be created with the help of
+ ``ActionSequence.dict``.
+ """
+ body = {"actions": [] if actions is None else actions}
+ return self.session.send_command("POST", "actions", body)
+
+ @command
+ def release(self):
+ return self.session.send_command("DELETE", "actions")
+
+ def sequence(self, *args, **kwargs):
+ """Return an empty ActionSequence of the designated type.
+
+ See ActionSequence for parameter list.
+ """
+ return ActionSequence(self.session, *args, **kwargs)
+
+class Window(object):
+ def __init__(self, session):
+ self.session = session
+
+ @property
+ @command
+ def size(self):
+ resp = self.session.send_command("GET", "window/size")
+ return (resp["width"], resp["height"])
+
+ @size.setter
+ @command
+ def size(self, (width, height)):
+ body = {"width": width, "height": height}
+ self.session.send_command("POST", "window/size", body)
+
+ @property
+ @command
+ def position(self):
+ resp = self.session.send_command("GET", "window/position")
+ return (resp["x"], resp["y"])
+
+ @position.setter
+ @command
+ def position(self, (x, y)):
+ body = {"x": x, "y": y}
+ self.session.send_command("POST", "window/position", body)
+
+ @property
+ @command
+ def maximize(self):
+ return self.session.send_command("POST", "window/maximize")
+
+
+class Find(object):
+ def __init__(self, session):
+ self.session = session
+
+ @command
+ def css(self, selector, all=True):
+ return self._find_element("css selector", selector, all)
+
+ def _find_element(self, strategy, selector, all):
+ route = "elements" if all else "element"
+
+ body = {"using": strategy,
+ "value": selector}
+
+ data = self.session.send_command("POST", route, body, key="value")
+
+ if all:
+ rv = [self.session._element(item) for item in data]
+ else:
+ rv = self.session._element(data)
+
+ return rv
+
+
+class Cookies(object):
+ def __init__(self, session):
+ self.session = session
+
+ def __getitem__(self, name):
+        return self.session.send_command("GET", "cookie/%s" % name, {}, key="value")
+
+ def __setitem__(self, name, value):
+ cookie = {"name": name,
+ "value": None}
+
+        if isinstance(value, (str, unicode)):
+            cookie["value"] = value
+        elif hasattr(value, "value"):
+            cookie["value"] = value.value
+        self.session.send_command("POST", "cookie", {"cookie": cookie})
+
+
+class UserPrompt(object):
+ def __init__(self, session):
+ self.session = session
+
+ @command
+ def dismiss(self):
+ self.session.send_command("POST", "alert/dismiss")
+
+ @command
+ def accept(self):
+ self.session.send_command("POST", "alert/accept")
+
+ @property
+ @command
+ def text(self):
+ return self.session.send_command("GET", "alert/text", key="value")
+
+ @text.setter
+ @command
+ def text(self, value):
+ body = {"value": list(value)}
+ self.session.send_command("POST", "alert/text", body=body)
+
+
+class Session(object):
+ def __init__(self, host, port, url_prefix="/", desired_capabilities=None,
+ required_capabilities=None, timeout=transport.HTTP_TIMEOUT,
+ extension=None):
+ self.transport = transport.HTTPWireProtocol(
+ host, port, url_prefix, timeout=timeout)
+ self.desired_capabilities = desired_capabilities
+ self.required_capabilities = required_capabilities
+ self.session_id = None
+ self.timeouts = None
+ self.window = None
+ self.find = None
+ self._element_cache = {}
+ self.extension = None
+ self.extension_cls = extension
+
+ self.timeouts = Timeouts(self)
+ self.window = Window(self)
+ self.find = Find(self)
+ self.alert = UserPrompt(self)
+ self.actions = Actions(self)
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.end()
+
+ def __del__(self):
+ self.end()
+
+ def start(self):
+ if self.session_id is not None:
+ return
+
+ body = {}
+
+ caps = {}
+ if self.desired_capabilities is not None:
+ caps["desiredCapabilities"] = self.desired_capabilities
+ if self.required_capabilities is not None:
+ caps["requiredCapabilities"] = self.required_capabilities
+ #body["capabilities"] = caps
+ body = caps
+
+ resp = self.transport.send("POST", "session", body=body)
+ self.session_id = resp["sessionId"]
+
+ if self.extension_cls:
+ self.extension = self.extension_cls(self)
+
+ return resp["value"]
+
+ def end(self):
+ if self.session_id is None:
+ return
+
+ url = "session/%s" % self.session_id
+ self.transport.send("DELETE", url)
+
+ self.session_id = None
+ self.timeouts = None
+ self.window = None
+ self.find = None
+ self.extension = None
+
+ def send_command(self, method, url, body=None, key=None):
+ if self.session_id is None:
+ raise error.SessionNotCreatedException()
+ url = urlparse.urljoin("session/%s/" % self.session_id, url)
+ return self.transport.send(method, url, body, key=key)
+
+ @property
+ @command
+ def url(self):
+ return self.send_command("GET", "url", key="value")
+
+ @url.setter
+ @command
+ def url(self, url):
+ if urlparse.urlsplit(url).netloc is None:
+ return self.url(url)
+ body = {"url": url}
+ return self.send_command("POST", "url", body)
+
+ @command
+ def back(self):
+ return self.send_command("POST", "back")
+
+ @command
+ def forward(self):
+ return self.send_command("POST", "forward")
+
+ @command
+ def refresh(self):
+ return self.send_command("POST", "refresh")
+
+ @property
+ @command
+ def title(self):
+ return self.send_command("GET", "title", key="value")
+
+ @property
+ @command
+ def window_handle(self):
+ return self.send_command("GET", "window_handle", key="value")
+
+ @window_handle.setter
+ @command
+ def window_handle(self, handle):
+ body = {"handle": handle}
+ return self.send_command("POST", "window", body=body)
+
+ def switch_frame(self, frame):
+ if frame == "parent":
+ url = "frame/parent"
+ body = None
+ else:
+ url = "frame"
+ if isinstance(frame, Element):
+ body = {"id": frame.json()}
+ else:
+ body = {"id": frame}
+
+ return self.send_command("POST", url, body)
+
+ @command
+ def close(self):
+ return self.send_command("DELETE", "window_handle")
+
+ @property
+ @command
+ def handles(self):
+ return self.send_command("GET", "window_handles", key="value")
+
+ @property
+ @command
+ def active_element(self):
+ data = self.send_command("GET", "element/active", key="value")
+ if data is not None:
+ return self._element(data)
+
+ def _element(self, data):
+ elem_id = data[element_key]
+ assert elem_id
+ if elem_id in self._element_cache:
+ return self._element_cache[elem_id]
+ return Element(self, elem_id)
+
+ @command
+ def cookies(self, name=None):
+ if name is None:
+ url = "cookie"
+ else:
+ url = "cookie/%s" % name
+ return self.send_command("GET", url, {}, key="value")
+
+ @command
+ def set_cookie(self, name, value, path=None, domain=None, secure=None, expiry=None):
+ body = {"name": name,
+ "value": value}
+ if path is not None:
+ body["path"] = path
+ if domain is not None:
+ body["domain"] = domain
+ if secure is not None:
+ body["secure"] = secure
+ if expiry is not None:
+ body["expiry"] = expiry
+ self.send_command("POST", "cookie", {"cookie": body})
+
+ def delete_cookie(self, name=None):
+ if name is None:
+ url = "cookie"
+ else:
+ url = "cookie/%s" % name
+ self.send_command("DELETE", url, {}, key="value")
+
+ #[...]
+
+ @command
+ def execute_script(self, script, args=None):
+ if args is None:
+ args = []
+
+ body = {
+ "script": script,
+ "args": args
+ }
+ return self.send_command("POST", "execute", body, key="value")
+
+ @command
+ def execute_async_script(self, script, args=None):
+ if args is None:
+ args = []
+
+ body = {
+ "script": script,
+ "args": args
+ }
+ return self.send_command("POST", "execute_async", body, key="value")
+
+ #[...]
+
+ @command
+ def screenshot(self):
+ return self.send_command("GET", "screenshot", key="value")
+
+
+class Element(object):
+ def __init__(self, session, id):
+ self.session = session
+ self.id = id
+ assert id not in self.session._element_cache
+ self.session._element_cache[self.id] = self
+
+ def json(self):
+ return {element_key: self.id}
+
+ @property
+ def session_id(self):
+ return self.session.session_id
+
+ def url(self, suffix):
+ return "element/%s/%s" % (self.id, suffix)
+
+ @command
+ def find_element(self, strategy, selector):
+ body = {"using": strategy,
+ "value": selector}
+
+ elem = self.session.send_command("POST", self.url("element"), body, key="value")
+        return self.session._element(elem)
+
+ @command
+ def click(self):
+ self.session.send_command("POST", self.url("click"), {})
+
+ @command
+ def tap(self):
+ self.session.send_command("POST", self.url("tap"), {})
+
+ @command
+ def clear(self):
+ self.session.send_command("POST", self.url("clear"), {})
+
+ @command
+ def send_keys(self, keys):
+ if isinstance(keys, (str, unicode)):
+ keys = [char for char in keys]
+
+ body = {"value": keys}
+
+ return self.session.send_command("POST", self.url("value"), body)
+
+ @property
+ @command
+ def text(self):
+ return self.session.send_command("GET", self.url("text"), key="value")
+
+ @property
+ @command
+ def name(self):
+ return self.session.send_command("GET", self.url("name"), key="value")
+
+ @command
+ def style(self, property_name):
+ return self.session.send_command("GET", self.url("css/%s" % property_name), key="value")
+
+ @property
+ @command
+ def rect(self):
+ return self.session.send_command("GET", self.url("rect"))
+
+ @command
+ def property(self, name):
+ return self.session.send_command("GET", self.url("property/%s" % name), key="value")
+
+ @command
+ def attribute(self, name):
+ return self.session.send_command("GET", self.url("attribute/%s" % name), key="value")
diff --git a/testing/web-platform/tests/tools/webdriver/webdriver/error.py b/testing/web-platform/tests/tools/webdriver/webdriver/error.py
new file mode 100644
index 000000000..dc168f3ef
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/webdriver/error.py
@@ -0,0 +1,144 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import collections
+
+
+class WebDriverException(Exception):
+ http_status = None
+ status_code = None
+
+
+class ElementNotSelectableException(WebDriverException):
+ http_status = 400
+ status_code = "element not selectable"
+
+
+class ElementNotVisibleException(WebDriverException):
+ http_status = 400
+ status_code = "element not visible"
+
+
+class InvalidArgumentException(WebDriverException):
+ http_status = 400
+ status_code = "invalid argument"
+
+
+class InvalidCookieDomainException(WebDriverException):
+ http_status = 400
+ status_code = "invalid cookie domain"
+
+
+class InvalidElementCoordinatesException(WebDriverException):
+ http_status = 400
+ status_code = "invalid element coordinates"
+
+
+class InvalidElementStateException(WebDriverException):
+ http_status = 400
+    status_code = "invalid element state"
+
+
+class InvalidSelectorException(WebDriverException):
+ http_status = 400
+ status_code = "invalid selector"
+
+
+class InvalidSessionIdException(WebDriverException):
+ http_status = 404
+ status_code = "invalid session id"
+
+
+class JavascriptErrorException(WebDriverException):
+ http_status = 500
+ status_code = "javascript error"
+
+
+class MoveTargetOutOfBoundsException(WebDriverException):
+ http_status = 500
+ status_code = "move target out of bounds"
+
+
+class NoSuchAlertException(WebDriverException):
+ http_status = 400
+ status_code = "no such alert"
+
+
+class NoSuchElementException(WebDriverException):
+ http_status = 404
+ status_code = "no such element"
+
+
+class NoSuchFrameException(WebDriverException):
+ http_status = 400
+ status_code = "no such frame"
+
+
+class NoSuchWindowException(WebDriverException):
+ http_status = 400
+ status_code = "no such window"
+
+
+class ScriptTimeoutException(WebDriverException):
+ http_status = 408
+ status_code = "script timeout"
+
+
+class SessionNotCreatedException(WebDriverException):
+ http_status = 500
+ status_code = "session not created"
+
+
+class StaleElementReferenceException(WebDriverException):
+ http_status = 400
+ status_code = "stale element reference"
+
+
+class TimeoutException(WebDriverException):
+ http_status = 408
+ status_code = "timeout"
+
+
+class UnableToSetCookieException(WebDriverException):
+ http_status = 500
+ status_code = "unable to set cookie"
+
+
+class UnexpectedAlertOpenException(WebDriverException):
+ http_status = 500
+ status_code = "unexpected alert open"
+
+
+class UnknownErrorException(WebDriverException):
+ http_status = 500
+ status_code = "unknown error"
+
+
+class UnknownCommandException(WebDriverException):
+ http_status = 404
+ status_code = "unknown command"
+
+
+class UnknownMethodException(WebDriverException):
+ http_status = 405
+ status_code = "unknown method"
+
+
+class UnsupportedOperationException(WebDriverException):
+ http_status = 500
+ status_code = "unsupported operation"
+
+
+def get(status_code):
+ """Gets exception from `status_code`, falling back to
+ ``WebDriverException`` if it is not found.
+ """
+ return _errors.get(status_code, WebDriverException)
+
+
+_errors = collections.defaultdict()
+for item in locals().values():
+ if type(item) == type and issubclass(item, WebDriverException):
+ _errors[item.status_code] = item
diff --git a/testing/web-platform/tests/tools/webdriver/webdriver/servo.py b/testing/web-platform/tests/tools/webdriver/webdriver/servo.py
new file mode 100644
index 000000000..fd1539f7b
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/webdriver/servo.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from client import command
+
+class ServoExtensionCommands(object):
+ def __init__(self, session):
+ self.session = session
+
+ @command
+ def get_prefs(self, *prefs):
+ body = {"prefs": list(prefs)}
+ return self.session.send_command("POST", "servo/prefs/get", body)
+
+ @command
+ def set_prefs(self, prefs):
+ body = {"prefs": prefs}
+ return self.session.send_command("POST", "servo/prefs/set", body)
+
+ @command
+ def reset_prefs(self, *prefs):
+ body = {"prefs": list(prefs)}
+ return self.session.send_command("POST", "servo/prefs/reset", body)
diff --git a/testing/web-platform/tests/tools/webdriver/webdriver/transport.py b/testing/web-platform/tests/tools/webdriver/webdriver/transport.py
new file mode 100644
index 000000000..f3c1ed536
--- /dev/null
+++ b/testing/web-platform/tests/tools/webdriver/webdriver/transport.py
@@ -0,0 +1,90 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import httplib
+import json
+import urlparse
+
+import error
+
+
+HTTP_TIMEOUT = 5
+
+
+class HTTPWireProtocol(object):
+ """Transports messages (commands and responses) over the WebDriver
+ wire protocol.
+ """
+
+ def __init__(self, host, port, url_prefix="/", timeout=HTTP_TIMEOUT):
+ """Construct interface for communicating with the remote server.
+
+        :param host: Hostname of the remote WebDriver server.
+        :param port: Port of the remote WebDriver server.
+ """
+
+ self.host = host
+ self.port = port
+ self.url_prefix = url_prefix
+
+ self._timeout = timeout
+
+ def url(self, suffix):
+        return urlparse.urljoin(self.url_prefix, suffix)
+
+ def send(self, method, url, body=None, headers=None, key=None):
+ """Send a command to the remote.
+
+        :param method: HTTP method, e.g. "GET", "POST", or "DELETE".
+ :param body: Body of the request. Defaults to an empty dictionary
+ if ``method`` is "POST".
+ :param headers: Additional headers to include in the request.
+ :param key: Extract this key from the dictionary returned from
+ the remote.
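+
+        Usage sketch (assumes a WebDriver server on localhost:4444;
+        ``key="value"`` unwraps the ``value`` field of the JSON body)::
+
+            client = HTTPWireProtocol("localhost", 4444)
+            status = client.send("GET", "status", key="value")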
+ """
+
+ if body is None and method == "POST":
+ body = {}
+
+ if isinstance(body, dict):
+ body = json.dumps(body)
+
+ if isinstance(body, unicode):
+ body = body.encode("utf-8")
+
+ if headers is None:
+ headers = {}
+
+ url = self.url_prefix + url
+
+ conn = httplib.HTTPConnection(
+ self.host, self.port, strict=True, timeout=self._timeout)
+ conn.request(method, url, body, headers)
+
+ resp = conn.getresponse()
+ resp_body = resp.read()
+ conn.close()
+
+ try:
+ data = json.loads(resp_body)
+        except ValueError:
+ raise IOError("Could not parse response body as JSON: '%s'" % resp_body)
+
+ if resp.status != 200:
+ cls = error.get(data.get("error"))
+ raise cls(data.get("message"))
+
+ if key is not None:
+ data = data[key]
+ if not data:
+ data = None
+
+ return data
diff --git a/testing/web-platform/tests/tools/wptserve/.gitignore b/testing/web-platform/tests/tools/wptserve/.gitignore
new file mode 100644
index 000000000..8e87d3884
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/.gitignore
@@ -0,0 +1,40 @@
+*.py[cod]
+*~
+\#*
+
+docs/_build/
+
+# C extensions
+*.so
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+nosetests.xml
+tests/functional/html/*
+
+# Translations
+*.mo
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
diff --git a/testing/web-platform/tests/tools/wptserve/.travis.yml b/testing/web-platform/tests/tools/wptserve/.travis.yml
new file mode 100644
index 000000000..00183731b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/.travis.yml
@@ -0,0 +1,24 @@
+language: python
+
+sudo: false
+
+cache:
+ directories:
+ - $HOME/.cache/pip
+
+matrix:
+ include:
+ - python: 2.7
+ env: TOXENV=py27
+ - python: pypy
+ env: TOXENV=pypy
+
+install:
+ - pip install -U tox codecov
+
+script:
+ - tox
+
+after_success:
+ - coverage combine
+ - codecov
diff --git a/testing/web-platform/tests/tools/wptserve/LICENSE b/testing/web-platform/tests/tools/wptserve/LICENSE
new file mode 100644
index 000000000..45896e6be
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/LICENSE
@@ -0,0 +1,30 @@
+W3C 3-clause BSD License
+
+http://www.w3.org/Consortium/Legal/2008/03-bsd-license.html
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of works must retain the original copyright notice,
+ this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the original copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+* Neither the name of the W3C nor the names of its contributors may be
+ used to endorse or promote products derived from this work without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/testing/web-platform/tests/tools/wptserve/MANIFEST.in b/testing/web-platform/tests/tools/wptserve/MANIFEST.in
new file mode 100644
index 000000000..4bf448352
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/MANIFEST.in
@@ -0,0 +1 @@
+include README.md
\ No newline at end of file
diff --git a/testing/web-platform/tests/tools/wptserve/README.md b/testing/web-platform/tests/tools/wptserve/README.md
new file mode 100644
index 000000000..c0c88e2c3
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/README.md
@@ -0,0 +1,4 @@
+wptserve
+========
+
+Web server designed for use with web-platform-tests
diff --git a/testing/web-platform/tests/tools/wptserve/docs/Makefile b/testing/web-platform/tests/tools/wptserve/docs/Makefile
new file mode 100644
index 000000000..250b6c864
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/Makefile
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/wptserve.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/wptserve.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/wptserve"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/wptserve"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/testing/web-platform/tests/tools/wptserve/docs/conf.py b/testing/web-platform/tests/tools/wptserve/docs/conf.py
new file mode 100644
index 000000000..eae1c20cc
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/conf.py
@@ -0,0 +1,243 @@
+# -*- coding: utf-8 -*-
+#
+# wptserve documentation build configuration file, created by
+# sphinx-quickstart on Wed Aug 14 17:23:24 2013.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+sys.path.insert(0, os.path.abspath(".."))
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'wptserve'
+copyright = u'2013, Mozilla Foundation and other wptserve contributors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.1'
+# The full version, including alpha/beta/rc tags.
+release = '0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'wptservedoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'wptserve.tex', u'wptserve Documentation',
+ u'James Graham', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'wptserve', u'wptserve Documentation',
+ [u'James Graham'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'wptserve', u'wptserve Documentation',
+   u'James Graham', 'wptserve', 'Web server designed for use with web-platform-tests.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
diff --git a/testing/web-platform/tests/tools/wptserve/docs/handlers.rst b/testing/web-platform/tests/tools/wptserve/docs/handlers.rst
new file mode 100644
index 000000000..c15aab635
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/handlers.rst
@@ -0,0 +1,111 @@
+Handlers
+========
+
+Handlers are functions that have the general signature::
+
+ handler(request, response)
+
+It is expected that the handler will use information from
+the request (e.g. the path) either to populate the response
+object with the data to send, or to directly write to the
+output stream via the ResponseWriter instance associated with
+the request. If a handler writes to the output stream then the
+server will not attempt additional writes, i.e. the choice to write
+directly in the handler or not is all-or-nothing.
+
+A number of general-purpose handler functions are provided by default:
+
+.. _handlers.Python:
+
+Python Handlers
+---------------
+
+Python handlers are functions which provide a higher-level API over
+manually updating the response object, by causing the return value of
+the function to provide (part of) the response. There are three
+possible sets of values that may be returned::
+
+
+ (status, headers, content)
+ (headers, content)
+ content
+
+Here `status` is either a tuple (status code, message) or simply an
+integer status code, `headers` is a list of (field name, value) pairs,
+and `content` is a string or an iterable returning strings. Such a
+function may also update the response manually. For example one may
+use `response.headers.set` to set a response header, and only return
+the content. One may even use this kind of handler, but manipulate
+the output socket directly, in which case the return value of the
+function, and the properties of the response object, will be ignored.
+
+The most common way to make a user function into a python handler is
+to use the provided `wptserve.handlers.handler` decorator::
+
+ from wptserve.handlers import handler
+
+ @handler
+ def test(request, response):
+        return [("X-Test", "PASS"), ("Content-Type", "text/plain")], "test"
+
+ #Later, assuming we have a Router object called 'router'
+
+ router.register("GET", "/test", test)
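+
+A handler that also sets the HTTP status, returning all three
+components described above, would look similar (a sketch in the same
+style as the example above)::
+
+    @handler
+    def not_found(request, response):
+        return (404, "Not Found"), [("Content-Type", "text/plain")], "Nothing here"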
+
+JSON Handlers
+-------------
+
+This is a specialisation of the python handler type specifically
+designed to facilitate providing JSON responses. The API is largely
+the same as for a normal python handler, but the `content` part of the
+return value is JSON encoded, and a default Content-Type header of
+`application/json` is added. Again this handler is usually used as a
+decorator::
+
+ from wptserve.handlers import json_handler
+
+ @json_handler
+ def test(request, response):
+ return {"test": "PASS"}
+
+Python File Handlers
+--------------------
+
+Python file handlers are designed to provide a vaguely PHP-like interface
+where each resource corresponds to a particular python file on the
+filesystem. Typically this is hooked up to a route like ``("*",
+"*.py", python_file_handler)``, meaning that any .py file will be
+treated as a handler file (note that this makes python files unsafe in
+much the same way that .php files are when using PHP).
+
+Unlike PHP, the python files don't work by outputting text to stdout
+from the global scope. Instead they must define a single function
+`main` with the signature::
+
+ main(request, response)
+
+This function then behaves just like those described in
+:ref:`handlers.Python` above.
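+
+For example, a handler file (the file name is arbitrary) might
+contain::
+
+    def main(request, response):
+        return [("Content-Type", "text/plain")], "PASS"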
+
+asis Handlers
+-------------
+
+These are used to serve files as literal byte streams including the
+HTTP status line, headers and body. In the default configuration this
+handler is invoked for all files with a .asis extension.
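+
+For example, an .asis file with the following content is served
+byte-for-byte, producing a 202 response with an `X-TEST` header::
+
+    HTTP/1.1 202 Giraffe
+    X-TEST: PASS
+    Content-Length: 7
+
+    Content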
+
+File Handlers
+-------------
+
+File handlers are used to serve static files. By default the content
+type of these files is set by examining the file extension. However
+this can be overridden, or additional headers supplied, by providing a
+file with the same name as the file being served but an additional
+.headers suffix, i.e. test.html has its headers set from
+test.html.headers. The format of the .headers file is plaintext, with
+each line containing::
+
+ Header-Name: header_value
+
+In addition headers can be set for a whole directory of files (but not
+subdirectories), using a file called `__dir__.headers`.
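+
+For example, a hypothetical `test.html.headers` file containing::
+
+    Content-Type: text/plain
+    Custom-Header: PASS
+
+would cause `test.html` to be served with a text/plain content type and
+an additional `Custom-Header` header.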
diff --git a/testing/web-platform/tests/tools/wptserve/docs/index.rst b/testing/web-platform/tests/tools/wptserve/docs/index.rst
new file mode 100644
index 000000000..a9f630c76
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/index.rst
@@ -0,0 +1,35 @@
+.. wptserve documentation master file, created by
+ sphinx-quickstart on Wed Aug 14 17:23:24 2013.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Web Platform Test Server
+========================
+
+A Python-based HTTP server designed specifically for testing the web
+platform. This means it supports extreme flexibility in the responses
+it produces, including deliberate HTTP non-conformance.
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ introduction
+ server
+ router
+ request
+ response
+ stash
+ handlers
+ pipes
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/testing/web-platform/tests/tools/wptserve/docs/introduction.rst b/testing/web-platform/tests/tools/wptserve/docs/introduction.rst
new file mode 100644
index 000000000..b585a983a
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/introduction.rst
@@ -0,0 +1,51 @@
+Introduction
+============
+
+wptserve has been designed with the specific goal of making a server
+that is suitable for writing tests for the web platform. This means
+that it cannot use common abstractions over HTTP such as WSGI, since
+these assume that the goal is to generate a well-formed HTTP
+response. Testcases, however, often require precise control of the
+exact bytes sent over the wire and their timing. The full list of
+design goals for the server is:
+
+* Suitable to run on individual test machines and over the public internet.
+
+* Support plain TCP and SSL servers.
+
+* Serve static files with the minimum of configuration.
+
+* Allow headers to be overwritten on a per-file and per-directory
+ basis.
+
+* Full customisation of headers sent (e.g. altering or omitting
+ "mandatory" headers).
+
+* Simple per-client state.
+
+* Complex logic in tests, up to precise control over the individual
+ bytes sent and the timing of sending them.
+
+Request Handling
+----------------
+
+At the high level, the design of the server is based around similar
+concepts to those found in common web frameworks like Django, Pyramid
+or Flask. In particular the lifecycle of a typical request will be
+familiar to users of these systems. Incoming requests are parsed and a
+:doc:`Request <request>` object is constructed. This object is passed
+to a :ref:`Router <router.Interface>` instance, which is
+responsible for mapping the request method and path to a handler
+function. This handler is passed two arguments: the request object and
+a :doc:`Response <response>` object. In cases where only simple
+responses are required, the handler function may fill in the
+properties of the response object and the server will take care of
+constructing the response. However each Response also contains a
+:ref:`ResponseWriter <response.Interface>` which can be
+used to directly control the TCP socket.
+
+By default there are several built-in handler functions that provide a
+higher level API than direct manipulation of the Response
+object. These are documented in :doc:`handlers`.
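+
+A minimal sketch of this flow (the route, port and handler body are
+purely illustrative) might look like::
+
+    from wptserve import handlers, server
+
+    @handlers.handler
+    def hello(request, response):
+        return [("Content-Type", "text/plain")], "Hello"
+
+    httpd = server.WebTestHttpd(port=8080, doc_root=".",
+                                routes=[("GET", "/hello", hello)])
+    httpd.start(block=True)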
+
+
diff --git a/testing/web-platform/tests/tools/wptserve/docs/make.bat b/testing/web-platform/tests/tools/wptserve/docs/make.bat
new file mode 100644
index 000000000..40c71ff5d
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\wptserve.qhcp
+ echo.To view the help file:
+  echo.^> assistant -collectionFile %BUILDDIR%\qthelp\wptserve.qhc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/testing/web-platform/tests/tools/wptserve/docs/pipes.rst b/testing/web-platform/tests/tools/wptserve/docs/pipes.rst
new file mode 100644
index 000000000..c606140d4
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/pipes.rst
@@ -0,0 +1,157 @@
+Pipes
+======
+
+Pipes are functions that may be used when serving files to alter parts
+of the response. These are invoked by adding a pipe= query parameter
+taking a | separated list of pipe functions and parameters. The pipe
+functions are applied to the response from left to right. For example::
+
+  GET /sample.txt?pipe=slice(1,200)|status(404)
+
+This would serve bytes 1 to 199, inclusive, of sample.txt with the HTTP
+status code 404.
+
+There are several built-in pipe functions, and it is possible to add
+more using the `@pipe` decorator on a function, if required.
+
+.. note::
+ Because of the way pipes compose, using some pipe functions prevents the
+ content-length of the response from being known in advance. In these cases
+ the server will close the connection to indicate the end of the response,
+ preventing the use of HTTP 1.1 keepalive.
+
+Built-In Pipes
+--------------
+
+sub
+~~~
+
+Used to substitute variables from the server environment, or from the
+request into the response.
+
+Substitutions are marked in a file using a block delimited by `{{`
+and `}}`. Inside the block the following variables are available:
+
+ `{{host}}`
+ The host name of the server excluding any subdomain part.
+
+ `{{domains[]}}`
+ The domain name of a particular subdomain
+ e.g. `{{domains[www]}}` for the `www` subdomain.
+
+ `{{ports[][]}}`
+ The port number of servers, by protocol
+ e.g. `{{ports[http][0]}}` for the first (and, depending on setup,
+ possibly only) http server
+
+ `{{headers[]}}`
+ The HTTP headers in the request
+ e.g. `{{headers[X-Test]}}` for a hypothetical `X-Test` header.
+
+ `{{GET[]}}`
+ The query parameters for the request
+ e.g. `{{GET[id]}}` for an id parameter sent with the request.
+
+So, for example, to write a javascript file called `xhr.js` that
+depends on the host name of the server, without hardcoding, one might
+write::
+
+  var server_url = "http://{{host}}:{{ports[http][0]}}/path/to/resource";
+ //Create the actual XHR and so on
+
+The file would then be included as::
+
+ <script src="xhr.js?pipe=sub"></script>
+
+This pipe can also be enabled by using a filename `*.sub.ext`, e.g. the file above could be called `xhr.sub.js`.
+
+status
+~~~~~~
+
+Used to set the HTTP status of the response, for example::
+
+ example.js?pipe=status(410)
+
+headers
+~~~~~~~
+
+Used to add or replace HTTP headers in the response. Takes two or
+three arguments: the header name, the header value, and whether to
+append the header rather than replace an existing header (default:
+False). So, for example, a request for::
+
+ example.html?pipe=header(Content-Type,text/plain)
+
+causes example.html to be returned with a text/plain content type
+whereas::
+
+ example.html?pipe=header(Content-Type,text/plain,True)
+
+Will cause example.html to be returned with both text/html and
+text/plain content-type headers.
+
+slice
+~~~~~
+
+Used to send only part of a response body. Takes the start and,
+optionally, end bytes as arguments, although either can be null to
+indicate the start or end of the file, respectively. So for example::
+
+ example.txt?pipe=slice(10,20)
+
+Would result in a response with a body containing 10 bytes of
+example.txt including byte 10 but excluding byte 20.
+
+::
+
+ example.txt?pipe=slice(10)
+
+Would cause all bytes from byte 10 of example.txt to be sent, but::
+
+ example.txt?pipe=slice(null,20)
+
+Would send the first 20 bytes of example.txt.
+
+trickle
+~~~~~~~
+
+.. note::
+ Using this function will force a connection close.
+
+Used to send the body of a response in chunks with delays. Takes a
+single argument that is a microsyntax consisting of colon-separated
+commands. There are three types of commands:
+
+* Bare numbers represent a number of bytes to send
+
+* Numbers prefixed `d` indicate a delay in seconds
+
+* Numbers prefixed `r` must only appear at the end of the command list, and
+ indicate that the preceding N items must be repeated until there is
+ no more content to send. The number of items to repeat must be even.
+
+In the absence of a repetition command, the entire remainder of the content is
+sent at once when the command list is exhausted. So for example::
+
+ example.txt?pipe=trickle(d1)
+
+causes a 1s delay before sending the entirety of example.txt.
+
+::
+
+ example.txt?pipe=trickle(100:d1)
+
+causes 100 bytes of example.txt to be sent, followed by a 1s delay,
+and then the remainder of the file to be sent. On the other hand::
+
+ example.txt?pipe=trickle(100:d1:r2)
+
+Will cause the file to be sent in 100 byte chunks separated by a 1s
+delay until the whole content has been sent.
+
+
+:mod:`Interface <pipes>`
+------------------------
+
+.. automodule:: wptserve.pipes
+ :members:
diff --git a/testing/web-platform/tests/tools/wptserve/docs/request.rst b/testing/web-platform/tests/tools/wptserve/docs/request.rst
new file mode 100644
index 000000000..790e4f0bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/request.rst
@@ -0,0 +1,10 @@
+Request
+=======
+
+Request object.
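+
+For example, a handler (a sketch using only properties shown elsewhere
+in these docs) might inspect the request like this::
+
+    from wptserve.handlers import handler
+
+    @handler
+    def echo_id(request, response):
+        if request.method == "GET":
+            return request.GET.first("id")
+        return "not a GET request"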
+
+:mod:`Interface <request>`
+--------------------------
+
+.. automodule:: wptserve.request
+ :members:
diff --git a/testing/web-platform/tests/tools/wptserve/docs/response.rst b/testing/web-platform/tests/tools/wptserve/docs/response.rst
new file mode 100644
index 000000000..23075dfb3
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/response.rst
@@ -0,0 +1,41 @@
+Response
+========
+
+Response object. This object is used to control the response that will
+be sent to the HTTP client. A handler function will take the response
+object and fill in various parts of the response. For example, a plain
+text response with the body 'Some example content' could be produced as::
+
+ def handler(request, response):
+ response.headers.set("Content-Type", "text/plain")
+ response.content = "Some example content"
+
+The response object also gives access to a ResponseWriter, which
+allows direct access to the response socket. For example, one could
+write a similar response but with more explicit control as follows::
+
+ import time
+
+ def handler(request, response):
+ response.add_required_headers = False # Don't implicitly add HTTP headers
+ response.writer.write_status(200)
+ response.writer.write_header("Content-Type", "text/plain")
+ response.writer.write_header("Content-Length", len("Some example content"))
+ response.writer.end_headers()
+ response.writer.write("Some ")
+ time.sleep(1)
+ response.writer.write("example content")
+
+Note that when writing the response directly like this it is always
+necessary to either set the Content-Length header or set
+`response.close_connection = True`. Without one of these, the client
+will not be able to determine where the response body ends and will
+continue to load indefinitely.
+
+.. _response.Interface:
+
+:mod:`Interface <response>`
+---------------------------
+
+.. automodule:: wptserve.response
+ :members:
diff --git a/testing/web-platform/tests/tools/wptserve/docs/router.rst b/testing/web-platform/tests/tools/wptserve/docs/router.rst
new file mode 100644
index 000000000..21d67d222
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/router.rst
@@ -0,0 +1,78 @@
+Router
+======
+
+The router is used to match incoming requests to request handler
+functions. Typically users don't interact with the router directly,
+but instead send a list of routes to register when starting the
+server. However it is also possible to add routes after starting the
+server by calling the `register` method on the server's `router`
+property.
+
+Routes are represented by a three item tuple::
+
+ (methods, path_match, handler)
+
+`methods` is either a string or a list of strings indicating the HTTP
+methods to match. In cases where all methods should match, the special
+sentinel value `any_method`, provided as a property of the `router`
+module, can be used.
+
+`path_match` is an expression that will be evaluated against the
+request path to decide if the handler should match. These expressions
+follow a custom syntax intended to make matching URLs straightforward
+and, in particular, to be easier to use than raw regexp for URL
+matching. There are three possible components of a match expression:
+
+* Literals. These match any character. The special characters \*, \{
+ and \} must be escaped by prefixing them with a \\.
+
+* Match groups. These match any character other than / and save the
+ result as a named group. They are delimited by curly braces; for
+ example::
+
+ {abc}
+
+ would create a match group with the name `abc`.
+
+* Stars. These are denoted with a `*` and match any character
+ including /. There can be at most one star
+ per pattern and it must follow any match groups.
+
+Path expressions always match the entire request path and a leading /
+in the expression is implied even if it is not explicitly
+provided. This means that `/foo` and `foo` are equivalent.
+
+For example, the following pattern matches all requests for resources with the
+extension `.py`::
+
+ *.py
+
+The following expression matches anything directly under `/resources`
+with a `.html` extension, and places the "filename" in the `name`
+group::
+
+ /resources/{name}.html
+
+The groups, including anything that matches a `*` are available in the
+request object through the `route_match` property. This is a
+dictionary mapping the group names, and any match for `*` to the
+matching part of the route. For example, given a route::
+
+ /api/{sub_api}/*
+
+and the request path `/api/test/html/test.html`, `route_match` would
+be::
+
+  {"sub_api": "test", "*": "html/test.html"}
+
+`handler` is a function taking a request and a response object that is
+responsible for constructing the response to the HTTP request. See
+:doc:`handlers` for more details on handler functions.
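+
+For example, a hypothetical handler registered for the route above
+could read the match groups from the request's `route_match` property::
+
+    from wptserve.handlers import handler
+
+    @handler
+    def sub_api_handler(request, response):
+        return "sub_api was %s" % request.route_match["sub_api"]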
+
+.. _router.Interface:
+
+:mod:`Interface <wptserve>`
+---------------------------
+
+.. automodule:: wptserve.router
+ :members:
diff --git a/testing/web-platform/tests/tools/wptserve/docs/server.rst b/testing/web-platform/tests/tools/wptserve/docs/server.rst
new file mode 100644
index 000000000..732f9fdc7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/server.rst
@@ -0,0 +1,20 @@
+Server
+======
+
+Basic server classes and router.
+
+The following example creates a server that serves static files from
+the `files` subdirectory of the current directory and causes it to
+run on port 8080 until it is killed::
+
+ from wptserve import server, handlers
+
+ httpd = server.WebTestHttpd(port=8080, doc_root="./files/",
+ routes=[("GET", "*", handlers.file_handler)])
+ httpd.start(block=True)
+
+:mod:`Interface <wptserve>`
+---------------------------
+
+.. automodule:: wptserve.server
+ :members:
diff --git a/testing/web-platform/tests/tools/wptserve/docs/stash.rst b/testing/web-platform/tests/tools/wptserve/docs/stash.rst
new file mode 100644
index 000000000..821c2d344
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/docs/stash.rst
@@ -0,0 +1,31 @@
+Stash
+=====
+
+Object for storing cross-request state. This is unusual in that keys
+must be UUIDs, in order to prevent different clients setting the same
+key, and values are write-once, read-once to minimise the chances of
+state persisting indefinitely. The stash defines two operations:
+`put`, to add state, and `take`, to remove state. Furthermore, the view
+of the stash is path-specific; by default a request will only see the
+part of the stash corresponding to its own path.
+
+A typical example of using a stash to store state might be::
+
+ @handler
+ def handler(request, response):
+ # We assume this is a string representing a UUID
+ key = request.GET.first("id")
+
+ if request.method == "POST":
+ request.server.stash.put(key, "Some sample value")
+ return "Added value to stash"
+ else:
+ value = request.server.stash.take(key)
+ assert request.server.stash.take(key) is None
+            return value
+
+:mod:`Interface <stash>`
+------------------------
+
+.. automodule:: wptserve.stash
+ :members:
diff --git a/testing/web-platform/tests/tools/wptserve/setup.py b/testing/web-platform/tests/tools/wptserve/setup.py
new file mode 100644
index 000000000..956e20922
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/setup.py
@@ -0,0 +1,23 @@
+from setuptools import setup
+
+PACKAGE_VERSION = '1.4.0'
+deps = []
+
+setup(name='wptserve',
+ version=PACKAGE_VERSION,
+      description="Python web server intended for use in web browser testing",
+ long_description=open("README.md").read(),
+ # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=["Development Status :: 5 - Production/Stable",
+ "License :: OSI Approved :: BSD License",
+ "Topic :: Internet :: WWW/HTTP :: HTTP Servers"],
+ keywords='',
+ author='James Graham',
+ author_email='james@hoppipolla.co.uk',
+ url='http://wptserve.readthedocs.org/',
+ license='BSD',
+ packages=['wptserve'],
+ include_package_data=True,
+ zip_safe=False,
+ install_requires=deps
+ )
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/__init__.py b/testing/web-platform/tests/tools/wptserve/tests/functional/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/__init__.py
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/base.py b/testing/web-platform/tests/tools/wptserve/tests/functional/base.py
new file mode 100644
index 000000000..eae7e87d9
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/base.py
@@ -0,0 +1,65 @@
+from __future__ import print_function
+
+import base64
+import logging
+import os
+import unittest
+import urllib
+import urllib2
+import urlparse
+
+import wptserve
+
+logging.basicConfig()
+
+wptserve.logger.set_logger(logging.getLogger())
+
+here = os.path.split(__file__)[0]
+doc_root = os.path.join(here, "docroot")
+
+class Request(urllib2.Request):
+ def __init__(self, *args, **kwargs):
+ urllib2.Request.__init__(self, *args, **kwargs)
+ self.method = "GET"
+
+ def get_method(self):
+ return self.method
+
+ def add_data(self, data):
+ if hasattr(data, "iteritems"):
+ data = urllib.urlencode(data)
+ print(data)
+ self.add_header("Content-Length", str(len(data)))
+ urllib2.Request.add_data(self, data)
+
+class TestUsingServer(unittest.TestCase):
+ def setUp(self):
+ self.server = wptserve.server.WebTestHttpd(host="localhost",
+ port=0,
+ use_ssl=False,
+ certificate=None,
+ doc_root=doc_root)
+ self.server.start(False)
+
+ def tearDown(self):
+ self.server.stop()
+
+ def abs_url(self, path, query=None):
+ return urlparse.urlunsplit(("http", "%s:%i" % (self.server.host, self.server.port), path, query, None))
+
+ def request(self, path, query=None, method="GET", headers=None, body=None, auth=None):
+ req = Request(self.abs_url(path, query))
+ req.method = method
+ if headers is None:
+ headers = {}
+
+ for name, value in headers.iteritems():
+ req.add_header(name, value)
+
+ if body is not None:
+ req.add_data(body)
+
+ if auth is not None:
+ req.add_header("Authorization", "Basic %s" % base64.b64encode('%s:%s' % auth))
+
+ return urllib2.urlopen(req)
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/document.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/document.txt
new file mode 100644
index 000000000..611dccd84
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/document.txt
@@ -0,0 +1 @@
+This is a test document
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/invalid.py b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/invalid.py
new file mode 100644
index 000000000..017d4d9d6
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/invalid.py
@@ -0,0 +1,3 @@
+# Oops...
+def main(request, response
+ return "FAIL"
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/no_main.py b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/no_main.py
new file mode 100644
index 000000000..cee379fe1
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/no_main.py
@@ -0,0 +1,3 @@
+# Oops...
+def mian(request, response):
+ return "FAIL"
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub.sub.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub.sub.txt
new file mode 100644
index 000000000..4302db16a
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub.sub.txt
@@ -0,0 +1 @@
+{{host}} {{domains[]}} {{ports[http][0]}}
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub.txt
new file mode 100644
index 000000000..4302db16a
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub.txt
@@ -0,0 +1 @@
+{{host}} {{domains[]}} {{ports[http][0]}}
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_headers.sub.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_headers.sub.txt
new file mode 100644
index 000000000..ee021eb86
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_headers.sub.txt
@@ -0,0 +1 @@
+{{headers[X-Test]}}
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_headers.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_headers.txt
new file mode 100644
index 000000000..ee021eb86
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_headers.txt
@@ -0,0 +1 @@
+{{headers[X-Test]}}
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_params.sub.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_params.sub.txt
new file mode 100644
index 000000000..8323878d6
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_params.sub.txt
@@ -0,0 +1 @@
+{{GET[test]}}
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_params.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_params.txt
new file mode 100644
index 000000000..8323878d6
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/sub_params.txt
@@ -0,0 +1 @@
+{{GET[test]}}
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/subdir/file.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/subdir/file.txt
new file mode 100644
index 000000000..06d84d30d
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/subdir/file.txt
@@ -0,0 +1 @@
+I am here to ensure that my containing directory exists.
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test.asis b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test.asis
new file mode 100644
index 000000000..b05ba7da8
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test.asis
@@ -0,0 +1,5 @@
+HTTP/1.1 202 Giraffe
+X-TEST: PASS
+Content-Length: 7
+
+Content
\ No newline at end of file
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_string.py b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_string.py
new file mode 100644
index 000000000..8fa605bb1
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_string.py
@@ -0,0 +1,3 @@
+def main(request, response):
+ response.headers.set("Content-Type", "text/plain")
+ return "PASS"
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_tuple_2.py b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_tuple_2.py
new file mode 100644
index 000000000..fa791fbdd
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_tuple_2.py
@@ -0,0 +1,2 @@
+def main(request, response):
+ return [("Content-Type", "text/html"), ("X-Test", "PASS")], "PASS"
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_tuple_3.py b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_tuple_3.py
new file mode 100644
index 000000000..2c2656d04
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/test_tuple_3.py
@@ -0,0 +1,2 @@
+def main(request, response):
+ return (202, "Giraffe"), [("Content-Type", "text/html"), ("X-Test", "PASS")], "PASS"
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/with_headers.txt b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/with_headers.txt
new file mode 100644
index 000000000..45ce1a079
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/with_headers.txt
@@ -0,0 +1 @@
+Test document with custom headers
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/with_headers.txt.sub.headers b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/with_headers.txt.sub.headers
new file mode 100644
index 000000000..71494fccf
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/docroot/with_headers.txt.sub.headers
@@ -0,0 +1,6 @@
+Custom-Header: PASS
+Another-Header: {{$id:uuid()}}
+Same-Value-Header: {{$id}}
+Double-Header: PA
+Double-Header: SS
+Content-Type: text/html
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/test_cookies.py b/testing/web-platform/tests/tools/wptserve/tests/functional/test_cookies.py
new file mode 100644
index 000000000..d1080b4bf
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/test_cookies.py
@@ -0,0 +1,61 @@
+import unittest
+
+import wptserve
+from .base import TestUsingServer
+
+class TestResponseSetCookie(TestUsingServer):
+ def test_name_value(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ response.set_cookie("name", "value")
+ return "Test"
+
+ route = ("GET", "/test/name_value", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+
+ self.assertEqual(resp.info()["Set-Cookie"], "name=value; Path=/")
+
+ def test_unset(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ response.set_cookie("name", "value")
+ response.unset_cookie("name")
+ return "Test"
+
+ route = ("GET", "/test/unset", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+
+ self.assertTrue("Set-Cookie" not in resp.info())
+
+ def test_delete(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ response.delete_cookie("name")
+ return "Test"
+
+ route = ("GET", "/test/delete", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+
+ parts = dict(item.split("=") for
+ item in resp.info()["Set-Cookie"].split("; ") if item)
+
+ self.assertEqual(parts["name"], "")
+ self.assertEqual(parts["Path"], "/")
+ #Should also check that expires is in the past
+
+class TestRequestCookies(TestUsingServer):
+ def test_set_cookie(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return request.cookies["name"].value
+
+ route = ("GET", "/test/set_cookie", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1], headers={"Cookie": "name=value"})
+ self.assertEqual(resp.read(), b"value")
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/test_handlers.py b/testing/web-platform/tests/tools/wptserve/tests/functional/test_handlers.py
new file mode 100644
index 000000000..9189725cb
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/test_handlers.py
@@ -0,0 +1,299 @@
+import json
+import os
+import pytest
+import unittest
+import urllib2
+import uuid
+
+import wptserve
+from .base import TestUsingServer, doc_root
+
+class TestFileHandler(TestUsingServer):
+ def test_GET(self):
+ resp = self.request("/document.txt")
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("text/plain", resp.info()["Content-Type"])
+ self.assertEqual(open(os.path.join(doc_root, "document.txt"), 'rb').read(), resp.read())
+
+ def test_headers(self):
+ resp = self.request("/with_headers.txt")
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("text/html", resp.info()["Content-Type"])
+ self.assertEqual("PASS", resp.info()["Custom-Header"])
+ # This will fail if it isn't a valid uuid
+ uuid.UUID(resp.info()["Another-Header"])
+ self.assertEqual(resp.info()["Same-Value-Header"], resp.info()["Another-Header"])
+ self.assertEqual(resp.info()["Double-Header"], "PA, SS")
+
+
+ def test_range(self):
+ resp = self.request("/document.txt", headers={"Range":"bytes=10-19"})
+ self.assertEqual(206, resp.getcode())
+ data = resp.read()
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ self.assertEqual(10, len(data))
+ self.assertEqual("bytes 10-19/%i" % len(expected), resp.info()['Content-Range'])
+ self.assertEqual("10", resp.info()['Content-Length'])
+ self.assertEqual(expected[10:20], data)
+
+ def test_range_no_end(self):
+ resp = self.request("/document.txt", headers={"Range":"bytes=10-"})
+ self.assertEqual(206, resp.getcode())
+ data = resp.read()
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ self.assertEqual(len(expected) - 10, len(data))
+ self.assertEqual("bytes 10-%i/%i" % (len(expected) - 1, len(expected)), resp.info()['Content-Range'])
+ self.assertEqual(expected[10:], data)
+
+ def test_range_no_start(self):
+ resp = self.request("/document.txt", headers={"Range":"bytes=-10"})
+ self.assertEqual(206, resp.getcode())
+ data = resp.read()
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ self.assertEqual(10, len(data))
+ self.assertEqual("bytes %i-%i/%i" % (len(expected) - 10, len(expected) - 1, len(expected)),
+ resp.info()['Content-Range'])
+ self.assertEqual(expected[-10:], data)
+
+ def test_multiple_ranges(self):
+ resp = self.request("/document.txt", headers={"Range":"bytes=1-2,5-7,6-10"})
+ self.assertEqual(206, resp.getcode())
+ data = resp.read()
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ self.assertTrue(resp.info()["Content-Type"].startswith("multipart/byteranges; boundary="))
+ boundary = resp.info()["Content-Type"].split("boundary=")[1]
+ parts = data.split("--" + boundary)
+ self.assertEqual("\r\n", parts[0])
+ self.assertEqual("--", parts[-1])
+ expected_parts = [("1-2", expected[1:3]), ("5-10", expected[5:11])]
+ for expected_part, part in zip(expected_parts, parts[1:-1]):
+ header_string, body = part.split("\r\n\r\n")
+ headers = dict(item.split(": ", 1) for item in header_string.split("\r\n") if item.strip())
+ self.assertEqual(headers["Content-Type"], "text/plain")
+ self.assertEqual(headers["Content-Range"], "bytes %s/%i" % (expected_part[0], len(expected)))
+ self.assertEqual(expected_part[1] + "\r\n", body)
+
+ def test_range_invalid(self):
+ with self.assertRaises(urllib2.HTTPError) as cm:
+ self.request("/document.txt", headers={"Range":"bytes=11-10"})
+ self.assertEqual(cm.exception.code, 416)
+
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ with self.assertRaises(urllib2.HTTPError) as cm:
+ self.request("/document.txt", headers={"Range":"bytes=%i-%i" % (len(expected), len(expected) + 10)})
+ self.assertEqual(cm.exception.code, 416)
+
+ def test_sub_config(self):
+ resp = self.request("/sub.sub.txt")
+ expected = b"localhost localhost %i" % self.server.port
+ assert resp.read().rstrip() == expected
+
+ def test_sub_headers(self):
+ resp = self.request("/sub_headers.sub.txt", headers={"X-Test": "PASS"})
+ expected = b"PASS"
+ assert resp.read().rstrip() == expected
+
+ def test_sub_params(self):
+ resp = self.request("/sub_params.sub.txt", query="test=PASS")
+ expected = b"PASS"
+ assert resp.read().rstrip() == expected
+
+
+class TestFunctionHandler(TestUsingServer):
+ def test_string_rv(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return "test data"
+
+ route = ("GET", "/test/test_string_rv", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("9", resp.info()["Content-Length"])
+ self.assertEqual("test data", resp.read())
+
+ def test_tuple_1_rv(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return ()
+
+ route = ("GET", "/test/test_tuple_1_rv", handler)
+ self.server.router.register(*route)
+
+ with pytest.raises(urllib2.HTTPError) as cm:
+ self.request(route[1])
+
+ assert cm.value.code == 500
+
+ def test_tuple_2_rv(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return [("Content-Length", 4), ("test-header", "test-value")], "test data"
+
+ route = ("GET", "/test/test_tuple_2_rv", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("4", resp.info()["Content-Length"])
+ self.assertEqual("test-value", resp.info()["test-header"])
+ self.assertEqual("test", resp.read())
+
+ def test_tuple_3_rv(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return 202, [("test-header", "test-value")], "test data"
+
+ route = ("GET", "/test/test_tuple_3_rv", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+ self.assertEqual(202, resp.getcode())
+ self.assertEqual("test-value", resp.info()["test-header"])
+ self.assertEqual("test data", resp.read())
+
+ def test_tuple_3_rv_1(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return (202, "Some Status"), [("test-header", "test-value")], "test data"
+
+ route = ("GET", "/test/test_tuple_3_rv_1", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+ self.assertEqual(202, resp.getcode())
+ self.assertEqual("Some Status", resp.msg)
+ self.assertEqual("test-value", resp.info()["test-header"])
+ self.assertEqual("test data", resp.read())
+
+ def test_tuple_4_rv(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return 202, [("test-header", "test-value")], "test data", "garbage"
+
+ route = ("GET", "/test/test_tuple_1_rv", handler)
+ self.server.router.register(*route)
+
+ with pytest.raises(urllib2.HTTPError) as cm:
+ self.request(route[1])
+
+ assert cm.value.code == 500
+
+ def test_none_rv(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return None
+
+ route = ("GET", "/test/test_none_rv", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+ assert resp.getcode() == 200
+ assert "Content-Length" not in resp.info()
+ assert resp.read() == b""
+
+
+class TestJSONHandler(TestUsingServer):
+ def test_json_0(self):
+ @wptserve.handlers.json_handler
+ def handler(request, response):
+ return {"data": "test data"}
+
+ route = ("GET", "/test/test_json_0", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual({"data": "test data"}, json.load(resp))
+
+ def test_json_tuple_2(self):
+ @wptserve.handlers.json_handler
+ def handler(request, response):
+ return [("Test-Header", "test-value")], {"data": "test data"}
+
+ route = ("GET", "/test/test_json_tuple_2", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("test-value", resp.info()["test-header"])
+ self.assertEqual({"data": "test data"}, json.load(resp))
+
+ def test_json_tuple_3(self):
+ @wptserve.handlers.json_handler
+ def handler(request, response):
+ return (202, "Giraffe"), [("Test-Header", "test-value")], {"data": "test data"}
+
+ route = ("GET", "/test/test_json_tuple_2", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1])
+ self.assertEqual(202, resp.getcode())
+ self.assertEqual("Giraffe", resp.msg)
+ self.assertEqual("test-value", resp.info()["test-header"])
+ self.assertEqual({"data": "test data"}, json.load(resp))
+
+class TestPythonHandler(TestUsingServer):
+ def test_string(self):
+ resp = self.request("/test_string.py")
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("text/plain", resp.info()["Content-Type"])
+ self.assertEqual("PASS", resp.read())
+
+ def test_tuple_2(self):
+ resp = self.request("/test_tuple_2.py")
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("text/html", resp.info()["Content-Type"])
+ self.assertEqual("PASS", resp.info()["X-Test"])
+ self.assertEqual("PASS", resp.read())
+
+ def test_tuple_3(self):
+ resp = self.request("/test_tuple_3.py")
+ self.assertEqual(202, resp.getcode())
+ self.assertEqual("Giraffe", resp.msg)
+ self.assertEqual("text/html", resp.info()["Content-Type"])
+ self.assertEqual("PASS", resp.info()["X-Test"])
+ self.assertEqual("PASS", resp.read())
+
+ def test_no_main(self):
+ with pytest.raises(urllib2.HTTPError) as cm:
+ self.request("/no_main.py")
+
+ assert cm.value.code == 500
+
+ def test_invalid(self):
+ with pytest.raises(urllib2.HTTPError) as cm:
+ self.request("/invalid.py")
+
+ assert cm.value.code == 500
+
+ def test_missing(self):
+ with pytest.raises(urllib2.HTTPError) as cm:
+ self.request("/missing.py")
+
+ assert cm.value.code == 404
+
+
+class TestDirectoryHandler(TestUsingServer):
+ def test_directory(self):
+ resp = self.request("/")
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("text/html", resp.info()["Content-Type"])
+ #Add a check that the response is actually sane
+
+ def test_subdirectory_trailing_slash(self):
+ resp = self.request("/subdir/")
+ assert resp.getcode() == 200
+ assert resp.info()["Content-Type"] == "text/html"
+
+ def test_subdirectory_no_trailing_slash(self):
+ with pytest.raises(urllib2.HTTPError) as cm:
+ self.request("/subdir")
+
+ assert cm.value.code == 404
+
+
+class TestAsIsHandler(TestUsingServer):
+ def test_as_is(self):
+ resp = self.request("/test.asis")
+ self.assertEqual(202, resp.getcode())
+ self.assertEqual("Giraffe", resp.msg)
+ self.assertEqual("PASS", resp.info()["X-Test"])
+ self.assertEqual("Content", resp.read())
+ #Add a check that the response is actually sane
+
+if __name__ == '__main__':
+ unittest.main()
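These tests pin down the return-value convention for function handlers: a bare string becomes the body, a 2-tuple is (headers, body), a 3-tuple is (status, headers, body), the status may itself be a (code, message) pair, and anything else yields a 500. A minimal sketch of a handler using the full form (the route path is hypothetical):

    # Sketch only; mirrors test_tuple_3_rv_1 above.
    import wptserve

    @wptserve.handlers.handler
    def example(request, response):
        # status as (code, message), extra headers, then the body
        return (202, "Giraffe"), [("X-Test", "PASS")], "body text"

    # server.router.register("GET", "/demo/example", example)  # hypothetical route
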
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/test_pipes.py b/testing/web-platform/tests/tools/wptserve/tests/functional/test_pipes.py
new file mode 100644
index 000000000..af5068108
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/test_pipes.py
@@ -0,0 +1,77 @@
+import os
+import unittest
+import time
+
+from .base import TestUsingServer, doc_root
+
+class TestStatus(TestUsingServer):
+ def test_status(self):
+ resp = self.request("/document.txt", query="pipe=status(202)")
+ self.assertEqual(resp.getcode(), 202)
+
+class TestHeader(TestUsingServer):
+ def test_not_set(self):
+ resp = self.request("/document.txt", query="pipe=header(X-TEST,PASS)")
+ self.assertEqual(resp.info()["X-TEST"], "PASS")
+
+ def test_set(self):
+ resp = self.request("/document.txt", query="pipe=header(Content-Type,text/html)")
+ self.assertEqual(resp.info()["Content-Type"], "text/html")
+
+ def test_multiple(self):
+ resp = self.request("/document.txt", query="pipe=header(X-Test,PASS)|header(Content-Type,text/html)")
+ self.assertEqual(resp.info()["X-TEST"], "PASS")
+ self.assertEqual(resp.info()["Content-Type"], "text/html")
+
+ def test_multiple_same(self):
+ resp = self.request("/document.txt", query="pipe=header(Content-Type,FAIL)|header(Content-Type,text/html)")
+ self.assertEqual(resp.info()["Content-Type"], "text/html")
+
+ def test_multiple_append(self):
+ resp = self.request("/document.txt", query="pipe=header(X-Test,1)|header(X-Test,2,True)")
+ self.assertEqual(resp.info()["X-Test"], "1, 2")
+
+class TestSlice(TestUsingServer):
+ def test_both_bounds(self):
+ resp = self.request("/document.txt", query="pipe=slice(1,10)")
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ self.assertEqual(resp.read(), expected[1:10])
+
+ def test_no_upper(self):
+ resp = self.request("/document.txt", query="pipe=slice(1)")
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ self.assertEqual(resp.read(), expected[1:])
+
+ def test_no_lower(self):
+ resp = self.request("/document.txt", query="pipe=slice(null,10)")
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ self.assertEqual(resp.read(), expected[:10])
+
+class TestSub(TestUsingServer):
+ def test_sub_config(self):
+ resp = self.request("/sub.txt", query="pipe=sub")
+ expected = "localhost localhost %i" % self.server.port
+ self.assertEqual(resp.read().rstrip(), expected)
+
+ def test_sub_headers(self):
+ resp = self.request("/sub_headers.txt", query="pipe=sub", headers={"X-Test": "PASS"})
+ expected = "PASS"
+ self.assertEqual(resp.read().rstrip(), expected)
+
+ def test_sub_params(self):
+ resp = self.request("/sub_params.txt", query="test=PASS&pipe=sub")
+ expected = "PASS"
+ self.assertEqual(resp.read().rstrip(), expected)
+
+class TestTrickle(TestUsingServer):
+ def test_trickle(self):
+ #Actually testing that the response trickles in is not that easy
+ t0 = time.time()
+ resp = self.request("/document.txt", query="pipe=trickle(1:d2:5:d1:r2)")
+ t1 = time.time()
+ expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
+ self.assertEqual(resp.read(), expected)
+ self.assertGreater(6, t1-t0)
+
+if __name__ == '__main__':
+ unittest.main()
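Pipes are selected with the pipe query parameter and chained with "|", each stage rewriting the response produced by the previous one, as the multiple-header test above shows. A minimal sketch of a combined pipeline (the URL is hypothetical):

    # Sketch only: status(202) sets the status code, header(...) sets a header
    # (a third "True" argument appends instead), slice(1,10) keeps body[1:10].
    url = "/document.txt?pipe=status(202)|header(X-Test,PASS)|slice(1,10)"
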
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/test_request.py b/testing/web-platform/tests/tools/wptserve/tests/functional/test_request.py
new file mode 100644
index 000000000..40dfe7703
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/test_request.py
@@ -0,0 +1,82 @@
+import unittest
+
+import wptserve
+from .base import TestUsingServer
+
+class TestInputFile(TestUsingServer):
+ def test_seek(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ rv = []
+ f = request.raw_input
+ f.seek(5)
+ rv.append(f.read(2))
+ rv.append(f.tell())
+ f.seek(0)
+ rv.append(f.readline())
+ rv.append(f.tell())
+ rv.append(f.read(-1))
+ rv.append(f.tell())
+ f.seek(0)
+ rv.append(f.read())
+ f.seek(0)
+ rv.extend(f.readlines())
+
+ return " ".join(str(item) for item in rv)
+
+ route = ("POST", "/test/test_seek", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1], method="POST", body="12345ab\ncdef")
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual(["ab", "7", "12345ab\n", "8", "cdef", "12",
+ "12345ab\ncdef", "12345ab\n", "cdef"],
+ resp.read().split(" "))
+
+ def test_iter(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ f = request.raw_input
+ return " ".join(line for line in f)
+
+ route = ("POST", "/test/test_iter", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1], method="POST", body="12345\nabcdef\r\nzyxwv")
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual(["12345\n", "abcdef\r\n", "zyxwv"], resp.read().split(" "))
+
+class TestRequest(TestUsingServer):
+ def test_body(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ request.raw_input.seek(5)
+ return request.body
+
+ route = ("POST", "/test/test_body", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1], method="POST", body="12345ab\ncdef")
+ self.assertEqual("12345ab\ncdef", resp.read())
+
+ def test_route_match(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return request.route_match["match"] + " " + request.route_match["*"]
+
+ route = ("GET", "/test/{match}_*", handler)
+ self.server.router.register(*route)
+ resp = self.request("/test/some_route")
+ self.assertEqual("some route", resp.read())
+
+class TestAuth(TestUsingServer):
+ def test_auth(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return " ".join((request.auth.username, request.auth.password))
+
+ route = ("GET", "/test/test_auth", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1], auth=("test", "PASS"))
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual(["test", "PASS"], resp.read().split(" "))
+
+if __name__ == '__main__':
+ unittest.main()
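test_route_match above illustrates the route placeholder syntax: {name} captures a named path segment and "*" captures the remainder, both exposed through request.route_match. A minimal sketch restating what that test observes:

    route = ("GET", "/test/{match}_*", handler)   # handler as in the test above
    # A request for /test/some_route then gives:
    #   request.route_match["match"] == "some"
    #   request.route_match["*"] == "route"
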
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/test_response.py b/testing/web-platform/tests/tools/wptserve/tests/functional/test_response.py
new file mode 100644
index 000000000..e9808b54e
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/test_response.py
@@ -0,0 +1,47 @@
+import unittest
+from types import MethodType
+
+import wptserve
+from .base import TestUsingServer
+
+def send_body_as_header(self):
+ if self._response.add_required_headers:
+ self.write_default_headers()
+
+ self.write("X-Body: ")
+ self._headers_complete = True
+
+class TestResponse(TestUsingServer):
+ def test_head_without_body(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ response.writer.end_headers = MethodType(send_body_as_header,
+ response.writer,
+ wptserve.response.ResponseWriter)
+ return [("X-Test", "TEST")], "body\r\n"
+
+ route = ("GET", "/test/test_head_without_body", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1], method="HEAD")
+ self.assertEqual("6", resp.info()['Content-Length'])
+ self.assertEqual("TEST", resp.info()['x-Test'])
+ self.assertEqual("", resp.info()['x-body'])
+
+ def test_head_with_body(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ response.send_body_for_head_request = True
+ response.writer.end_headers = MethodType(send_body_as_header,
+ response.writer,
+ wptserve.response.ResponseWriter)
+ return [("X-Test", "TEST")], "body\r\n"
+
+ route = ("GET", "/test/test_head_with_body", handler)
+ self.server.router.register(*route)
+ resp = self.request(route[1], method="HEAD")
+ self.assertEqual("6", resp.info()['Content-Length'])
+ self.assertEqual("TEST", resp.info()['x-Test'])
+ self.assertEqual("body", resp.info()['X-Body'])
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/test_server.py b/testing/web-platform/tests/tools/wptserve/tests/functional/test_server.py
new file mode 100644
index 000000000..7681f4412
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/test_server.py
@@ -0,0 +1,41 @@
+import unittest
+import urllib2
+
+import wptserve
+from .base import TestUsingServer
+
+class TestFileHandler(TestUsingServer):
+ def test_not_handled(self):
+ with self.assertRaises(urllib2.HTTPError) as cm:
+ resp = self.request("/not_existing")
+
+ self.assertEqual(cm.exception.code, 404)
+
+class TestRewriter(TestUsingServer):
+ def test_rewrite(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ return request.request_path
+
+ route = ("GET", "/test/rewritten", handler)
+ self.server.rewriter.register("GET", "/test/original", route[1])
+ self.server.router.register(*route)
+ resp = self.request("/test/original")
+ self.assertEqual(200, resp.getcode())
+ self.assertEqual("/test/rewritten", resp.read())
+
+class TestRequestHandler(TestUsingServer):
+ def test_exception(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ raise Exception
+
+ route = ("GET", "/test/raises", handler)
+ self.server.router.register(*route)
+ with self.assertRaises(urllib2.HTTPError) as cm:
+ resp = self.request("/test/raises")
+
+ self.assertEqual(cm.exception.code, 500)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/web-platform/tests/tools/wptserve/tests/functional/test_stash.py b/testing/web-platform/tests/tools/wptserve/tests/functional/test_stash.py
new file mode 100644
index 000000000..134293d34
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tests/functional/test_stash.py
@@ -0,0 +1,41 @@
+import unittest
+import uuid
+
+import wptserve
+from wptserve.router import any_method
+from wptserve.stash import StashServer
+from .base import TestUsingServer
+
+class TestResponseSetCookie(TestUsingServer):
+ def run(self, result=None):
+ with StashServer(None, authkey=str(uuid.uuid4())):
+ super(TestResponseSetCookie, self).run(result)
+
+ def test_put_take(self):
+ @wptserve.handlers.handler
+ def handler(request, response):
+ if request.method == "POST":
+ request.server.stash.put(request.POST.first("id"), request.POST.first("data"))
+ data = "OK"
+ elif request.method == "GET":
+ data = request.server.stash.take(request.GET.first("id"))
+ if data is None:
+ return "NOT FOUND"
+ return data
+
+ id = str(uuid.uuid4())
+ route = (any_method, "/test/put_take", handler)
+ self.server.router.register(*route)
+
+ resp = self.request(route[1], method="POST", body={"id": id, "data": "Sample data"})
+ self.assertEqual(resp.read(), "OK")
+
+ resp = self.request(route[1], query="id=" + id)
+ self.assertEqual(resp.read(), "Sample data")
+
+ resp = self.request(route[1], query="id=" + id)
+ self.assertEqual(resp.read(), "NOT FOUND")
+
+
+if __name__ == '__main__':
+ unittest.main()
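The stash behaves as a shared, take-once store: put() files a value under a key, take() returns it and removes it, so the second GET above sees None. A minimal sketch of the same contract from inside a handler (the handler itself is hypothetical):

    # Sketch only; "request" is supplied by wptserve when the handler runs.
    import wptserve

    @wptserve.handlers.handler
    def stash_demo(request, response):
        key = request.GET.first("id")
        first = request.server.stash.take(key)    # returns the stored value, removing it
        second = request.server.stash.take(key)   # now returns None
        return "%s %s" % (first, second)
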
diff --git a/testing/web-platform/tests/tools/wptserve/tox.ini b/testing/web-platform/tests/tools/wptserve/tox.ini
new file mode 100644
index 000000000..9532ca4c2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+envlist = py27,pypy
+
+[testenv]
+deps =
+ coverage
+ flake8
+ pytest
+
+commands =
+ coverage run -m pytest tests/functional
+ flake8
+
+[flake8]
+ignore = E128,E129,E221,E226,E231,E251,E265,E302,E303,E402,E901,F821,F841
+max-line-length = 141
+exclude=docs,.git,__pycache__,.tox,.eggs,*.egg,tests/functional/docroot/
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/__init__.py b/testing/web-platform/tests/tools/wptserve/wptserve/__init__.py
new file mode 100644
index 000000000..a286bfe0b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/__init__.py
@@ -0,0 +1,3 @@
+from .server import WebTestHttpd, WebTestServer, Router # noqa: F401
+from .request import Request # noqa: F401
+from .response import Response # noqa: F401
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/constants.py b/testing/web-platform/tests/tools/wptserve/wptserve/constants.py
new file mode 100644
index 000000000..bd36344a4
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/constants.py
@@ -0,0 +1,92 @@
+from . import utils
+
+content_types = utils.invert_dict({"text/html": ["htm", "html"],
+ "application/json": ["json"],
+ "application/xhtml+xml": ["xht", "xhtm", "xhtml"],
+ "application/xml": ["xml"],
+ "application/x-xpinstall": ["xpi"],
+ "text/javascript": ["js"],
+ "text/css": ["css"],
+ "text/plain": ["txt", "md"],
+ "image/svg+xml": ["svg"],
+ "image/gif": ["gif"],
+ "image/jpeg": ["jpg", "jpeg"],
+ "image/png": ["png"],
+ "image/bmp": ["bmp"],
+ "text/event-stream": ["event_stream"],
+ "text/cache-manifest": ["manifest"],
+ "video/mp4": ["mp4", "m4v"],
+ "audio/mp4": ["m4a"],
+ "audio/mpeg": ["mp3"],
+ "video/webm": ["webm"],
+ "audio/webm": ["weba"],
+ "video/ogg": ["ogg", "ogv"],
+ "audio/ogg": ["oga"],
+ "audio/x-wav": ["wav"],
+ "text/vtt": ["vtt"],})
+
+response_codes = {
+ 100: ('Continue', 'Request received, please continue'),
+ 101: ('Switching Protocols',
+ 'Switching to new protocol; obey Upgrade header'),
+
+ 200: ('OK', 'Request fulfilled, document follows'),
+ 201: ('Created', 'Document created, URL follows'),
+ 202: ('Accepted',
+ 'Request accepted, processing continues off-line'),
+ 203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
+ 204: ('No Content', 'Request fulfilled, nothing follows'),
+ 205: ('Reset Content', 'Clear input form for further input.'),
+ 206: ('Partial Content', 'Partial content follows.'),
+
+ 300: ('Multiple Choices',
+ 'Object has several resources -- see URI list'),
+ 301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
+ 302: ('Found', 'Object moved temporarily -- see URI list'),
+ 303: ('See Other', 'Object moved -- see Method and URL list'),
+ 304: ('Not Modified',
+ 'Document has not changed since given time'),
+ 305: ('Use Proxy',
+ 'You must use proxy specified in Location to access this '
+ 'resource.'),
+ 307: ('Temporary Redirect',
+ 'Object moved temporarily -- see URI list'),
+
+ 400: ('Bad Request',
+ 'Bad request syntax or unsupported method'),
+ 401: ('Unauthorized',
+ 'No permission -- see authorization schemes'),
+ 402: ('Payment Required',
+ 'No payment -- see charging schemes'),
+ 403: ('Forbidden',
+ 'Request forbidden -- authorization will not help'),
+ 404: ('Not Found', 'Nothing matches the given URI'),
+ 405: ('Method Not Allowed',
+ 'Specified method is invalid for this resource.'),
+ 406: ('Not Acceptable', 'URI not available in preferred format.'),
+ 407: ('Proxy Authentication Required', 'You must authenticate with '
+ 'this proxy before proceeding.'),
+ 408: ('Request Timeout', 'Request timed out; try again later.'),
+ 409: ('Conflict', 'Request conflict.'),
+ 410: ('Gone',
+ 'URI no longer exists and has been permanently removed.'),
+ 411: ('Length Required', 'Client must specify Content-Length.'),
+ 412: ('Precondition Failed', 'Precondition in headers is false.'),
+ 413: ('Request Entity Too Large', 'Entity is too large.'),
+ 414: ('Request-URI Too Long', 'URI is too long.'),
+ 415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
+ 416: ('Requested Range Not Satisfiable',
+ 'Cannot satisfy request range.'),
+ 417: ('Expectation Failed',
+ 'Expect condition could not be satisfied.'),
+
+ 500: ('Internal Server Error', 'Server got itself in trouble'),
+ 501: ('Not Implemented',
+ 'Server does not support this operation'),
+ 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
+ 503: ('Service Unavailable',
+ 'The server cannot process the request due to a high load'),
+ 504: ('Gateway Timeout',
+ 'The gateway server did not receive a timely response'),
+ 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
+}
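content_types is built by inverting the extension lists, so lookups go from file extension to MIME type, which is how guess_content_type in handlers.py uses it. A minimal sketch (the shape of the inverted mapping is inferred from that usage):

    from wptserve.constants import content_types

    content_types["html"]   # -> "text/html"
    content_types["svg"]    # -> "image/svg+xml"
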
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/handlers.py b/testing/web-platform/tests/tools/wptserve/wptserve/handlers.py
new file mode 100644
index 000000000..c40321dfe
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/handlers.py
@@ -0,0 +1,370 @@
+import cgi
+import json
+import os
+import traceback
+import urllib
+import urlparse
+
+from .constants import content_types
+from .pipes import Pipeline, template
+from .ranges import RangeParser
+from .request import Authentication
+from .response import MultipartContent
+from .utils import HTTPException
+
+__all__ = ["file_handler", "python_script_handler",
+ "FunctionHandler", "handler", "json_handler",
+ "as_is_handler", "ErrorHandler", "BasicAuthHandler"]
+
+
+def guess_content_type(path):
+ ext = os.path.splitext(path)[1].lstrip(".")
+ if ext in content_types:
+ return content_types[ext]
+
+ return "application/octet-stream"
+
+
+
+def filesystem_path(base_path, request, url_base="/"):
+ if base_path is None:
+ base_path = request.doc_root
+
+ path = urllib.unquote(request.url_parts.path)
+
+ if path.startswith(url_base):
+ path = path[len(url_base):]
+
+ if ".." in path:
+ raise HTTPException(404)
+
+ new_path = os.path.join(base_path, path)
+
+ # Otherwise setting path to / allows access outside the root directory
+ if not new_path.startswith(base_path):
+ raise HTTPException(404)
+
+ return new_path
+
+class DirectoryHandler(object):
+ def __init__(self, base_path=None, url_base="/"):
+ self.base_path = base_path
+ self.url_base = url_base
+
+ def __repr__(self):
+ return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
+
+ def __call__(self, request, response):
+ url_path = request.url_parts.path
+
+ if not url_path.endswith("/"):
+ raise HTTPException(404)
+
+ path = filesystem_path(self.base_path, request, self.url_base)
+
+ assert os.path.isdir(path)
+
+ response.headers = [("Content-Type", "text/html")]
+ response.content = """<!doctype html>
+<meta name="viewport" content="width=device-width">
+<title>Directory listing for %(path)s</title>
+<h1>Directory listing for %(path)s</h1>
+<ul>
+%(items)s
+</ul>
+""" % {"path": cgi.escape(url_path),
+ "items": "\n".join(self.list_items(url_path, path))} # flake8: noqa
+
+ def list_items(self, base_path, path):
+ assert base_path.endswith("/")
+
+ # TODO: this won't actually list all routes, only the
+ # ones that correspond to a real filesystem path. It's
+ # not possible to list every route that will match
+ # something, but it should be possible to at least list the
+ # statically defined ones
+
+ if base_path != "/":
+ link = urlparse.urljoin(base_path, "..")
+ yield ("""<li class="dir"><a href="%(link)s">%(name)s</a></li>""" %
+ {"link": link, "name": ".."})
+ for item in sorted(os.listdir(path)):
+ link = cgi.escape(urllib.quote(item))
+ if os.path.isdir(os.path.join(path, item)):
+ link += "/"
+ class_ = "dir"
+ else:
+ class_ = "file"
+ yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a></li>""" %
+ {"link": link, "name": cgi.escape(item), "class": class_})
+
+
+class FileHandler(object):
+ def __init__(self, base_path=None, url_base="/"):
+ self.base_path = base_path
+ self.url_base = url_base
+ self.directory_handler = DirectoryHandler(self.base_path, self.url_base)
+
+ def __repr__(self):
+ return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
+
+ def __call__(self, request, response):
+ path = filesystem_path(self.base_path, request, self.url_base)
+
+ if os.path.isdir(path):
+ return self.directory_handler(request, response)
+ try:
+ #This is probably racy with some other process trying to change the file
+ file_size = os.stat(path).st_size
+ response.headers.update(self.get_headers(request, path))
+ if "Range" in request.headers:
+ try:
+ byte_ranges = RangeParser()(request.headers['Range'], file_size)
+ except HTTPException as e:
+ if e.code == 416:
+ response.headers.set("Content-Range", "bytes */%i" % file_size)
+ raise
+ else:
+ byte_ranges = None
+ data = self.get_data(response, path, byte_ranges)
+ response.content = data
+ query = urlparse.parse_qs(request.url_parts.query)
+
+ pipeline = None
+ if "pipe" in query:
+ pipeline = Pipeline(query["pipe"][-1])
+ elif os.path.splitext(path)[0].endswith(".sub"):
+ ml_extensions = {".html", ".htm", ".xht", ".xhtml", ".xml", ".svg"}
+ escape_type = "html" if os.path.splitext(path)[1] in ml_extensions else "none"
+ pipeline = Pipeline("sub(%s)" % escape_type)
+
+ if pipeline is not None:
+ response = pipeline(request, response)
+
+ return response
+
+ except (OSError, IOError):
+ raise HTTPException(404)
+
+ def get_headers(self, request, path):
+ rv = (self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")) +
+ self.load_headers(request, path))
+
+ if not any(key.lower() == "content-type" for (key, _) in rv):
+ rv.insert(0, ("Content-Type", guess_content_type(path)))
+
+ return rv
+
+ def load_headers(self, request, path):
+ headers_path = path + ".sub.headers"
+ if os.path.exists(headers_path):
+ use_sub = True
+ else:
+ headers_path = path + ".headers"
+ use_sub = False
+
+ try:
+ with open(headers_path) as headers_file:
+ data = headers_file.read()
+ except IOError:
+ return []
+ else:
+ if use_sub:
+ data = template(request, data, escape_type="none")
+ return [tuple(item.strip() for item in line.split(":", 1))
+ for line in data.splitlines() if line]
+
+ def get_data(self, response, path, byte_ranges):
+ """Return either the handle to a file, or a string containing
+ the content of a chunk of the file, if we have a range request."""
+ if byte_ranges is None:
+ return open(path, 'rb')
+ else:
+ with open(path, 'rb') as f:
+ response.status = 206
+ if len(byte_ranges) > 1:
+ parts_content_type, content = self.set_response_multipart(response,
+ byte_ranges,
+ f)
+ for byte_range in byte_ranges:
+ content.append_part(self.get_range_data(f, byte_range),
+ parts_content_type,
+ [("Content-Range", byte_range.header_value())])
+ return content
+ else:
+ response.headers.set("Content-Range", byte_ranges[0].header_value())
+ return self.get_range_data(f, byte_ranges[0])
+
+ def set_response_multipart(self, response, ranges, f):
+ parts_content_type = response.headers.get("Content-Type")
+ if parts_content_type:
+ parts_content_type = parts_content_type[-1]
+ else:
+ parts_content_type = None
+ content = MultipartContent()
+ response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
+ return parts_content_type, content
+
+ def get_range_data(self, f, byte_range):
+ f.seek(byte_range.lower)
+ return f.read(byte_range.upper - byte_range.lower)
+
+
+file_handler = FileHandler()
+
+
+class PythonScriptHandler(object):
+ def __init__(self, base_path=None, url_base="/"):
+ self.base_path = base_path
+ self.url_base = url_base
+
+ def __repr__(self):
+ return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
+
+ def __call__(self, request, response):
+ path = filesystem_path(self.base_path, request, self.url_base)
+
+ try:
+ environ = {"__file__": path}
+ execfile(path, environ, environ)
+ if "main" in environ:
+ handler = FunctionHandler(environ["main"])
+ handler(request, response)
+ else:
+ raise HTTPException(500, "No main function in script %s" % path)
+ except IOError:
+ raise HTTPException(404)
+
+python_script_handler = PythonScriptHandler()
+
+class FunctionHandler(object):
+ def __init__(self, func):
+ self.func = func
+
+ def __call__(self, request, response):
+ try:
+ rv = self.func(request, response)
+ except Exception:
+ msg = traceback.format_exc()
+ raise HTTPException(500, message=msg)
+ if rv is not None:
+ if isinstance(rv, tuple):
+ if len(rv) == 3:
+ status, headers, content = rv
+ response.status = status
+ elif len(rv) == 2:
+ headers, content = rv
+ else:
+ raise HTTPException(500)
+ response.headers.update(headers)
+ else:
+ content = rv
+ response.content = content
+
+
+#The generic name here is so that this can be used as a decorator
+def handler(func):
+ return FunctionHandler(func)
+
+
+class JsonHandler(object):
+ def __init__(self, func):
+ self.func = func
+
+ def __call__(self, request, response):
+ return FunctionHandler(self.handle_request)(request, response)
+
+ def handle_request(self, request, response):
+ rv = self.func(request, response)
+ response.headers.set("Content-Type", "application/json")
+ enc = json.dumps
+ if isinstance(rv, tuple):
+ rv = list(rv)
+ value = tuple(rv[:-1] + [enc(rv[-1])])
+ length = len(value[-1])
+ else:
+ value = enc(rv)
+ length = len(value)
+ response.headers.set("Content-Length", length)
+ return value
+
+def json_handler(func):
+ return JsonHandler(func)
+
+class AsIsHandler(object):
+ def __init__(self, base_path=None, url_base="/"):
+ self.base_path = base_path
+ self.url_base = url_base
+
+ def __call__(self, request, response):
+ path = filesystem_path(self.base_path, request, self.url_base)
+
+ try:
+ with open(path) as f:
+ response.writer.write_content(f.read())
+ response.close_connection = True
+ except IOError:
+ raise HTTPException(404)
+
+as_is_handler = AsIsHandler()
+
+class BasicAuthHandler(object):
+ def __init__(self, handler, user, password):
+ """
+ A Basic Auth handler
+
+ :Args:
+ - handler: a secondary handler for the request after authentication succeeds (e.g. file_handler)
+ - user: string giving the valid user name, or None to accept any credentials
+ - password: string giving the required password
+ """
+ self.user = user
+ self.password = password
+ self.handler = handler
+
+ def __call__(self, request, response):
+ if "authorization" not in request.headers:
+ response.status = 401
+ response.headers.set("WWW-Authenticate", "Basic")
+ return response
+ else:
+ auth = Authentication(request.headers)
+ if self.user is not None and (self.user != auth.username or self.password != auth.password):
+ response.set_error(403, "Invalid username or password")
+ return response
+ return self.handler(request, response)
+
+basic_auth_handler = BasicAuthHandler(file_handler, None, None)
+
+class ErrorHandler(object):
+ def __init__(self, status):
+ self.status = status
+
+ def __call__(self, request, response):
+ response.set_error(self.status)
+
+
+class StaticHandler(object):
+ def __init__(self, path, format_args, content_type, **headers):
+ """Hander that reads a file from a path and substitutes some fixed data
+
+ :param path: Path to the template file to use
+ :param format_args: Dictionary of values to substitute into the template file
+ :param content_type: Content type header to serve the response with
+ :param headers: Extra headers to send with responses, passed as keyword
+ arguments (underscores in names become hyphens)"""
+
+ with open(path) as f:
+ self.data = f.read() % format_args
+
+ self.resp_headers = [("Content-Type", content_type)]
+ for k, v in headers.iteritems():
+ self.resp_headers.append((k.replace("_", "-"), v))
+
+ self.handler = handler(self.handle_request)
+
+ def handle_request(self, request, response):
+ return self.resp_headers, self.data
+
+ def __call__(self, request, response):
+ rv = self.handler(request, response)
+ return rv
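All of the handlers above are plain callables taking (request, response), and the module exposes ready-made instances (file_handler, python_script_handler, as_is_handler, basic_auth_handler) that can be registered on a router directly. A minimal sketch of wrapping the file handler in basic auth (the path, credentials and server object are assumptions):

    # Sketch only; "server" is assumed to be an already-running WebTestHttpd.
    from wptserve.handlers import BasicAuthHandler, file_handler

    protected = BasicAuthHandler(file_handler, "user", "secret")
    server.router.register("GET", "/protected/*", protected)
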
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/logger.py b/testing/web-platform/tests/tools/wptserve/wptserve/logger.py
new file mode 100644
index 000000000..6c91492c7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/logger.py
@@ -0,0 +1,29 @@
+class NoOpLogger(object):
+ def critical(self, msg):
+ pass
+
+ def error(self, msg):
+ pass
+
+ def info(self, msg):
+ pass
+
+ def warning(self, msg):
+ pass
+
+ def debug(self, msg):
+ pass
+
+logger = NoOpLogger()
+_set_logger = False
+
+def set_logger(new_logger):
+ global _set_logger
+ if _set_logger:
+ raise Exception("Logger must be set at most once")
+ global logger
+ logger = new_logger
+ _set_logger = True
+
+def get_logger():
+ return logger
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/pipes.py b/testing/web-platform/tests/tools/wptserve/wptserve/pipes.py
new file mode 100644
index 000000000..41f7dd33e
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/pipes.py
@@ -0,0 +1,449 @@
+from cgi import escape
+import gzip as gzip_module
+import re
+import time
+import types
+import uuid
+from cStringIO import StringIO
+
+
+def resolve_content(response):
+ rv = "".join(item for item in response.iter_content(read_file=True))
+ if type(rv) == unicode:
+ rv = rv.encode(response.encoding)
+ return rv
+
+
+class Pipeline(object):
+ pipes = {}
+
+ def __init__(self, pipe_string):
+ self.pipe_functions = self.parse(pipe_string)
+
+ def parse(self, pipe_string):
+ functions = []
+ for item in PipeTokenizer().tokenize(pipe_string):
+ if not item:
+ break
+ if item[0] == "function":
+ functions.append((self.pipes[item[1]], []))
+ elif item[0] == "argument":
+ functions[-1][1].append(item[1])
+ return functions
+
+ def __call__(self, request, response):
+ for func, args in self.pipe_functions:
+ response = func(request, response, *args)
+ return response
+
+
+class PipeTokenizer(object):
+ def __init__(self):
+ #This whole class can likely be replaced by some regexps
+ self.state = None
+
+ def tokenize(self, string):
+ self.string = string
+ self.state = self.func_name_state
+ self._index = 0
+ while self.state:
+ yield self.state()
+ yield None
+
+ def get_char(self):
+ if self._index >= len(self.string):
+ return None
+ rv = self.string[self._index]
+ self._index += 1
+ return rv
+
+ def func_name_state(self):
+ rv = ""
+ while True:
+ char = self.get_char()
+ if char is None:
+ self.state = None
+ if rv:
+ return ("function", rv)
+ else:
+ return None
+ elif char == "(":
+ self.state = self.argument_state
+ return ("function", rv)
+ elif char == "|":
+ if rv:
+ return ("function", rv)
+ else:
+ rv += char
+
+ def argument_state(self):
+ rv = ""
+ while True:
+ char = self.get_char()
+ if char is None:
+ self.state = None
+ return ("argument", rv)
+ elif char == "\\":
+ rv += self.get_escape()
+ if rv is None:
+ #This should perhaps be an error instead
+ return ("argument", rv)
+ elif char == ",":
+ return ("argument", rv)
+ elif char == ")":
+ self.state = self.func_name_state
+ return ("argument", rv)
+ else:
+ rv += char
+
+ def get_escape(self):
+ char = self.get_char()
+ escapes = {"n": "\n",
+ "r": "\r",
+ "t": "\t"}
+ return escapes.get(char, char)
+
+
+class pipe(object):
+ def __init__(self, *arg_converters):
+ self.arg_converters = arg_converters
+ self.max_args = len(self.arg_converters)
+ self.min_args = 0
+ opt_seen = False
+ for item in self.arg_converters:
+ if not opt_seen:
+ if isinstance(item, opt):
+ opt_seen = True
+ else:
+ self.min_args += 1
+ else:
+ if not isinstance(item, opt):
+ raise ValueError("Non-optional argument cannot follow optional argument")
+
+ def __call__(self, f):
+ def inner(request, response, *args):
+ if not (self.min_args <= len(args) <= self.max_args):
+ raise ValueError("Expected between %d and %d args, got %d" %
+ (self.min_args, self.max_args, len(args)))
+ arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
+ return f(request, response, *arg_values)
+ Pipeline.pipes[f.__name__] = inner
+ #We actually want the undecorated function in the main namespace
+ return f
+
+
+class opt(object):
+ def __init__(self, f):
+ self.f = f
+
+ def __call__(self, arg):
+ return self.f(arg)
+
+
+def nullable(func):
+ def inner(arg):
+ if arg.lower() == "null":
+ return None
+ else:
+ return func(arg)
+ return inner
+
+
+def boolean(arg):
+ if arg.lower() in ("true", "1"):
+ return True
+ elif arg.lower() in ("false", "0"):
+ return False
+ raise ValueError
+
+
+@pipe(int)
+def status(request, response, code):
+ """Alter the status code.
+
+ :param code: Status code to use for the response."""
+ response.status = code
+ return response
+
+
+@pipe(str, str, opt(boolean))
+def header(request, response, name, value, append=False):
+ """Set a HTTP header.
+
+ Replaces any existing HTTP header of the same name unless
+ append is set, in which case the header is appended without
+ replacement.
+
+ :param name: Name of the header to set.
+ :param value: Value to use for the header.
+ :param append: True if existing headers should not be replaced
+ """
+ if not append:
+ response.headers.set(name, value)
+ else:
+ response.headers.append(name, value)
+ return response
+
+
+@pipe(str)
+def trickle(request, response, delays):
+ """Send the response in parts, with time delays.
+
+ :param delays: A string of delays and amounts, in bytes, of the
+ response to send. Each component is separated by
+ a colon. Amounts in bytes are plain integers, whilst
+ delays are floats prefixed with a single d e.g.
+ d1:100:d2
+ Would cause a 1 second delay, would then send 100 bytes
+ of the file, and then cause a 2 second delay, before sending
+ the remainder of the file.
+
+ If the last token is of the form rN, instead of sending the
+ remainder of the file, the previous N instructions will be
+ repeated until the whole file has been sent e.g.
+ d1:100:d2:r2
+ Causes a 1 second delay, then 100 bytes to be sent, then a 2 second
+ delay, and then repeatedly sends a further 100 bytes followed by a
+ 2 second delay until the response has been fully sent.
+ """
+ def parse_delays():
+ parts = delays.split(":")
+ rv = []
+ for item in parts:
+ if item.startswith("d"):
+ item_type = "delay"
+ item = item[1:]
+ value = float(item)
+ elif item.startswith("r"):
+ item_type = "repeat"
+ value = int(item[1:])
+ if not value % 2 == 0:
+ raise ValueError
+ else:
+ item_type = "bytes"
+ value = int(item)
+ if len(rv) and rv[-1][0] == item_type:
+ rv[-1][1] += value
+ else:
+ rv.append((item_type, value))
+ return rv
+
+ delays = parse_delays()
+ if not delays:
+ return response
+ content = resolve_content(response)
+ offset = [0]
+
+ def add_content(delays, repeat=False):
+ for i, (item_type, value) in enumerate(delays):
+ if item_type == "bytes":
+ yield content[offset[0]:offset[0] + value]
+ offset[0] += value
+ elif item_type == "delay":
+ time.sleep(value)
+ elif item_type == "repeat":
+ if i != len(delays) - 1:
+ continue
+ while offset[0] < len(content):
+ for item in add_content(delays[-(value + 1):-1], True):
+ yield item
+
+ if not repeat and offset[0] < len(content):
+ yield content[offset[0]:]
+
+ response.content = add_content(delays)
+ return response
+
+
+@pipe(nullable(int), opt(nullable(int)))
+def slice(request, response, start, end=None):
+ """Send a byte range of the response body
+
+ :param start: The starting offset. Follows python semantics including
+ negative numbers.
+
+ :param end: The ending offset, again with python semantics and None
+ (spelled "null" in a query string) to indicate the end of
+ the file.
+ """
+ content = resolve_content(response)
+ response.content = content[start:end]
+ return response
+
+
+class ReplacementTokenizer(object):
+ def ident(scanner, token):
+ return ("ident", token)
+
+ def index(scanner, token):
+ token = token[1:-1]
+ try:
+ token = int(token)
+ except ValueError:
+ token = unicode(token, "utf8")
+ return ("index", token)
+
+ def var(scanner, token):
+ token = token[:-1]
+ return ("var", token)
+
+ def tokenize(self, string):
+ return self.scanner.scan(string)[0]
+
+ scanner = re.Scanner([(r"\$\w+:", var),
+ (r"\$?\w+(?:\(\))?", ident),
+ (r"\[[^\]]*\]", index)])
+
+
+class FirstWrapper(object):
+ def __init__(self, params):
+ self.params = params
+
+ def __getitem__(self, key):
+ try:
+ return self.params.first(key)
+ except KeyError:
+ return ""
+
+
+@pipe(opt(nullable(str)))
+def sub(request, response, escape_type="html"):
+ """Substitute environment information about the server and request into the script.
+
+ :param escape_type: String detailing the type of escaping to use. Known values are
+ "html" and "none", with "html" the default for historic reasons.
+
+ The format is a very limited template language. Substitutions are
+ enclosed by {{ and }}. There are several available substitutions:
+
+ host
+ A simple string value and represents the primary host from which the
+ tests are being run.
+ domains
+ A dictionary of available domains indexed by subdomain name.
+ ports
+ A dictionary of lists of ports indexed by protocol.
+ location
+ A dictionary of parts of the request URL. Valid keys are
+ 'server', 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
+ 'server' is scheme://host:port, 'host' is hostname:port, and query
+ includes the leading '?', but other delimiters are omitted.
+ headers
+ A dictionary of HTTP headers in the request.
+ GET
+ A dictionary of query parameters supplied with the request.
+ uuid()
+ A pseudo-random UUID suitable for use with the stash
+
+ So for example in a setup running on localhost with a www
+ subdomain and a http server on ports 80 and 81::
+
+ {{host}} => localhost
+ {{domains[www]}} => www.localhost
+ {{ports[http][1]}} => 81
+
+
+ It is also possible to assign a value to a variable name, which must start with
+ the $ character, using the ":" syntax e.g.
+
+ {{$id:uuid()}}
+
+ Later substitutions in the same file may then refer to the variable
+ by name e.g.
+
+ {{$id}}
+ """
+ content = resolve_content(response)
+
+ new_content = template(request, content, escape_type=escape_type)
+
+ response.content = new_content
+ return response
+
+def template(request, content, escape_type="html"):
+ #TODO: There basically isn't any error handling here
+ tokenizer = ReplacementTokenizer()
+
+ variables = {}
+
+ def config_replacement(match):
+ content, = match.groups()
+
+ tokens = tokenizer.tokenize(content)
+
+ if tokens[0][0] == "var":
+ variable = tokens[0][1]
+ tokens = tokens[1:]
+ else:
+ variable = None
+
+ assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens
+
+ field = tokens[0][1]
+
+ if field in variables:
+ value = variables[field]
+ elif field == "headers":
+ value = request.headers
+ elif field == "GET":
+ value = FirstWrapper(request.GET)
+ elif field in request.server.config:
+ value = request.server.config[tokens[0][1]]
+ elif field == "location":
+ value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
+ request.url_parts.hostname,
+ request.url_parts.port),
+ "scheme": request.url_parts.scheme,
+ "host": "%s:%s" % (request.url_parts.hostname,
+ request.url_parts.port),
+ "hostname": request.url_parts.hostname,
+ "port": request.url_parts.port,
+ "path": request.url_parts.path,
+ "pathname": request.url_parts.path,
+ "query": "?%s" % request.url_parts.query}
+ elif field == "uuid()":
+ value = str(uuid.uuid4())
+ elif field == "url_base":
+ value = request.url_base
+ else:
+ raise Exception("Undefined template variable %s" % field)
+
+ for item in tokens[1:]:
+ value = value[item[1]]
+
+ assert isinstance(value, (int,) + types.StringTypes), tokens
+
+ if variable is not None:
+ variables[variable] = value
+
+ escape_func = {"html": lambda x:escape(x, quote=True),
+ "none": lambda x:x}[escape_type]
+
+ #Should possibly support escaping for other contexts e.g. script
+ #TODO: read the encoding of the response
+ return escape_func(unicode(value)).encode("utf-8")
+
+ template_regexp = re.compile(r"{{([^}]*)}}")
+ new_content = template_regexp.sub(config_replacement, content)
+
+ return new_content
+
+@pipe()
+def gzip(request, response):
+ """This pipe gzip-encodes response data.
+
+ It sets (or overwrites) these HTTP headers:
+ Content-Encoding is set to gzip
+ Content-Length is set to the length of the compressed content
+ """
+ content = resolve_content(response)
+ response.headers.set("Content-Encoding", "gzip")
+
+ out = StringIO()
+ with gzip_module.GzipFile(fileobj=out, mode="w") as f:
+ f.write(content)
+ response.content = out.getvalue()
+
+ response.headers.set("Content-Length", len(response.content))
+
+ return response
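New pipes are registered with the @pipe decorator, which records the function in Pipeline.pipes under its own name and converts and validates the string arguments parsed from the query string before the call. A minimal sketch of a custom pipe (the name "shout" and its behaviour are hypothetical):

    from wptserve.pipes import pipe, opt, resolve_content

    @pipe(opt(str))                      # one optional string argument
    def shout(request, response, suffix=""):
        # Upper-case the resolved body; selected as e.g. ?pipe=shout(!)
        response.content = resolve_content(response).upper() + suffix
        return response
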
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/ranges.py b/testing/web-platform/tests/tools/wptserve/wptserve/ranges.py
new file mode 100644
index 000000000..976cb1781
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/ranges.py
@@ -0,0 +1,90 @@
+from .utils import HTTPException
+
+
+class RangeParser(object):
+ def __call__(self, header, file_size):
+ prefix = "bytes="
+ if not header.startswith(prefix):
+ raise HTTPException(416, message="Unrecognised range type %s" % (header,))
+
+ parts = header[len(prefix):].split(",")
+ ranges = []
+ for item in parts:
+ components = item.split("-")
+ if len(components) != 2:
+ raise HTTPException(416, "Bad range specifier %s" % (item))
+ data = []
+ for component in components:
+ if component == "":
+ data.append(None)
+ else:
+ try:
+ data.append(int(component))
+ except ValueError:
+ raise HTTPException(416, "Bad range specifier %s" % (item))
+ try:
+ ranges.append(Range(data[0], data[1], file_size))
+ except ValueError:
+ raise HTTPException(416, "Bad range specifier %s" % (item))
+
+ return self.coalesce_ranges(ranges, file_size)
+
+ def coalesce_ranges(self, ranges, file_size):
+ rv = []
+ target = None
+ for current in reversed(sorted(ranges)):
+ if target is None:
+ target = current
+ else:
+ new = target.coalesce(current)
+ target = new[0]
+ if len(new) > 1:
+ rv.append(new[1])
+ rv.append(target)
+
+ return rv[::-1]
+
+
+class Range(object):
+ def __init__(self, lower, upper, file_size):
+ self.file_size = file_size
+ self.lower, self.upper = self._abs(lower, upper)
+ if self.lower >= self.upper or self.lower >= self.file_size:
+ raise ValueError
+
+ def __repr__(self):
+ return "<Range %s-%s>" % (self.lower, self.upper)
+
+ def __lt__(self, other):
+ return self.lower < other.lower
+
+ def __gt__(self, other):
+ return self.lower > other.lower
+
+ def __eq__(self, other):
+ return self.lower == other.lower and self.upper == other.upper
+
+ def _abs(self, lower, upper):
+ if lower is None and upper is None:
+ lower, upper = 0, self.file_size
+ elif lower is None:
+ lower, upper = max(0, self.file_size - upper), self.file_size
+ elif upper is None:
+ lower, upper = lower, self.file_size
+ else:
+ lower, upper = lower, min(self.file_size, upper + 1)
+
+ return lower, upper
+
+ def coalesce(self, other):
+ assert self.file_size == other.file_size
+
+ if (self.upper < other.lower or self.lower > other.upper):
+ return sorted([self, other])
+ else:
+ return [Range(min(self.lower, other.lower),
+ max(self.upper, other.upper) - 1,
+ self.file_size)]
+
+ def header_value(self):
+ return "bytes %i-%i/%i" % (self.lower, self.upper - 1, self.file_size)
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/request.py b/testing/web-platform/tests/tools/wptserve/wptserve/request.py
new file mode 100644
index 000000000..6b8a7cef8
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/request.py
@@ -0,0 +1,589 @@
+import base64
+import cgi
+import Cookie
+import StringIO
+import tempfile
+import urlparse
+
+from . import stash
+from .utils import HTTPException
+
+missing = object()
+
+
+class Server(object):
+ """Data about the server environment
+
+ .. attribute:: config
+
+ Environment configuration information with information about the
+ various servers running, their hostnames and ports.
+
+ .. attribute:: stash
+
+ Stash object holding state stored on the server between requests.
+
+ """
+ config = None
+
+ def __init__(self, request):
+ self._stash = None
+ self._request = request
+
+ @property
+ def stash(self):
+ if self._stash is None:
+ address, authkey = stash.load_env_config()
+ self._stash = stash.Stash(self._request.url_parts.path, address, authkey)
+ return self._stash
+
+
+class InputFile(object):
+ max_buffer_size = 1024*1024
+
+ def __init__(self, rfile, length):
+ """File-like object used to provide a seekable view of request body data"""
+ self._file = rfile
+ self.length = length
+
+ self._file_position = 0
+
+ if length > self.max_buffer_size:
+ self._buf = tempfile.TemporaryFile(mode="rw+b")
+ else:
+ self._buf = StringIO.StringIO()
+
+ @property
+ def _buf_position(self):
+ rv = self._buf.tell()
+ assert rv <= self._file_position
+ return rv
+
+ def read(self, bytes=-1):
+ assert self._buf_position <= self._file_position
+
+ if bytes < 0:
+ bytes = self.length - self._buf_position
+ bytes_remaining = min(bytes, self.length - self._buf_position)
+
+ if bytes_remaining == 0:
+ return ""
+
+ if self._buf_position != self._file_position:
+ buf_bytes = min(bytes_remaining, self._file_position - self._buf_position)
+ old_data = self._buf.read(buf_bytes)
+ bytes_remaining -= buf_bytes
+ else:
+ old_data = ""
+
+ assert self._buf_position == self._file_position, (
+ "Before reading buffer position (%i) didn't match file position (%i)" %
+ (self._buf_position, self._file_position))
+ new_data = self._file.read(bytes_remaining)
+ self._buf.write(new_data)
+ self._file_position += bytes_remaining
+ assert self._buf_position == self._file_position, (
+ "After reading buffer position (%i) didn't match file position (%i)" %
+ (self._buf_position, self._file_position))
+
+ return old_data + new_data
+
+ def tell(self):
+ return self._buf_position
+
+ def seek(self, offset):
+ if offset > self.length or offset < 0:
+ raise ValueError
+ if offset <= self._file_position:
+ self._buf.seek(offset)
+ else:
+ self.read(offset - self._file_position)
+
+ def readline(self, max_bytes=None):
+ if max_bytes is None:
+ max_bytes = self.length - self._buf_position
+
+ if self._buf_position < self._file_position:
+ data = self._buf.readline(max_bytes)
+ if data.endswith("\n") or len(data) == max_bytes:
+ return data
+ else:
+ data = ""
+
+ assert self._buf_position == self._file_position
+
+ initial_position = self._file_position
+ found = False
+ buf = []
+ max_bytes -= len(data)
+ while not found:
+ readahead = self.read(min(2, max_bytes))
+ max_bytes -= len(readahead)
+ for i, c in enumerate(readahead):
+ if c == "\n":
+ buf.append(readahead[:i+1])
+ found = True
+ break
+ if not found:
+ buf.append(readahead)
+ if not readahead or not max_bytes:
+ break
+ new_data = "".join(buf)
+ data += new_data
+ self.seek(initial_position + len(new_data))
+ return data
+
+ def readlines(self):
+ rv = []
+ while True:
+ data = self.readline()
+ if data:
+ rv.append(data)
+ else:
+ break
+ return rv
+
+ def next(self):
+ data = self.readline()
+ if data:
+ return data
+ else:
+ raise StopIteration
+
+ def __iter__(self):
+ return self
+
+
+class Request(object):
+ """Object representing a HTTP request.
+
+ .. attribute:: doc_root
+
+ The local directory to use as a base when resolving paths
+
+ .. attribute:: route_match
+
+ Regexp match object from matching the request path to the route
+ selected for the request.
+
+ .. attribute:: protocol_version
+
+ HTTP version specified in the request.
+
+ .. attribute:: method
+
+ HTTP method in the request.
+
+ .. attribute:: request_path
+
+ Request path as it appears in the HTTP request.
+
+ .. attribute:: url_base
+
+ The prefix part of the path; typically / unless the handler has a url_base set
+
+ .. attribute:: url
+
+ Absolute URL for the request.
+
+ .. attribute:: headers
+
+ List of request headers.
+
+ .. attribute:: raw_input
+
+ File-like object representing the body of the request.
+
+ .. attribute:: url_parts
+
+ Parts of the requested URL as obtained by urlparse.urlsplit(path)
+
+ .. attribute:: request_line
+
+ Raw request line
+
+ .. attribute:: headers
+
+ RequestHeaders object providing a dictionary-like representation of
+ the request headers.
+
+ .. attribute:: body
+
+ Request body as a string
+
+ .. attribute:: GET
+
+ MultiDict representing the parameters supplied with the request.
+ Note that these may be present on non-GET requests; the name is
+ chosen to be familiar to users of other systems such as PHP.
+
+ .. attribute:: POST
+
+ MultiDict representing the request body parameters. Most parameters
+ are present as string values, but file uploads have file-like
+ values.
+
+ .. attribute:: cookies
+
+ Cookies object representing cookies sent with the request with a
+ dictionary-like interface.
+
+ .. attribute:: auth
+
+ Object with username and password properties representing any
+ credentials supplied using HTTP authentication.
+
+ .. attribute:: server
+
+ Server object containing information about the server environment.
+ """
+
+ def __init__(self, request_handler):
+ self.doc_root = request_handler.server.router.doc_root
+ self.route_match = None # Set by the router
+
+ self.protocol_version = request_handler.protocol_version
+ self.method = request_handler.command
+
+ scheme = request_handler.server.scheme
+ host = request_handler.headers.get("Host")
+ port = request_handler.server.server_address[1]
+
+ if host is None:
+ host = request_handler.server.server_address[0]
+ else:
+ if ":" in host:
+ host, port = host.split(":", 1)
+
+ self.request_path = request_handler.path
+ self.url_base = "/"
+
+ if self.request_path.startswith(scheme + "://"):
+ self.url = request_handler.path
+ else:
+ self.url = "%s://%s:%s%s" % (scheme,
+ host,
+ port,
+ self.request_path)
+ self.url_parts = urlparse.urlsplit(self.url)
+
+ self._raw_headers = request_handler.headers
+
+ self.request_line = request_handler.raw_requestline
+
+ self._headers = None
+
+ self.raw_input = InputFile(request_handler.rfile,
+ int(self.headers.get("Content-Length", 0)))
+ self._body = None
+
+ self._GET = None
+ self._POST = None
+ self._cookies = None
+ self._auth = None
+
+ self.server = Server(self)
+
+ def __repr__(self):
+ return "<Request %s %s>" % (self.method, self.url)
+
+ @property
+ def GET(self):
+ if self._GET is None:
+ params = urlparse.parse_qsl(self.url_parts.query, keep_blank_values=True)
+ self._GET = MultiDict()
+ for key, value in params:
+ self._GET.add(key, value)
+ return self._GET
+
+ @property
+ def POST(self):
+ if self._POST is None:
+ #Work out the post parameters
+ pos = self.raw_input.tell()
+ self.raw_input.seek(0)
+ fs = cgi.FieldStorage(fp=self.raw_input,
+ environ={"REQUEST_METHOD": self.method},
+ headers=self.headers,
+ keep_blank_values=True)
+ self._POST = MultiDict.from_field_storage(fs)
+ self.raw_input.seek(pos)
+ return self._POST
+
+ @property
+ def cookies(self):
+ if self._cookies is None:
+ parser = Cookie.BaseCookie()
+ cookie_headers = self.headers.get("cookie", "")
+ parser.load(cookie_headers)
+ cookies = Cookies()
+ for key, value in parser.iteritems():
+ cookies[key] = CookieValue(value)
+ self._cookies = cookies
+ return self._cookies
+
+ @property
+ def headers(self):
+ if self._headers is None:
+ self._headers = RequestHeaders(self._raw_headers)
+ return self._headers
+
+ @property
+ def body(self):
+ if self._body is None:
+ pos = self.raw_input.tell()
+ self.raw_input.seek(0)
+ self._body = self.raw_input.read()
+ self.raw_input.seek(pos)
+ return self._body
+
+ @property
+ def auth(self):
+ if self._auth is None:
+ self._auth = Authentication(self.headers)
+ return self._auth
+
+
+class RequestHeaders(dict):
+ """Dictionary-like API for accessing request headers."""
+ def __init__(self, items):
+ for key, value in zip(items.keys(), items.values()):
+ key = key.lower()
+ if key in self:
+                # Use dict.__getitem__ to reach the underlying list; self[key]
+                # would return a joined string via __getitem__ below
+                dict.__getitem__(self, key).append(value)
+ else:
+ dict.__setitem__(self, key, [value])
+
+ def __getitem__(self, key):
+ """Get all headers of a certain (case-insensitive) name. If there is
+ more than one, the values are returned comma separated"""
+ values = dict.__getitem__(self, key.lower())
+ if len(values) == 1:
+ return values[0]
+ else:
+ return ", ".join(values)
+
+ def __setitem__(self, name, value):
+ raise Exception
+
+ def get(self, key, default=None):
+ """Get a string representing all headers with a particular value,
+ with multiple headers separated by a comma. If no header is found
+ return a default value
+
+ :param key: The header name to look up (case-insensitive)
+ :param default: The value to return in the case of no match
+ """
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def get_list(self, key, default=missing):
+ """Get all the header values for a particular field name as
+ a list"""
+ try:
+ return dict.__getitem__(self, key.lower())
+ except KeyError:
+ if default is not missing:
+ return default
+ else:
+ raise
+
+ def __contains__(self, key):
+ return dict.__contains__(self, key.lower())
+
+ def iteritems(self):
+ for item in self:
+ yield item, self[item]
+
+ def itervalues(self):
+ for item in self:
+ yield self[item]
+
+class CookieValue(object):
+ """Representation of cookies.
+
+ Note that cookies are considered read-only and the string value
+ of the cookie will not change if you update the field values.
+ However this is not enforced.
+
+ .. attribute:: key
+
+ The name of the cookie.
+
+ .. attribute:: value
+
+ The value of the cookie
+
+ .. attribute:: expires
+
+ The expiry date of the cookie
+
+ .. attribute:: path
+
+ The path of the cookie
+
+ .. attribute:: comment
+
+ The comment of the cookie.
+
+ .. attribute:: domain
+
+ The domain with which the cookie is associated
+
+ .. attribute:: max_age
+
+ The max-age value of the cookie.
+
+ .. attribute:: secure
+
+ Whether the cookie is marked as secure
+
+ .. attribute:: httponly
+
+ Whether the cookie is marked as httponly
+
+ """
+ def __init__(self, morsel):
+ self.key = morsel.key
+ self.value = morsel.value
+
+ for attr in ["expires", "path",
+ "comment", "domain", "max-age",
+ "secure", "version", "httponly"]:
+ setattr(self, attr.replace("-", "_"), morsel[attr])
+
+ self._str = morsel.OutputString()
+
+ def __str__(self):
+ return self._str
+
+ def __repr__(self):
+ return self._str
+
+ def __eq__(self, other):
+ """Equality comparison for cookies. Compares to other cookies
+ based on value alone and on non-cookies based on the equality
+ of self.value with the other object so that a cookie with value
+ "ham" compares equal to the string "ham"
+ """
+ if hasattr(other, "value"):
+ return self.value == other.value
+ return self.value == other
+
+
+class MultiDict(dict):
+ """Dictionary type that holds multiple values for each
+ key"""
+ #TODO: this should perhaps also order the keys
+ def __init__(self):
+ pass
+
+ def __setitem__(self, name, value):
+ dict.__setitem__(self, name, [value])
+
+ def add(self, name, value):
+ if name in self:
+ dict.__getitem__(self, name).append(value)
+ else:
+ dict.__setitem__(self, name, [value])
+
+ def __getitem__(self, key):
+ """Get the first value with a given key"""
+ #TODO: should this instead be the last value?
+ return self.first(key)
+
+ def first(self, key, default=missing):
+ """Get the first value with a given key
+
+ :param key: The key to lookup
+ :param default: The default to return if key is
+ not found (throws if nothing is
+ specified)
+ """
+ if key in self and dict.__getitem__(self, key):
+ return dict.__getitem__(self, key)[0]
+ elif default is not missing:
+ return default
+ raise KeyError
+
+ def last(self, key, default=missing):
+ """Get the last value with a given key
+
+ :param key: The key to lookup
+ :param default: The default to return if key is
+ not found (throws if nothing is
+ specified)
+ """
+ if key in self and dict.__getitem__(self, key):
+ return dict.__getitem__(self, key)[-1]
+ elif default is not missing:
+ return default
+ raise KeyError
+
+ def get_list(self, key):
+ """Get all values with a given key as a list
+
+ :param key: The key to lookup
+ """
+ return dict.__getitem__(self, key)
+
+ @classmethod
+ def from_field_storage(cls, fs):
+ self = cls()
+ if fs.list is None:
+ return self
+ for key in fs:
+ values = fs[key]
+ if not isinstance(values, list):
+ values = [values]
+
+ for value in values:
+ if value.filename:
+ value = value
+ else:
+ value = value.value
+ self.add(key, value)
+ return self
+
+
+class Cookies(MultiDict):
+ """MultiDict specialised for Cookie values"""
+ def __init__(self):
+ pass
+
+ def __getitem__(self, key):
+ return self.last(key)
+
+
+class Authentication(object):
+ """Object for dealing with HTTP Authentication
+
+ .. attribute:: username
+
+ The username supplied in the HTTP Authorization
+ header, or None
+
+ .. attribute:: password
+
+ The password supplied in the HTTP Authorization
+ header, or None
+ """
+ def __init__(self, headers):
+ self.username = None
+ self.password = None
+
+ auth_schemes = {"Basic": self.decode_basic}
+
+ if "authorization" in headers:
+ header = headers.get("authorization")
+ auth_type, data = header.split(" ", 1)
+ if auth_type in auth_schemes:
+ self.username, self.password = auth_schemes[auth_type](data)
+ else:
+ raise HTTPException(400, "Unsupported authentication scheme %s" % auth_type)
+
+ def decode_basic(self, data):
+ decoded_data = base64.decodestring(data)
+ return decoded_data.split(":", 1)
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/response.py b/testing/web-platform/tests/tools/wptserve/wptserve/response.py
new file mode 100644
index 000000000..6c073feea
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/response.py
@@ -0,0 +1,473 @@
+from collections import OrderedDict
+from datetime import datetime, timedelta
+import Cookie
+import json
+import types
+import uuid
+import socket
+
+from .constants import response_codes
+from .logger import get_logger
+
+missing = object()
+
+class Response(object):
+ """Object representing the response to a HTTP request
+
+ :param handler: RequestHandler being used for this response
+ :param request: Request that this is the response for
+
+ .. attribute:: request
+
+ Request associated with this Response.
+
+ .. attribute:: encoding
+
+ The encoding to use when converting unicode to strings for output.
+
+ .. attribute:: add_required_headers
+
+ Boolean indicating whether mandatory headers should be added to the
+ response.
+
+ .. attribute:: send_body_for_head_request
+
+ Boolean, default False, indicating whether the body content should be
+ sent when the request method is HEAD.
+
+ .. attribute:: explicit_flush
+
+ Boolean indicating whether output should be flushed automatically or only
+ when requested.
+
+ .. attribute:: writer
+
+ The ResponseWriter for this response
+
+ .. attribute:: status
+
+ Status tuple (code, message). Can be set to an integer, in which case the
+ message part is filled in automatically, or a tuple.
+
+ .. attribute:: headers
+
+ List of HTTP headers to send with the response. Each item in the list is a
+ tuple of (name, value).
+
+ .. attribute:: content
+
+       The body of the response. This can either be a string or an iterable
+ parts. If it is an iterable, any item may be a string or a function of zero
+ parameters which, when called, returns a string."""
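+
+    # A minimal sketch of populating a Response from a handler; the status,
+    # header, cookie and body values are illustrative only:
+    #
+    #   def example_handler(request, response):
+    #       response.status = 200
+    #       response.headers.set("Content-Type", "text/plain")
+    #       response.set_cookie("example", "value", max_age=timedelta(hours=1))
+    #       response.content = "Hello, world"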
+
+ def __init__(self, handler, request):
+ self.request = request
+ self.encoding = "utf8"
+
+ self.add_required_headers = True
+ self.send_body_for_head_request = False
+ self.explicit_flush = False
+ self.close_connection = False
+
+ self.writer = ResponseWriter(handler, self)
+
+ self._status = (200, None)
+ self.headers = ResponseHeaders()
+ self.content = []
+
+ self.logger = get_logger()
+
+ @property
+ def status(self):
+ return self._status
+
+ @status.setter
+ def status(self, value):
+ if hasattr(value, "__len__"):
+ if len(value) != 2:
+ raise ValueError
+ else:
+ self._status = (int(value[0]), str(value[1]))
+ else:
+ self._status = (int(value), None)
+
+ def set_cookie(self, name, value, path="/", domain=None, max_age=None,
+ expires=None, secure=False, httponly=False, comment=None):
+ """Set a cookie to be sent with a Set-Cookie header in the
+ response
+
+ :param name: String name of the cookie
+ :param value: String value of the cookie
+        :param max_age: datetime.timedelta or int representing the time (in seconds)
+ until the cookie expires
+ :param path: String path to which the cookie applies
+ :param domain: String domain to which the cookie applies
+ :param secure: Boolean indicating whether the cookie is marked as secure
+ :param httponly: Boolean indicating whether the cookie is marked as
+ HTTP Only
+ :param comment: String comment
+ :param expires: datetime.datetime or datetime.timedelta indicating a
+ time or interval from now when the cookie expires
+
+ """
+ days = dict((i+1, name) for i, name in enumerate(["jan", "feb", "mar",
+ "apr", "may", "jun",
+ "jul", "aug", "sep",
+ "oct", "nov", "dec"]))
+ if value is None:
+ value = ''
+ max_age = 0
+ expires = timedelta(days=-1)
+
+ if isinstance(expires, timedelta):
+ expires = datetime.utcnow() + expires
+
+ if expires is not None:
+ expires_str = expires.strftime("%d %%s %Y %H:%M:%S GMT")
+ expires_str = expires_str % days[expires.month]
+ expires = expires_str
+
+ if max_age is not None:
+ if hasattr(max_age, "total_seconds"):
+ max_age = int(max_age.total_seconds())
+ max_age = "%.0d" % max_age
+
+ m = Cookie.Morsel()
+
+ def maybe_set(key, value):
+ if value is not None and value is not False:
+ m[key] = value
+
+ m.set(name, value, value)
+ maybe_set("path", path)
+ maybe_set("domain", domain)
+ maybe_set("comment", comment)
+ maybe_set("expires", expires)
+ maybe_set("max-age", max_age)
+ maybe_set("secure", secure)
+ maybe_set("httponly", httponly)
+
+ self.headers.append("Set-Cookie", m.OutputString())
+
+ def unset_cookie(self, name):
+ """Remove a cookie from those that are being sent with the response"""
+ cookies = self.headers.get("Set-Cookie")
+ parser = Cookie.BaseCookie()
+ for cookie in cookies:
+ parser.load(cookie)
+
+ if name in parser.keys():
+ del self.headers["Set-Cookie"]
+ for m in parser.values():
+ if m.key != name:
+                    self.headers.append("Set-Cookie", m.OutputString())
+
+ def delete_cookie(self, name, path="/", domain=None):
+ """Delete a cookie on the client by setting it to the empty string
+ and to expire in the past"""
+ self.set_cookie(name, None, path=path, domain=domain, max_age=0,
+ expires=timedelta(days=-1))
+
+ def iter_content(self, read_file=False):
+ """Iterator returning chunks of response body content.
+
+ If any part of the content is a function, this will be called
+ and the resulting value (if any) returned.
+
+        :param read_file: boolean controlling the behaviour when content
+        is a file handle. When set to False the handle will be returned directly,
+        allowing the file to be passed to the output in small chunks. When set to
+        True, the entire content of the file will be returned as a string, facilitating
+        non-streaming operations like template substitution.
+ """
+ if isinstance(self.content, types.StringTypes):
+ yield self.content
+ elif hasattr(self.content, "read"):
+ if read_file:
+ yield self.content.read()
+ else:
+ yield self.content
+ else:
+ for item in self.content:
+ if hasattr(item, "__call__"):
+ value = item()
+ else:
+ value = item
+ if value:
+ yield value
+
+ def write_status_headers(self):
+ """Write out the status line and headers for the response"""
+ self.writer.write_status(*self.status)
+ for item in self.headers:
+ self.writer.write_header(*item)
+ self.writer.end_headers()
+
+ def write_content(self):
+ """Write out the response content"""
+ if self.request.method != "HEAD" or self.send_body_for_head_request:
+ for item in self.iter_content():
+ self.writer.write_content(item)
+
+ def write(self):
+ """Write the whole response"""
+ self.write_status_headers()
+ self.write_content()
+
+ def set_error(self, code, message=""):
+ """Set the response status headers and body to indicate an
+ error"""
+ err = {"code": code,
+ "message": message}
+ data = json.dumps({"error": err})
+ self.status = code
+ self.headers = [("Content-Type", "application/json"),
+ ("Content-Length", len(data))]
+ self.content = data
+ if code == 500:
+ self.logger.error(message)
+
+
+class MultipartContent(object):
+ def __init__(self, boundary=None, default_content_type=None):
+ self.items = []
+ if boundary is None:
+ boundary = str(uuid.uuid4())
+ self.boundary = boundary
+ self.default_content_type = default_content_type
+
+ def __call__(self):
+ boundary = "--" + self.boundary
+ rv = ["", boundary]
+ for item in self.items:
+ rv.append(str(item))
+ rv.append(boundary)
+ rv[-1] += "--"
+ return "\r\n".join(rv)
+
+ def append_part(self, data, content_type=None, headers=None):
+ if content_type is None:
+ content_type = self.default_content_type
+ self.items.append(MultipartPart(data, content_type, headers))
+
+ def __iter__(self):
+ #This is hackish; when writing the response we need an iterable
+ #or a string. For a multipart/byterange response we want an
+ #iterable that contains a single callable; the MultipartContent
+ #object itself
+ yield self
+
+
+class MultipartPart(object):
+ def __init__(self, data, content_type=None, headers=None):
+ self.headers = ResponseHeaders()
+
+ if content_type is not None:
+ self.headers.set("Content-Type", content_type)
+
+ if headers is not None:
+ for name, value in headers:
+ if name.lower() == "content-type":
+ func = self.headers.set
+ else:
+ func = self.headers.append
+ func(name, value)
+
+ self.data = data
+
+ def __str__(self):
+ rv = []
+ for item in self.headers:
+ rv.append("%s: %s" % item)
+ rv.append("")
+ rv.append(self.data)
+ return "\r\n".join(rv)
+
+
+class ResponseHeaders(object):
+ """Dictionary-like object holding the headers for the response"""
+ def __init__(self):
+ self.data = OrderedDict()
+
+ def set(self, key, value):
+ """Set a header to a specific value, overwriting any previous header
+ with the same name
+
+ :param key: Name of the header to set
+ :param value: Value to set the header to
+ """
+ self.data[key.lower()] = (key, [value])
+
+ def append(self, key, value):
+ """Add a new header with a given name, not overwriting any existing
+ headers with the same name
+
+ :param key: Name of the header to add
+ :param value: Value to set for the header
+ """
+ if key.lower() in self.data:
+ self.data[key.lower()][1].append(value)
+ else:
+ self.set(key, value)
+
+ def get(self, key, default=missing):
+ """Get the set values for a particular header."""
+ try:
+ return self[key]
+ except KeyError:
+ if default is missing:
+ return []
+ return default
+
+ def __getitem__(self, key):
+ """Get a list of values for a particular header
+
+ """
+ return self.data[key.lower()][1]
+
+ def __delitem__(self, key):
+ del self.data[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self.data
+
+ def __setitem__(self, key, value):
+ self.set(key, value)
+
+ def __iter__(self):
+ for key, values in self.data.itervalues():
+ for value in values:
+ yield key, value
+
+ def items(self):
+ return list(self)
+
+ def update(self, items_iter):
+ for name, value in items_iter:
+ self.append(name, value)
+
+ def __repr__(self):
+ return repr(self.data)
+
+
+class ResponseWriter(object):
+ """Object providing an API to write out a HTTP response.
+
+ :param handler: The RequestHandler being used.
+ :param response: The Response associated with this writer.
+
+ After each part of the response is written, the output is
+    flushed unless response.explicit_flush is True, in which case
+ the user must call .flush() explicitly."""
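+
+    # Sketch of driving the writer directly from a handler for byte-level
+    # control of the output (values illustrative); once content has been
+    # written this way, the default response-writing step is skipped:
+    #
+    #   def example_handler(request, response):
+    #       response.writer.write_status(200)
+    #       response.writer.write_header("Content-Length", "2")
+    #       response.writer.end_headers()
+    #       response.writer.write_content("OK")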
+ def __init__(self, handler, response):
+ self._wfile = handler.wfile
+ self._response = response
+ self._handler = handler
+ self._headers_seen = set()
+ self._headers_complete = False
+ self.content_written = False
+ self.request = response.request
+ self.file_chunk_size = 32 * 1024
+
+ def write_status(self, code, message=None):
+ """Write out the status line of a response.
+
+ :param code: The integer status code of the response.
+ :param message: The message of the response. Defaults to the message commonly used
+ with the status code."""
+ if message is None:
+ if code in response_codes:
+ message = response_codes[code][0]
+ else:
+ message = ''
+ self.write("%s %d %s\r\n" %
+ (self._response.request.protocol_version, code, message))
+
+ def write_header(self, name, value):
+ """Write out a single header for the response.
+
+ :param name: Name of the header field
+ :param value: Value of the header field
+ """
+ self._headers_seen.add(name.lower())
+ self.write("%s: %s\r\n" % (name, value))
+ if not self._response.explicit_flush:
+ self.flush()
+
+ def write_default_headers(self):
+ for name, f in [("Server", self._handler.version_string),
+ ("Date", self._handler.date_time_string)]:
+ if name.lower() not in self._headers_seen:
+ self.write_header(name, f())
+
+ if (type(self._response.content) in (str, unicode) and
+ "content-length" not in self._headers_seen):
+ #Would be nice to avoid double-encoding here
+ self.write_header("Content-Length", len(self.encode(self._response.content)))
+
+ def end_headers(self):
+ """Finish writing headers and write the separator.
+
+ Unless add_required_headers on the response is False,
+ this will also add HTTP-mandated headers that have not yet been supplied
+ to the response headers"""
+
+ if self._response.add_required_headers:
+ self.write_default_headers()
+
+ self.write("\r\n")
+ if "content-length" not in self._headers_seen:
+ self._response.close_connection = True
+ if not self._response.explicit_flush:
+ self.flush()
+ self._headers_complete = True
+
+ def write_content(self, data):
+ """Write the body of the response."""
+ if isinstance(data, types.StringTypes):
+ self.write(data)
+ else:
+ self.write_content_file(data)
+ if not self._response.explicit_flush:
+ self.flush()
+
+ def write(self, data):
+ """Write directly to the response, converting unicode to bytes
+ according to response.encoding. Does not flush."""
+ self.content_written = True
+ try:
+ self._wfile.write(self.encode(data))
+ except socket.error:
+ # This can happen if the socket got closed by the remote end
+ pass
+
+ def write_content_file(self, data):
+ """Write a file-like object directly to the response in chunks.
+ Does not flush."""
+ self.content_written = True
+ while True:
+ buf = data.read(self.file_chunk_size)
+ if not buf:
+ break
+ try:
+ self._wfile.write(buf)
+ except socket.error:
+ break
+ data.close()
+
+ def encode(self, data):
+ """Convert unicode to bytes according to response.encoding."""
+ if isinstance(data, str):
+ return data
+ elif isinstance(data, unicode):
+ return data.encode(self._response.encoding)
+ else:
+ raise ValueError
+
+ def flush(self):
+ """Flush the output."""
+ try:
+ self._wfile.flush()
+ except socket.error:
+ # This can happen if the socket got closed by the remote end
+ pass
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/router.py b/testing/web-platform/tests/tools/wptserve/wptserve/router.py
new file mode 100644
index 000000000..a35e098e6
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/router.py
@@ -0,0 +1,168 @@
+import itertools
+import re
+import types
+
+from .logger import get_logger
+
+any_method = object()
+
+class RouteTokenizer(object):
+ def literal(self, scanner, token):
+ return ("literal", token)
+
+ def slash(self, scanner, token):
+ return ("slash", None)
+
+ def group(self, scanner, token):
+ return ("group", token[1:-1])
+
+ def star(self, scanner, token):
+ return ("star", token[1:-3])
+
+ def scan(self, input_str):
+ scanner = re.Scanner([(r"/", self.slash),
+ (r"{\w*}", self.group),
+ (r"\*", self.star),
+ (r"(?:\\.|[^{\*/])*", self.literal),])
+ return scanner.scan(input_str)
+
+class RouteCompiler(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.star_seen = False
+
+ def compile(self, tokens):
+ self.reset()
+
+ func_map = {"slash":self.process_slash,
+ "literal":self.process_literal,
+ "group":self.process_group,
+ "star":self.process_star}
+
+ re_parts = ["^"]
+
+ if not tokens or tokens[0][0] != "slash":
+ tokens = itertools.chain([("slash", None)], tokens)
+
+ for token in tokens:
+ re_parts.append(func_map[token[0]](token))
+
+ if self.star_seen:
+ re_parts.append(")")
+ re_parts.append("$")
+
+ return re.compile("".join(re_parts))
+
+ def process_literal(self, token):
+ return re.escape(token[1])
+
+ def process_slash(self, token):
+ return "/"
+
+ def process_group(self, token):
+ if self.star_seen:
+ raise ValueError("Group seen after star in regexp")
+ return "(?P<%s>[^/]+)" % token[1]
+
+ def process_star(self, token):
+ if self.star_seen:
+ raise ValueError("Star seen after star in regexp")
+ self.star_seen = True
+ return "(.*"
+
+def compile_path_match(route_pattern):
+ """tokens: / or literal or match or *"""
+
+ tokenizer = RouteTokenizer()
+ tokens, unmatched = tokenizer.scan(route_pattern)
+
+ assert unmatched == "", unmatched
+
+ compiler = RouteCompiler()
+
+ return compiler.compile(tokens)
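+
+# Rough illustration of what the compiler produces (assuming the tokenizer
+# rules above): a pattern such as "api/{resource}/*.json" compiles to a regexp
+# along the lines of ^/api/(?P<resource>[^/]+)/(.*\.json)$, so {name} groups
+# match a single path segment and the single * captures the remainder.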
+
+class Router(object):
+ """Object for matching handler functions to requests.
+
+ :param doc_root: Absolute path of the filesystem location from
+ which to serve tests
+ :param routes: Initial routes to add; a list of three item tuples
+ (method, path_pattern, handler_function), defined
+ as for register()
+ """
+
+ def __init__(self, doc_root, routes):
+ self.doc_root = doc_root
+ self.routes = []
+ self.logger = get_logger()
+ for route in reversed(routes):
+ self.register(*route)
+
+ def register(self, methods, path, handler):
+ """Register a handler for a set of paths.
+
+ :param methods: Set of methods this should match. "*" is a
+ special value indicating that all methods should
+ be matched.
+
+        :param path_pattern: Match pattern that will be used to determine if
+                             a request path matches this route. Match patterns
+                             consist of literal text, match groups,
+                             denoted {name}, which match any character except /,
+                             and at most one \*, which matches any character and
+                             creates a match group extending to the end of the
+                             string. If there is no leading "/" on the pattern,
+                             one is automatically implied. For example::
+
+ api/{resource}/*.json
+
+ Would match `/api/test/data.json` or
+ `/api/test/test2/data.json`, but not `/api/test/data.py`.
+
+ The match groups are made available in the request object
+ as a dictionary through the route_match property. For
+ example, given the route pattern above and the path
+ `/api/test/data.json`, the route_match property would
+ contain::
+
+ {"resource": "test", "*": "data.json"}
+
+ :param handler: Function that will be called to process matching
+ requests. This must take two parameters, the request
+ object and the response object.
+
+ """
+ if type(methods) in types.StringTypes or methods in (any_method, "*"):
+ methods = [methods]
+ for method in methods:
+ self.routes.append((method, compile_path_match(path), handler))
+ self.logger.debug("Route pattern: %s" % self.routes[-1][1].pattern)
+
+ def get_handler(self, request):
+ """Get a handler for a request or None if there is no handler.
+
+ :param request: Request to get a handler for.
+ :rtype: Callable or None
+ """
+ for method, regexp, handler in reversed(self.routes):
+ if (request.method == method or
+ method in (any_method, "*") or
+ (request.method == "HEAD" and method == "GET")):
+ m = regexp.match(request.url_parts.path)
+ if m:
+ if not hasattr(handler, "__class__"):
+ name = handler.__name__
+ else:
+ name = handler.__class__.__name__
+ self.logger.debug("Found handler %s" % name)
+
+ match_parts = m.groupdict().copy()
+ if len(match_parts) < len(m.groups()):
+ match_parts["*"] = m.groups()[-1]
+ request.route_match = match_parts
+
+ return handler
+ return None
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/routes.py b/testing/web-platform/tests/tools/wptserve/wptserve/routes.py
new file mode 100644
index 000000000..b6e380001
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/routes.py
@@ -0,0 +1,6 @@
+from . import handlers
+from .router import any_method
+routes = [(any_method, "*.py", handlers.python_script_handler),
+ ("GET", "*.asis", handlers.as_is_handler),
+ ("GET", "*", handlers.file_handler),
+ ]
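+
+# One way to customise routing is to pass a different list to WebTestHttpd
+# rather than editing this default; e.g. (handler name illustrative):
+#
+#   my_routes = [("GET", "/status", status_handler)] + routes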
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/server.py b/testing/web-platform/tests/tools/wptserve/wptserve/server.py
new file mode 100644
index 000000000..31929efd6
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/server.py
@@ -0,0 +1,461 @@
+import BaseHTTPServer
+import errno
+import os
+import socket
+from SocketServer import ThreadingMixIn
+import ssl
+import sys
+import threading
+import time
+import traceback
+import types
+import urlparse
+
+from . import routes as default_routes
+from .logger import get_logger
+from .request import Server, Request
+from .response import Response
+from .router import Router
+from .utils import HTTPException
+
+
+"""HTTP server designed for testing purposes.
+
+The server is designed to provide flexibility in the way that
+requests are handled, and to provide control both of exactly
+what bytes are put on the wire for the response, and in the
+timing of sending those bytes.
+
+The server is based on the stdlib HTTPServer, but with some
+notable differences in the way that requests are processed.
+Overall processing is handled by a WebTestRequestHandler,
+which is a subclass of BaseHTTPRequestHandler. This is responsible
+for parsing the incoming request. A RequestRewriter is then
+applied and may change the request data if it matches a
+supplied rule.
+
+Once the request data has been finalised, Request and Response
+objects are constructed. These are used by the other parts of the
+system to read information about the request and manipulate the
+response.
+
+Each request is handled by a particular handler function. The
+mapping between Request and the appropriate handler is determined
+by a Router. By default handlers are installed to interpret files
+under the document root with .py extensions as executable python
+files (see handlers.py for the api for such files), .asis files as
+bytestreams to be sent literally and all other files to be served
+statically.
+
+The handler functions are responsible for either populating the
+fields of the response object, which will then be written when the
+handler returns, or for directly writing to the output stream.
+"""
+
+
+class RequestRewriter(object):
+ def __init__(self, rules):
+ """Object for rewriting the request path.
+
+ :param rules: Initial rules to add; a list of three item tuples
+ (method, input_path, output_path), defined as for
+ register()
+ """
+ self.rules = {}
+ for rule in reversed(rules):
+ self.register(*rule)
+ self.logger = get_logger()
+
+ def register(self, methods, input_path, output_path):
+ """Register a rewrite rule.
+
+ :param methods: Set of methods this should match. "*" is a
+ special value indicating that all methods should
+ be matched.
+
+ :param input_path: Path to match for the initial request.
+
+ :param output_path: Path to replace the input path with in
+ the request.
+ """
+ if type(methods) in types.StringTypes:
+ methods = [methods]
+ self.rules[input_path] = (methods, output_path)
+
+ def rewrite(self, request_handler):
+ """Rewrite the path in a BaseHTTPRequestHandler instance, if
+ it matches a rule.
+
+ :param request_handler: BaseHTTPRequestHandler for which to
+ rewrite the request.
+ """
+ split_url = urlparse.urlsplit(request_handler.path)
+ if split_url.path in self.rules:
+ methods, destination = self.rules[split_url.path]
+ if "*" in methods or request_handler.command in methods:
+ self.logger.debug("Rewriting request path %s to %s" %
+ (request_handler.path, destination))
+ new_url = list(split_url)
+ new_url[2] = destination
+ new_url = urlparse.urlunsplit(new_url)
+ request_handler.path = new_url
+
+
+class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ allow_reuse_address = True
+ acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
+ request_queue_size = 2000
+
+ # Ensure that we don't hang on shutdown waiting for requests
+ daemon_threads = True
+
+ def __init__(self, server_address, RequestHandlerClass, router, rewriter, bind_hostname,
+ config=None, use_ssl=False, key_file=None, certificate=None,
+ encrypt_after_connect=False, latency=None, **kwargs):
+ """Server for HTTP(s) Requests
+
+ :param server_address: tuple of (server_name, port)
+
+ :param RequestHandlerClass: BaseHTTPRequestHandler-like class to use for
+ handling requests.
+
+ :param router: Router instance to use for matching requests to handler
+ functions
+
+ :param rewriter: RequestRewriter-like instance to use for preprocessing
+ requests before they are routed
+
+ :param config: Dictionary holding environment configuration settings for
+ handlers to read, or None to use the default values.
+
+ :param use_ssl: Boolean indicating whether the server should use SSL
+
+ :param key_file: Path to key file to use if SSL is enabled.
+
+ :param certificate: Path to certificate to use if SSL is enabled.
+
+ :param encrypt_after_connect: For each connection, don't start encryption
+ until a CONNECT message has been received.
+ This enables the server to act as a
+ self-proxy.
+
+        :param bind_hostname: True to bind the server to both the hostname and
+ port specified in the server_address parameter.
+ False to bind the server only to the port in the
+ server_address parameter, but not to the hostname.
+        :param latency: Delay in ms to wait before serving each response, or
+ callable that returns a delay in ms
+ """
+ self.router = router
+ self.rewriter = rewriter
+
+ self.scheme = "https" if use_ssl else "http"
+ self.logger = get_logger()
+
+ self.latency = latency
+
+ if bind_hostname:
+ hostname_port = server_address
+ else:
+ hostname_port = ("",server_address[1])
+
+ #super doesn't work here because BaseHTTPServer.HTTPServer is old-style
+ BaseHTTPServer.HTTPServer.__init__(self, hostname_port, RequestHandlerClass, **kwargs)
+
+ if config is not None:
+ Server.config = config
+ else:
+ self.logger.debug("Using default configuration")
+ Server.config = {"host": server_address[0],
+ "domains": {"": server_address[0]},
+ "ports": {"http": [self.server_address[1]]}}
+
+
+ self.key_file = key_file
+ self.certificate = certificate
+ self.encrypt_after_connect = use_ssl and encrypt_after_connect
+
+ if use_ssl and not encrypt_after_connect:
+ self.socket = ssl.wrap_socket(self.socket,
+ keyfile=self.key_file,
+ certfile=self.certificate,
+ server_side=True)
+
+ def handle_error(self, request, client_address):
+ error = sys.exc_info()[1]
+
+ if ((isinstance(error, socket.error) and
+ isinstance(error.args, tuple) and
+ error.args[0] in self.acceptable_errors) or
+ (isinstance(error, IOError) and
+ error.errno in self.acceptable_errors)):
+ pass # remote hang up before the result is sent
+ else:
+ self.logger.error(traceback.format_exc())
+
+
+class WebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """RequestHandler for WebTestHttpd"""
+
+ protocol_version = "HTTP/1.1"
+
+ def handle_one_request(self):
+ response = None
+ self.logger = get_logger()
+ try:
+ self.close_connection = False
+ request_line_is_valid = self.get_request_line()
+
+ if self.close_connection:
+ return
+
+ request_is_valid = self.parse_request()
+ if not request_is_valid:
+ #parse_request() actually sends its own error responses
+ return
+
+ self.server.rewriter.rewrite(self)
+
+ request = Request(self)
+ response = Response(self, request)
+
+ if request.method == "CONNECT":
+ self.handle_connect(response)
+ return
+
+ if not request_line_is_valid:
+ response.set_error(414)
+ response.write()
+ return
+
+ self.logger.debug("%s %s" % (request.method, request.request_path))
+ handler = self.server.router.get_handler(request)
+
+ # If the handler we used for the request had a non-default base path
+            # set, update the doc_root of the request to reflect this
+ if hasattr(handler, "base_path") and handler.base_path:
+ request.doc_root = handler.base_path
+ if hasattr(handler, "url_base") and handler.url_base != "/":
+ request.url_base = handler.url_base
+
+ if self.server.latency is not None:
+ if callable(self.server.latency):
+ latency = self.server.latency()
+ else:
+ latency = self.server.latency
+ self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
+ time.sleep(latency / 1000.)
+
+ if handler is None:
+ response.set_error(404)
+ else:
+ try:
+ handler(request, response)
+ except HTTPException as e:
+ response.set_error(e.code, e.message)
+ except Exception as e:
+ if e.message:
+ err = [e.message]
+ else:
+ err = []
+ err.append(traceback.format_exc())
+ response.set_error(500, "\n".join(err))
+ self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
+ request.method,
+ request.request_path,
+ request.headers.get('Referer'),
+ request.raw_input.length))
+
+ if not response.writer.content_written:
+ response.write()
+
+ # If we want to remove this in the future, a solution is needed for
+ # scripts that produce a non-string iterable of content, since these
+ # can't set a Content-Length header. A notable example of this kind of
+ # problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
+ if response.close_connection:
+ self.close_connection = True
+
+ if not self.close_connection:
+ # Ensure that the whole request has been read from the socket
+ request.raw_input.read()
+
+ except socket.timeout as e:
+ self.log_error("Request timed out: %r", e)
+ self.close_connection = True
+ return
+
+ except Exception as e:
+ err = traceback.format_exc()
+ if response:
+ response.set_error(500, err)
+ response.write()
+ self.logger.error(err)
+
+ def get_request_line(self):
+ try:
+ self.raw_requestline = self.rfile.readline(65537)
+ except socket.error:
+ self.close_connection = True
+ return False
+ if len(self.raw_requestline) > 65536:
+ self.requestline = ''
+ self.request_version = ''
+ self.command = ''
+ return False
+        if not self.raw_requestline:
+            self.close_connection = True
+        return True
+
+ def handle_connect(self, response):
+ self.logger.debug("Got CONNECT")
+ response.status = 200
+ response.write()
+ if self.server.encrypt_after_connect:
+ self.logger.debug("Enabling SSL for connection")
+ self.request = ssl.wrap_socket(self.connection,
+ keyfile=self.server.key_file,
+ certfile=self.server.certificate,
+ server_side=True)
+ self.setup()
+ return
+
+
+class WebTestHttpd(object):
+ """
+ :param host: Host from which to serve (default: 127.0.0.1)
+ :param port: Port from which to serve (default: 8000)
+ :param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
+ :param handler_cls: Class to use for the RequestHandler
+ :param use_ssl: Use a SSL server if no explicit server_cls is supplied
+ :param key_file: Path to key file to use if ssl is enabled
+ :param certificate: Path to certificate file to use if ssl is enabled
+ :param encrypt_after_connect: For each connection, don't start encryption
+ until a CONNECT message has been received.
+ This enables the server to act as a
+ self-proxy.
+ :param router_cls: Router class to use when matching URLs to handlers
+ :param doc_root: Document root for serving files
+ :param routes: List of routes with which to initialize the router
+ :param rewriter_cls: Class to use for request rewriter
+ :param rewrites: List of rewrites with which to initialize the rewriter_cls
+ :param config: Dictionary holding environment configuration settings for
+ handlers to read, or None to use the default values.
+ :param bind_hostname: Boolean indicating whether to bind server to hostname.
+    :param latency: Delay in ms to wait before serving each response, or
+ callable that returns a delay in ms
+
+ HTTP server designed for testing scenarios.
+
+ Takes a router class which provides one method get_handler which takes a Request
+ and returns a handler function.
+
+ .. attribute:: host
+
+ The host name or ip address of the server
+
+ .. attribute:: port
+
+ The port on which the server is running
+
+ .. attribute:: router
+
+ The Router object used to associate requests with resources for this server
+
+ .. attribute:: rewriter
+
+ The Rewriter object used for URL rewriting
+
+ .. attribute:: use_ssl
+
+ Boolean indicating whether the server is using ssl
+
+ .. attribute:: started
+
+       Boolean indicating whether the server is running
+
+ """
+ def __init__(self, host="127.0.0.1", port=8000,
+ server_cls=None, handler_cls=WebTestRequestHandler,
+ use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False,
+ router_cls=Router, doc_root=os.curdir, routes=None,
+ rewriter_cls=RequestRewriter, bind_hostname=True, rewrites=None,
+ latency=None, config=None):
+
+ if routes is None:
+ routes = default_routes.routes
+
+ self.host = host
+
+ self.router = router_cls(doc_root, routes)
+ self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])
+
+ self.use_ssl = use_ssl
+ self.logger = get_logger()
+
+ if server_cls is None:
+ server_cls = WebTestServer
+
+ if use_ssl:
+ if key_file is not None:
+ assert os.path.exists(key_file)
+ assert certificate is not None and os.path.exists(certificate)
+
+ try:
+ self.httpd = server_cls((host, port),
+ handler_cls,
+ self.router,
+ self.rewriter,
+ config=config,
+ bind_hostname=bind_hostname,
+ use_ssl=use_ssl,
+ key_file=key_file,
+ certificate=certificate,
+ encrypt_after_connect=encrypt_after_connect,
+ latency=latency)
+ self.started = False
+
+ _host, self.port = self.httpd.socket.getsockname()
+ except Exception:
+ self.logger.error('Init failed! You may need to modify your hosts file. Refer to README.md.')
+ raise
+
+ def start(self, block=False):
+ """Start the server.
+
+ :param block: True to run the server on the current thread, blocking,
+ False to run on a separate thread."""
+ self.logger.info("Starting http server on %s:%s" % (self.host, self.port))
+ self.started = True
+ if block:
+ self.httpd.serve_forever()
+ else:
+ self.server_thread = threading.Thread(target=self.httpd.serve_forever)
+ self.server_thread.setDaemon(True) # don't hang on exit
+ self.server_thread.start()
+
+ def stop(self):
+ """
+ Stops the server.
+
+ If the server is not running, this method has no effect.
+ """
+ if self.started:
+ try:
+ self.httpd.shutdown()
+ self.httpd.server_close()
+ self.server_thread.join()
+ self.server_thread = None
+ self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
+ except AttributeError:
+ pass
+ self.started = False
+ self.httpd = None
+
+ def get_url(self, path="/", query=None, fragment=None):
+ if not self.started:
+ return None
+
+ return urlparse.urlunsplit(("http" if not self.use_ssl else "https",
+ "%s:%s" % (self.host, self.port),
+ path, query, fragment))
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/stash.py b/testing/web-platform/tests/tools/wptserve/wptserve/stash.py
new file mode 100644
index 000000000..b6bd6eed4
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/stash.py
@@ -0,0 +1,143 @@
+import base64
+import json
+import os
+import uuid
+from multiprocessing.managers import BaseManager, DictProxy
+
+class ServerDictManager(BaseManager):
+ shared_data = {}
+
+def _get_shared():
+ return ServerDictManager.shared_data
+
+ServerDictManager.register("get_dict",
+ callable=_get_shared,
+ proxytype=DictProxy)
+
+class ClientDictManager(BaseManager):
+ pass
+
+ClientDictManager.register("get_dict")
+
+class StashServer(object):
+ def __init__(self, address=None, authkey=None):
+ self.address = address
+ self.authkey = authkey
+ self.manager = None
+
+ def __enter__(self):
+ self.manager, self.address, self.authkey = start_server(self.address, self.authkey)
+ store_env_config(self.address, self.authkey)
+
+ def __exit__(self, *args, **kwargs):
+ if self.manager is not None:
+ self.manager.shutdown()
+
+def load_env_config():
+ address, authkey = json.loads(os.environ["WPT_STASH_CONFIG"])
+ if isinstance(address, list):
+ address = tuple(address)
+ else:
+ address = str(address)
+ authkey = base64.decodestring(authkey)
+ return address, authkey
+
+def store_env_config(address, authkey):
+ authkey = base64.encodestring(authkey)
+ os.environ["WPT_STASH_CONFIG"] = json.dumps((address, authkey))
+
+def start_server(address=None, authkey=None):
+ manager = ServerDictManager(address, authkey)
+ manager.start()
+
+ return (manager, manager._address, manager._authkey)
+
+
+#TODO: Consider expiring values after some fixed time for long-running
+#servers
+
+class Stash(object):
+ """Key-value store for persisting data across HTTP/S and WS/S requests.
+
+ This data store is specifically designed for persisting data across server
+ requests. The synchronization is achieved by using the BaseManager from
+    the multiprocessing module so different processes can access the same data.
+
+ Stash can be used interchangeably between HTTP, HTTPS, WS and WSS servers.
+ A thing to note about WS/S servers is that they require additional steps in
+ the handlers for accessing the same underlying shared data in the Stash.
+ This can usually be achieved by using load_env_config(). When using Stash
+    interchangeably between HTTP/S and WS/S requests, the path part of the key
+    should be explicitly specified if accessing the same key/value subset.
+
+ The store has several unusual properties. Keys are of the form (path,
+ uuid), where path is, by default, the path in the HTTP request and
+ uuid is a unique id. In addition, the store is write-once, read-once,
+ i.e. the value associated with a particular key cannot be changed once
+ written and the read operation (called "take") is destructive. Taken together,
+ these properties make it difficult for data to accidentally leak
+ between different resources or different requests for the same
+ resource.
+ """
+
+ _proxy = None
+
+ def __init__(self, default_path, address=None, authkey=None):
+ self.default_path = default_path
+ self.data = self._get_proxy(address, authkey)
+
+ def _get_proxy(self, address=None, authkey=None):
+ if address is None and authkey is None:
+ Stash._proxy = {}
+
+ if Stash._proxy is None:
+ manager = ClientDictManager(address, authkey)
+ manager.connect()
+ Stash._proxy = manager.get_dict()
+
+ return Stash._proxy
+
+ def _wrap_key(self, key, path):
+ if path is None:
+ path = self.default_path
+        # This key format is required to support using the path, since the data
+        # passed into the stash can be a DictProxy, which wouldn't detect changes
+        # when writing to a subdict.
+ return (str(path), str(uuid.UUID(key)))
+
+ def put(self, key, value, path=None):
+ """Place a value in the shared stash.
+
+ :param key: A UUID to use as the data's key.
+ :param value: The data to store. This can be any python object.
+ :param path: The path that has access to read the data (by default
+ the current request path)"""
+ if value is None:
+ raise ValueError("SharedStash value may not be set to None")
+ internal_key = self._wrap_key(key, path)
+ if internal_key in self.data:
+ raise StashError("Tried to overwrite existing shared stash value "
+ "for key %s (old value was %s, new value is %s)" %
+                             (internal_key, self.data[internal_key], value))
+ else:
+ self.data[internal_key] = value
+
+ def take(self, key, path=None):
+ """Remove a value from the shared stash and return it.
+
+ :param key: A UUID to use as the data's key.
+ :param path: The path that has access to read the data (by default
+ the current request path)"""
+ internal_key = self._wrap_key(key, path)
+ value = self.data.get(internal_key, None)
+ if value is not None:
+ try:
+ self.data.pop(internal_key)
+ except KeyError:
+ # Silently continue when pop error occurs.
+ pass
+
+ return value
+
+class StashError(Exception):
+ pass
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/utils.py b/testing/web-platform/tests/tools/wptserve/wptserve/utils.py
new file mode 100644
index 000000000..e57ff196a
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/utils.py
@@ -0,0 +1,14 @@
+def invert_dict(dict):
+ rv = {}
+ for key, values in dict.iteritems():
+ for value in values:
+ if value in rv:
+ raise ValueError
+ rv[value] = key
+ return rv
+
+
+class HTTPException(Exception):
+ def __init__(self, code, message=""):
+ self.code = code
+ self.message = message
diff --git a/testing/web-platform/tests/tools/wptserve/wptserve/wptserve.py b/testing/web-platform/tests/tools/wptserve/wptserve/wptserve.py
new file mode 100755
index 000000000..816c8a5a6
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptserve/wptserve/wptserve.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+import argparse
+import os
+
+import server
+
+def abs_path(path):
+ return os.path.abspath(path)
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="HTTP server designed for extreme flexibility "
+ "required in testing situations.")
+ parser.add_argument("document_root", action="store", type=abs_path,
+ help="Root directory to serve files from")
+ parser.add_argument("--port", "-p", dest="port", action="store",
+ type=int, default=8000,
+ help="Port number to run server on")
+ parser.add_argument("--host", "-H", dest="host", action="store",
+ type=str, default="127.0.0.1",
+ help="Host to run server on")
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+ httpd = server.WebTestHttpd(host=args.host, port=args.port,
+ use_ssl=False, certificate=None,
+ doc_root=args.document_root)
+ httpd.start()
+
+if __name__ == "__main__":
+ main()