From 5f8de423f190bbb79a62f804151bc24824fa32d8 Mon Sep 17 00:00:00 2001 From: "Matt A. Tobin" Date: Fri, 2 Feb 2018 04:16:08 -0500 Subject: Add m-esr52 at 52.6.0 --- tools/bloatview/bloatdiff.pl | 372 +++ tools/bloatview/bloattable.pl | 590 ++++ tools/check-moz-style/checkmozstyle.py | 172 ++ tools/check-moz-style/diff_parser.py | 162 + tools/check-moz-style/modules/__init__.py | 1 + tools/check-moz-style/modules/cpplint.py | 3150 ++++++++++++++++++++ tools/check-moz-style/modules/diff_parser.py | 180 ++ tools/check-moz-style/modules/logging.py | 39 + tools/check-moz-style/modules/scm.py | 420 +++ tools/check-moz-style/run_tests.py | 78 + tools/check-moz-style/tests/test1.cpp | 0 tools/check-moz-style/tests/test1.out | 1 + tools/check-moz-style/tests/test1.patch | 1 + tools/check-moz-style/tests/test2.cpp | 3 + tools/check-moz-style/tests/test2.out | 4 + tools/check-moz-style/tests/test2.patch | 9 + tools/check-moz-style/tests/test3.out | 3 + tools/check-moz-style/tests/test3.patch | 12 + tools/check-moz-style/tests/test4.cpp | 40 + tools/check-moz-style/tests/test4.out | 13 + tools/check-moz-style/tests/test4.patch | 49 + tools/check-moz-style/tests/test5.cpp | 24 + tools/check-moz-style/tests/test5.out | 7 + tools/check-moz-style/tests/test5.patch | 33 + tools/coverity/model.cpp | 128 + tools/docs/Vagrantfile | 13 + tools/docs/conf.py | 83 + tools/docs/index.rst | 58 + tools/docs/mach_commands.py | 117 + tools/docs/moztreedocs/__init__.py | 126 + tools/fuzzing/interface/FuzzingInterface.cpp | 67 + tools/fuzzing/interface/FuzzingInterface.h | 100 + tools/fuzzing/interface/moz.build | 15 + tools/fuzzing/libfuzzer/FuzzerCustomMain.cpp | 37 + tools/fuzzing/libfuzzer/Makefile.in | 12 + tools/fuzzing/libfuzzer/clone_libfuzzer.sh | 7 + .../libfuzzer/harness/LibFuzzerRegistry.cpp | 32 + .../fuzzing/libfuzzer/harness/LibFuzzerRegistry.h | 41 + .../fuzzing/libfuzzer/harness/LibFuzzerRunner.cpp | 38 + tools/fuzzing/libfuzzer/harness/LibFuzzerRunner.h | 23 + 
.../libfuzzer/harness/LibFuzzerTestHarness.h | 298 ++ tools/fuzzing/libfuzzer/harness/moz.build | 19 + tools/fuzzing/libfuzzer/moz.build | 26 + tools/fuzzing/moz.build | 14 + tools/jprof/README.html | 330 ++ tools/jprof/bfd.cpp | 231 ++ tools/jprof/coff.cpp | 99 + tools/jprof/elf.cpp | 133 + tools/jprof/intcnt.cpp | 71 + tools/jprof/intcnt.h | 38 + tools/jprof/jprofsig | 46 + tools/jprof/leaky.cpp | 863 ++++++ tools/jprof/leaky.h | 122 + tools/jprof/moz.build | 28 + tools/jprof/split-profile.py | 143 + tools/jprof/strset.cpp | 40 + tools/jprof/strset.h | 19 + tools/jprof/stub/Makefile.in | 8 + tools/jprof/stub/config.h | 18 + tools/jprof/stub/jprof.h | 17 + tools/jprof/stub/libmalloc.cpp | 790 +++++ tools/jprof/stub/libmalloc.h | 45 + tools/jprof/stub/moz.build | 17 + tools/leak-gauge/leak-gauge.html | 302 ++ tools/leak-gauge/leak-gauge.pl | 239 ++ tools/lint/docs/Makefile | 192 ++ tools/lint/docs/conf.py | 112 + tools/lint/docs/create.rst | 153 + tools/lint/docs/index.rst | 37 + tools/lint/docs/linters/eslint-plugin-mozilla.rst | 174 ++ tools/lint/docs/linters/eslint.rst | 45 + tools/lint/docs/linters/flake8.rst | 50 + tools/lint/docs/make.bat | 263 ++ tools/lint/docs/usage.rst | 41 + tools/lint/eslint.lint | 368 +++ tools/lint/eslint/eslint-plugin-mozilla/LICENSE | 363 +++ .../eslint/eslint-plugin-mozilla/lib/globals.js | 188 ++ .../eslint/eslint-plugin-mozilla/lib/helpers.js | 524 ++++ .../lint/eslint/eslint-plugin-mozilla/lib/index.js | 45 + .../lib/processors/xbl-bindings.js | 363 +++ .../eslint-plugin-mozilla/lib/rules/.eslintrc.js | 51 + .../lib/rules/balanced-listeners.js | 113 + .../lib/rules/import-browserjs-globals.js | 83 + .../lib/rules/import-globals.js | 15 + .../lib/rules/import-headjs-globals.js | 49 + .../lib/rules/mark-test-function-used.js | 37 + .../eslint-plugin-mozilla/lib/rules/no-aArgs.js | 55 + .../lib/rules/no-cpows-in-tests.js | 112 + .../lib/rules/no-single-arg-cu-import.js | 39 + .../lib/rules/reject-importGlobalProperties.js | 37 + 
.../lib/rules/reject-some-requires.js | 48 + .../lib/rules/var-only-at-top-level.js | 34 + .../lint/eslint/eslint-plugin-mozilla/package.json | 29 + tools/lint/eslint/manifest.tt | 9 + tools/lint/eslint/modules.json | 247 ++ tools/lint/eslint/npm-shrinkwrap.json | 718 +++++ tools/lint/eslint/package.json | 16 + tools/lint/eslint/update | 70 + tools/lint/flake8.lint | 195 ++ tools/lint/flake8/flake8_requirements.txt | 4 + tools/lint/mach_commands.py | 62 + tools/lint/wpt.lint | 55 + tools/lint/wpt_manifest.lint | 34 + tools/mach_commands.py | 364 +++ tools/memory-profiler/CompactTraceTable.h | 116 + tools/memory-profiler/GCHeapProfilerImpl.cpp | 168 ++ tools/memory-profiler/GCHeapProfilerImpl.h | 53 + tools/memory-profiler/MemoryProfiler.cpp | 324 ++ tools/memory-profiler/MemoryProfiler.h | 159 + tools/memory-profiler/NativeProfilerImpl.cpp | 82 + tools/memory-profiler/NativeProfilerImpl.h | 43 + tools/memory-profiler/UncensoredAllocator.cpp | 121 + tools/memory-profiler/UncensoredAllocator.h | 48 + tools/memory-profiler/moz.build | 29 + tools/memory-profiler/nsIMemoryProfiler.idl | 72 + tools/memory-profiler/nsMemoryProfilerFactory.cpp | 32 + tools/memory/collect_b2g_uss_data.sh | 16 + tools/mercurial/eslintvalidate.py | 76 + tools/moz.build | 7 + tools/power/mach_commands.py | 142 + tools/power/moz.build | 21 + tools/power/rapl.cpp | 900 ++++++ tools/profiler/core/EHABIStackWalk.cpp | 678 +++++ tools/profiler/core/EHABIStackWalk.h | 28 + tools/profiler/core/GeckoSampler.cpp | 1306 ++++++++ tools/profiler/core/GeckoSampler.h | 181 ++ tools/profiler/core/IntelPowerGadget.cpp | 310 ++ tools/profiler/core/IntelPowerGadget.h | 150 + tools/profiler/core/PlatformMacros.h | 76 + tools/profiler/core/ProfileBuffer.cpp | 89 + tools/profiler/core/ProfileBuffer.h | 61 + tools/profiler/core/ProfileEntry.cpp | 881 ++++++ tools/profiler/core/ProfileEntry.h | 407 +++ tools/profiler/core/ProfileJSONWriter.cpp | 115 + tools/profiler/core/ProfileJSONWriter.h | 126 + 
tools/profiler/core/ProfilerBacktrace.cpp | 33 + tools/profiler/core/ProfilerMarkers.cpp | 210 ++ tools/profiler/core/StackTop.cpp | 48 + tools/profiler/core/StackTop.h | 10 + tools/profiler/core/SyncProfile.cpp | 57 + tools/profiler/core/SyncProfile.h | 43 + tools/profiler/core/ThreadInfo.cpp | 73 + tools/profiler/core/ThreadInfo.h | 66 + tools/profiler/core/ThreadProfile.cpp | 260 ++ tools/profiler/core/ThreadProfile.h | 107 + tools/profiler/core/platform-linux.cc | 715 +++++ tools/profiler/core/platform-macos.cc | 469 +++ tools/profiler/core/platform-win32.cc | 431 +++ tools/profiler/core/platform.cpp | 1266 ++++++++ tools/profiler/core/platform.h | 431 +++ tools/profiler/core/shared-libraries-linux.cc | 159 + tools/profiler/core/shared-libraries-macos.cc | 132 + tools/profiler/core/shared-libraries-win32.cc | 137 + tools/profiler/core/v8-support.h | 48 + tools/profiler/gecko/ProfileGatherer.cpp | 207 ++ tools/profiler/gecko/Profiler.jsm | 16 + .../profiler/gecko/ProfilerIOInterposeObserver.cpp | 30 + tools/profiler/gecko/ProfilerIOInterposeObserver.h | 28 + tools/profiler/gecko/ProfilerTypes.ipdlh | 16 + tools/profiler/gecko/SaveProfileTask.cpp | 45 + tools/profiler/gecko/SaveProfileTask.h | 54 + tools/profiler/gecko/ThreadResponsiveness.cpp | 118 + tools/profiler/gecko/ThreadResponsiveness.h | 38 + tools/profiler/gecko/nsIProfileSaveEvent.idl | 19 + tools/profiler/gecko/nsIProfiler.idl | 101 + tools/profiler/gecko/nsProfiler.cpp | 308 ++ tools/profiler/gecko/nsProfiler.h | 29 + tools/profiler/gecko/nsProfilerCIID.h | 14 + tools/profiler/gecko/nsProfilerFactory.cpp | 31 + tools/profiler/gecko/nsProfilerStartParams.cpp | 67 + tools/profiler/gecko/nsProfilerStartParams.h | 32 + tools/profiler/lul/AutoObjectMapper.cpp | 207 ++ tools/profiler/lul/AutoObjectMapper.h | 115 + tools/profiler/lul/LulCommon.cpp | 114 + tools/profiler/lul/LulCommonExt.h | 554 ++++ tools/profiler/lul/LulDwarf.cpp | 2180 ++++++++++++++ tools/profiler/lul/LulDwarfExt.h | 1287 ++++++++ 
tools/profiler/lul/LulDwarfInt.h | 194 ++ tools/profiler/lul/LulDwarfSummariser.cpp | 359 +++ tools/profiler/lul/LulDwarfSummariser.h | 65 + tools/profiler/lul/LulElf.cpp | 915 ++++++ tools/profiler/lul/LulElfExt.h | 68 + tools/profiler/lul/LulElfInt.h | 234 ++ tools/profiler/lul/LulMain.cpp | 1963 ++++++++++++ tools/profiler/lul/LulMain.h | 397 +++ tools/profiler/lul/LulMainInt.h | 393 +++ tools/profiler/lul/LulPlatformMacros.h | 53 + tools/profiler/lul/platform-linux-lul.cpp | 88 + tools/profiler/lul/platform-linux-lul.h | 24 + tools/profiler/merge-profiles.py | 113 + tools/profiler/moz.build | 147 + tools/profiler/nm-symbolicate.py | 48 + tools/profiler/public/GeckoProfiler.h | 300 ++ tools/profiler/public/GeckoProfilerFunc.h | 125 + tools/profiler/public/GeckoProfilerImpl.h | 522 ++++ tools/profiler/public/ProfileGatherer.h | 42 + tools/profiler/public/ProfilerBacktrace.h | 36 + tools/profiler/public/ProfilerMarkers.h | 193 ++ tools/profiler/public/PseudoStack.h | 469 +++ tools/profiler/public/shared-libraries.h | 137 + tools/profiler/tasktracer/GeckoTaskTracer.cpp | 472 +++ tools/profiler/tasktracer/GeckoTaskTracer.h | 92 + tools/profiler/tasktracer/GeckoTaskTracerImpl.h | 102 + tools/profiler/tasktracer/SourceEventTypeMap.h | 11 + tools/profiler/tasktracer/TracedTaskCommon.cpp | 169 ++ tools/profiler/tasktracer/TracedTaskCommon.h | 73 + tools/profiler/tests/gtest/LulTest.cpp | 51 + tools/profiler/tests/gtest/LulTestDwarf.cpp | 2597 ++++++++++++++++ .../profiler/tests/gtest/LulTestInfrastructure.cpp | 491 +++ tools/profiler/tests/gtest/LulTestInfrastructure.h | 666 +++++ tools/profiler/tests/gtest/ThreadProfileTest.cpp | 75 + tools/profiler/tests/gtest/moz.build | 30 + tools/profiler/tests/head_profiler.js | 31 + tools/profiler/tests/test_asm.js | 79 + tools/profiler/tests/test_enterjit_osr.js | 59 + .../profiler/tests/test_enterjit_osr_disabling.js | 21 + tools/profiler/tests/test_enterjit_osr_enabling.js | 21 + tools/profiler/tests/test_get_features.js | 18 
+ tools/profiler/tests/test_pause.js | 35 + tools/profiler/tests/test_run.js | 44 + tools/profiler/tests/test_shared_library.js | 23 + tools/profiler/tests/test_start.js | 25 + tools/profiler/tests/xpcshell.ini | 18 + tools/quitter/Makefile.in | 6 + tools/quitter/QuitterObserver.js | 70 + tools/quitter/chrome.manifest | 4 + tools/quitter/contentscript.js | 37 + tools/quitter/install.rdf | 35 + tools/quitter/jar.mn | 3 + tools/quitter/moz.build | 21 + tools/quitter/quitter@mozilla.org.xpi | Bin 0 -> 6864 bytes tools/rb/README | 7 + tools/rb/filter-log.pl | 44 + tools/rb/find-comptr-leakers.pl | 114 + tools/rb/find_leakers.py | 100 + tools/rb/fix_linux_stack.py | 317 ++ tools/rb/fix_macosx_stack.py | 133 + tools/rb/fix_stack_using_bpsyms.py | 163 + tools/rb/make-tree.pl | 303 ++ tools/rewriting/ThirdPartyPaths.txt | 63 + tools/update-packaging/Makefile.in | 79 + tools/update-packaging/README | 4 + tools/update-packaging/common.sh | 205 ++ tools/update-packaging/generatesnippet.py | 166 ++ tools/update-packaging/make_full_update.sh | 118 + tools/update-packaging/make_incremental_update.sh | 327 ++ tools/update-packaging/make_incremental_updates.py | 560 ++++ tools/update-packaging/moz.build | 6 + tools/update-packaging/test/buildrefmars.sh | 27 + tools/update-packaging/test/catmanifest.sh | 14 + tools/update-packaging/test/common.sh | 202 ++ tools/update-packaging/test/diffmar.sh | 51 + .../Contents/MacOS/diff-patch-larger-than-file.txt | 1 + .../test/from-mac/Contents/MacOS/force.txt | 1 + .../test/from-mac/Contents/MacOS/removed.txt | 1 + .../test/from-mac/Contents/MacOS/same.bin | Bin 0 -> 200 bytes .../test/from-mac/Contents/MacOS/update.manifest | 1 + .../MacOS/{foodir/diff-patch-larger-than-file.txt | 1 + .../from-mac/Contents/MacOS/{foodir/readme.txt | 1 + .../from-mac/Contents/MacOS/{foodir/removed.txt | 1 + .../test/from-mac/Contents/MacOS/{foodir/same.bin | Bin 0 -> 200 bytes .../test/from-mac/Contents/MacOS/{foodir/same.txt | 1 + 
.../Contents/MacOS/{foodir/update.manifest | 1 + .../from-mac/Contents/Resources/application.ini | 5 + .../diff/diff-patch-larger-than-file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + .../test/from-mac/Contents/Resources/precomplete | 26 + .../test/from-mac/Contents/Resources/readme.txt | 2 + .../test/from-mac/Contents/Resources/removed-files | 8 + .../test/from-mac/Contents/Resources/removed.txt | 1 + .../test/from-mac/Contents/Resources/same.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + .../Contents/Resources/update-settings.ini | 1 + .../Contents/Resources/{foodir/channel-prefs.js | 1 + .../from-mac/Contents/Resources/{foodir/force.txt | 1 + tools/update-packaging/test/from/application.ini | 5 + .../test/from/diff-patch-larger-than-file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + tools/update-packaging/test/from/force.txt | 1 + tools/update-packaging/test/from/precomplete | 23 + tools/update-packaging/test/from/readme.txt | 2 + tools/update-packaging/test/from/removed-files | 8 + tools/update-packaging/test/from/removed.txt | 1 + tools/update-packaging/test/from/same.bin | Bin 0 -> 200 bytes tools/update-packaging/test/from/same.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + .../update-packaging/test/from/update-settings.ini | 1 + tools/update-packaging/test/from/update.manifest | 1 + .../test/from/{foodir/channel-prefs.js | 1 + .../from/{foodir/diff-patch-larger-than-file.txt | 1 + tools/update-packaging/test/from/{foodir/force.txt | 1 + .../update-packaging/test/from/{foodir/readme.txt | 1 + .../update-packaging/test/from/{foodir/removed.txt | 1 + tools/update-packaging/test/from/{foodir/same.bin | Bin 0 -> 200 bytes tools/update-packaging/test/from/{foodir/same.txt | 1 + .../test/from/{foodir/update.manifest | 1 + tools/update-packaging/test/make_full_update.sh | 119 + tools/update-packaging/test/runtests.sh | 12 + tools/update-packaging/test/testpatchfile.txt | 2 + 
.../test/to-mac/Contents/MacOS/addFeedPrefs.js | 1 + .../test/to-mac/Contents/MacOS/added.txt | 1 + .../Contents/MacOS/diff-patch-larger-than-file.bin | Bin 0 -> 200 bytes .../Contents/MacOS/diff-patch-larger-than-file.txt | 1 + .../test/to-mac/Contents/MacOS/force.txt | 1 + .../test/to-mac/Contents/MacOS/same.bin | Bin 0 -> 200 bytes .../test/to-mac/Contents/MacOS/update.manifest | 1 + .../test/to-mac/Contents/MacOS/{foodir/added.txt | 1 + .../MacOS/{foodir/diff-patch-larger-than-file.txt | 1 + .../test/to-mac/Contents/MacOS/{foodir/readme.txt | 1 + .../test/to-mac/Contents/MacOS/{foodir/same.bin | Bin 0 -> 200 bytes .../test/to-mac/Contents/MacOS/{foodir/same.txt | 1 + .../to-mac/Contents/MacOS/{foodir/update.manifest | 1 + .../test/to-mac/Contents/Resources/application.ini | 5 + .../distribution/extensions/added/file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + .../Contents/Resources/extensions/added/file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + .../test/to-mac/Contents/Resources/precomplete | 33 + .../test/to-mac/Contents/Resources/readme.txt | 1 + .../test/to-mac/Contents/Resources/removed-files | 14 + .../test/to-mac/Contents/Resources/same.txt | 1 + .../Resources/searchplugins/added/file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + .../to-mac/Contents/Resources/update-settings.ini | 1 + .../Contents/Resources/{foodir/channel-prefs.js | 1 + .../to-mac/Contents/Resources/{foodir/force.txt | 1 + tools/update-packaging/test/to/addFeedPrefs.js | 1 + tools/update-packaging/test/to/added.txt | 1 + tools/update-packaging/test/to/application.ini | 5 + .../test/to/diff-patch-larger-than-file.bin | Bin 0 -> 200 bytes .../test/to/diff-patch-larger-than-file.txt | 1 + .../test/to/distribution/extensions/added/file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + .../test/to/extensions/added/file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + tools/update-packaging/test/to/force.txt | 1 + 
tools/update-packaging/test/to/precomplete | 30 + tools/update-packaging/test/to/readme.txt | 1 + tools/update-packaging/test/to/removed-files | 14 + tools/update-packaging/test/to/same.bin | Bin 0 -> 200 bytes tools/update-packaging/test/to/same.txt | 1 + .../test/to/searchplugins/added/file.txt | 1 + .../diff/diff-patch-larger-than-file.txt | 1 + tools/update-packaging/test/to/update-settings.ini | 1 + tools/update-packaging/test/to/update.manifest | 1 + tools/update-packaging/test/to/{foodir/added.txt | 1 + .../test/to/{foodir/channel-prefs.js | 1 + .../to/{foodir/diff-patch-larger-than-file.txt | 1 + tools/update-packaging/test/to/{foodir/force.txt | 1 + tools/update-packaging/test/to/{foodir/readme.txt | 1 + tools/update-packaging/test/to/{foodir/same.bin | Bin 0 -> 200 bytes tools/update-packaging/test/to/{foodir/same.txt | 1 + .../test/to/{foodir/update.manifest | 1 + .../test_make_incremental_updates.py | 151 + tools/update-packaging/unwrap_full_update.pl | 67 + 356 files changed, 48605 insertions(+) create mode 100755 tools/bloatview/bloatdiff.pl create mode 100755 tools/bloatview/bloattable.pl create mode 100755 tools/check-moz-style/checkmozstyle.py create mode 100644 tools/check-moz-style/diff_parser.py create mode 100644 tools/check-moz-style/modules/__init__.py create mode 100644 tools/check-moz-style/modules/cpplint.py create mode 100644 tools/check-moz-style/modules/diff_parser.py create mode 100644 tools/check-moz-style/modules/logging.py create mode 100644 tools/check-moz-style/modules/scm.py create mode 100755 tools/check-moz-style/run_tests.py create mode 100644 tools/check-moz-style/tests/test1.cpp create mode 100644 tools/check-moz-style/tests/test1.out create mode 100644 tools/check-moz-style/tests/test1.patch create mode 100644 tools/check-moz-style/tests/test2.cpp create mode 100644 tools/check-moz-style/tests/test2.out create mode 100644 tools/check-moz-style/tests/test2.patch create mode 100644 tools/check-moz-style/tests/test3.out create 
mode 100644 tools/check-moz-style/tests/test3.patch create mode 100644 tools/check-moz-style/tests/test4.cpp create mode 100644 tools/check-moz-style/tests/test4.out create mode 100644 tools/check-moz-style/tests/test4.patch create mode 100644 tools/check-moz-style/tests/test5.cpp create mode 100644 tools/check-moz-style/tests/test5.out create mode 100644 tools/check-moz-style/tests/test5.patch create mode 100644 tools/coverity/model.cpp create mode 100644 tools/docs/Vagrantfile create mode 100644 tools/docs/conf.py create mode 100644 tools/docs/index.rst create mode 100644 tools/docs/mach_commands.py create mode 100644 tools/docs/moztreedocs/__init__.py create mode 100644 tools/fuzzing/interface/FuzzingInterface.cpp create mode 100644 tools/fuzzing/interface/FuzzingInterface.h create mode 100644 tools/fuzzing/interface/moz.build create mode 100644 tools/fuzzing/libfuzzer/FuzzerCustomMain.cpp create mode 100644 tools/fuzzing/libfuzzer/Makefile.in create mode 100755 tools/fuzzing/libfuzzer/clone_libfuzzer.sh create mode 100644 tools/fuzzing/libfuzzer/harness/LibFuzzerRegistry.cpp create mode 100644 tools/fuzzing/libfuzzer/harness/LibFuzzerRegistry.h create mode 100644 tools/fuzzing/libfuzzer/harness/LibFuzzerRunner.cpp create mode 100644 tools/fuzzing/libfuzzer/harness/LibFuzzerRunner.h create mode 100644 tools/fuzzing/libfuzzer/harness/LibFuzzerTestHarness.h create mode 100644 tools/fuzzing/libfuzzer/harness/moz.build create mode 100644 tools/fuzzing/libfuzzer/moz.build create mode 100644 tools/fuzzing/moz.build create mode 100644 tools/jprof/README.html create mode 100644 tools/jprof/bfd.cpp create mode 100644 tools/jprof/coff.cpp create mode 100644 tools/jprof/elf.cpp create mode 100644 tools/jprof/intcnt.cpp create mode 100644 tools/jprof/intcnt.h create mode 100755 tools/jprof/jprofsig create mode 100644 tools/jprof/leaky.cpp create mode 100644 tools/jprof/leaky.h create mode 100644 tools/jprof/moz.build create mode 100755 tools/jprof/split-profile.py create 
mode 100644 tools/jprof/strset.cpp create mode 100644 tools/jprof/strset.h create mode 100644 tools/jprof/stub/Makefile.in create mode 100644 tools/jprof/stub/config.h create mode 100644 tools/jprof/stub/jprof.h create mode 100644 tools/jprof/stub/libmalloc.cpp create mode 100644 tools/jprof/stub/libmalloc.h create mode 100644 tools/jprof/stub/moz.build create mode 100644 tools/leak-gauge/leak-gauge.html create mode 100755 tools/leak-gauge/leak-gauge.pl create mode 100644 tools/lint/docs/Makefile create mode 100644 tools/lint/docs/conf.py create mode 100644 tools/lint/docs/create.rst create mode 100644 tools/lint/docs/index.rst create mode 100644 tools/lint/docs/linters/eslint-plugin-mozilla.rst create mode 100644 tools/lint/docs/linters/eslint.rst create mode 100644 tools/lint/docs/linters/flake8.rst create mode 100644 tools/lint/docs/make.bat create mode 100644 tools/lint/docs/usage.rst create mode 100644 tools/lint/eslint.lint create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/LICENSE create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/globals.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/helpers.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/index.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/processors/xbl-bindings.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/.eslintrc.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/balanced-listeners.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/import-browserjs-globals.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/import-globals.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/import-headjs-globals.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/mark-test-function-used.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/no-aArgs.js create mode 100644 
tools/lint/eslint/eslint-plugin-mozilla/lib/rules/no-cpows-in-tests.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/no-single-arg-cu-import.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/reject-importGlobalProperties.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/reject-some-requires.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/lib/rules/var-only-at-top-level.js create mode 100644 tools/lint/eslint/eslint-plugin-mozilla/package.json create mode 100644 tools/lint/eslint/manifest.tt create mode 100644 tools/lint/eslint/modules.json create mode 100644 tools/lint/eslint/npm-shrinkwrap.json create mode 100644 tools/lint/eslint/package.json create mode 100755 tools/lint/eslint/update create mode 100644 tools/lint/flake8.lint create mode 100644 tools/lint/flake8/flake8_requirements.txt create mode 100644 tools/lint/mach_commands.py create mode 100644 tools/lint/wpt.lint create mode 100644 tools/lint/wpt_manifest.lint create mode 100644 tools/mach_commands.py create mode 100644 tools/memory-profiler/CompactTraceTable.h create mode 100644 tools/memory-profiler/GCHeapProfilerImpl.cpp create mode 100644 tools/memory-profiler/GCHeapProfilerImpl.h create mode 100644 tools/memory-profiler/MemoryProfiler.cpp create mode 100644 tools/memory-profiler/MemoryProfiler.h create mode 100644 tools/memory-profiler/NativeProfilerImpl.cpp create mode 100644 tools/memory-profiler/NativeProfilerImpl.h create mode 100644 tools/memory-profiler/UncensoredAllocator.cpp create mode 100644 tools/memory-profiler/UncensoredAllocator.h create mode 100644 tools/memory-profiler/moz.build create mode 100644 tools/memory-profiler/nsIMemoryProfiler.idl create mode 100644 tools/memory-profiler/nsMemoryProfilerFactory.cpp create mode 100644 tools/memory/collect_b2g_uss_data.sh create mode 100644 tools/mercurial/eslintvalidate.py create mode 100644 tools/moz.build create mode 100644 tools/power/mach_commands.py create 
mode 100644 tools/power/moz.build create mode 100644 tools/power/rapl.cpp create mode 100644 tools/profiler/core/EHABIStackWalk.cpp create mode 100644 tools/profiler/core/EHABIStackWalk.h create mode 100644 tools/profiler/core/GeckoSampler.cpp create mode 100644 tools/profiler/core/GeckoSampler.h create mode 100644 tools/profiler/core/IntelPowerGadget.cpp create mode 100644 tools/profiler/core/IntelPowerGadget.h create mode 100644 tools/profiler/core/PlatformMacros.h create mode 100644 tools/profiler/core/ProfileBuffer.cpp create mode 100644 tools/profiler/core/ProfileBuffer.h create mode 100644 tools/profiler/core/ProfileEntry.cpp create mode 100644 tools/profiler/core/ProfileEntry.h create mode 100644 tools/profiler/core/ProfileJSONWriter.cpp create mode 100644 tools/profiler/core/ProfileJSONWriter.h create mode 100644 tools/profiler/core/ProfilerBacktrace.cpp create mode 100644 tools/profiler/core/ProfilerMarkers.cpp create mode 100644 tools/profiler/core/StackTop.cpp create mode 100644 tools/profiler/core/StackTop.h create mode 100644 tools/profiler/core/SyncProfile.cpp create mode 100644 tools/profiler/core/SyncProfile.h create mode 100644 tools/profiler/core/ThreadInfo.cpp create mode 100644 tools/profiler/core/ThreadInfo.h create mode 100644 tools/profiler/core/ThreadProfile.cpp create mode 100644 tools/profiler/core/ThreadProfile.h create mode 100644 tools/profiler/core/platform-linux.cc create mode 100644 tools/profiler/core/platform-macos.cc create mode 100644 tools/profiler/core/platform-win32.cc create mode 100644 tools/profiler/core/platform.cpp create mode 100644 tools/profiler/core/platform.h create mode 100644 tools/profiler/core/shared-libraries-linux.cc create mode 100644 tools/profiler/core/shared-libraries-macos.cc create mode 100644 tools/profiler/core/shared-libraries-win32.cc create mode 100644 tools/profiler/core/v8-support.h create mode 100644 tools/profiler/gecko/ProfileGatherer.cpp create mode 100644 tools/profiler/gecko/Profiler.jsm 
create mode 100644 tools/profiler/gecko/ProfilerIOInterposeObserver.cpp create mode 100644 tools/profiler/gecko/ProfilerIOInterposeObserver.h create mode 100644 tools/profiler/gecko/ProfilerTypes.ipdlh create mode 100644 tools/profiler/gecko/SaveProfileTask.cpp create mode 100644 tools/profiler/gecko/SaveProfileTask.h create mode 100644 tools/profiler/gecko/ThreadResponsiveness.cpp create mode 100644 tools/profiler/gecko/ThreadResponsiveness.h create mode 100644 tools/profiler/gecko/nsIProfileSaveEvent.idl create mode 100644 tools/profiler/gecko/nsIProfiler.idl create mode 100644 tools/profiler/gecko/nsProfiler.cpp create mode 100644 tools/profiler/gecko/nsProfiler.h create mode 100644 tools/profiler/gecko/nsProfilerCIID.h create mode 100644 tools/profiler/gecko/nsProfilerFactory.cpp create mode 100644 tools/profiler/gecko/nsProfilerStartParams.cpp create mode 100644 tools/profiler/gecko/nsProfilerStartParams.h create mode 100644 tools/profiler/lul/AutoObjectMapper.cpp create mode 100644 tools/profiler/lul/AutoObjectMapper.h create mode 100644 tools/profiler/lul/LulCommon.cpp create mode 100644 tools/profiler/lul/LulCommonExt.h create mode 100644 tools/profiler/lul/LulDwarf.cpp create mode 100644 tools/profiler/lul/LulDwarfExt.h create mode 100644 tools/profiler/lul/LulDwarfInt.h create mode 100644 tools/profiler/lul/LulDwarfSummariser.cpp create mode 100644 tools/profiler/lul/LulDwarfSummariser.h create mode 100644 tools/profiler/lul/LulElf.cpp create mode 100644 tools/profiler/lul/LulElfExt.h create mode 100644 tools/profiler/lul/LulElfInt.h create mode 100644 tools/profiler/lul/LulMain.cpp create mode 100644 tools/profiler/lul/LulMain.h create mode 100644 tools/profiler/lul/LulMainInt.h create mode 100644 tools/profiler/lul/LulPlatformMacros.h create mode 100644 tools/profiler/lul/platform-linux-lul.cpp create mode 100644 tools/profiler/lul/platform-linux-lul.h create mode 100755 tools/profiler/merge-profiles.py create mode 100644 tools/profiler/moz.build create 
mode 100755 tools/profiler/nm-symbolicate.py create mode 100644 tools/profiler/public/GeckoProfiler.h create mode 100644 tools/profiler/public/GeckoProfilerFunc.h create mode 100644 tools/profiler/public/GeckoProfilerImpl.h create mode 100644 tools/profiler/public/ProfileGatherer.h create mode 100644 tools/profiler/public/ProfilerBacktrace.h create mode 100644 tools/profiler/public/ProfilerMarkers.h create mode 100644 tools/profiler/public/PseudoStack.h create mode 100644 tools/profiler/public/shared-libraries.h create mode 100644 tools/profiler/tasktracer/GeckoTaskTracer.cpp create mode 100644 tools/profiler/tasktracer/GeckoTaskTracer.h create mode 100644 tools/profiler/tasktracer/GeckoTaskTracerImpl.h create mode 100644 tools/profiler/tasktracer/SourceEventTypeMap.h create mode 100644 tools/profiler/tasktracer/TracedTaskCommon.cpp create mode 100644 tools/profiler/tasktracer/TracedTaskCommon.h create mode 100644 tools/profiler/tests/gtest/LulTest.cpp create mode 100644 tools/profiler/tests/gtest/LulTestDwarf.cpp create mode 100644 tools/profiler/tests/gtest/LulTestInfrastructure.cpp create mode 100644 tools/profiler/tests/gtest/LulTestInfrastructure.h create mode 100644 tools/profiler/tests/gtest/ThreadProfileTest.cpp create mode 100644 tools/profiler/tests/gtest/moz.build create mode 100644 tools/profiler/tests/head_profiler.js create mode 100644 tools/profiler/tests/test_asm.js create mode 100644 tools/profiler/tests/test_enterjit_osr.js create mode 100644 tools/profiler/tests/test_enterjit_osr_disabling.js create mode 100644 tools/profiler/tests/test_enterjit_osr_enabling.js create mode 100644 tools/profiler/tests/test_get_features.js create mode 100644 tools/profiler/tests/test_pause.js create mode 100644 tools/profiler/tests/test_run.js create mode 100644 tools/profiler/tests/test_shared_library.js create mode 100644 tools/profiler/tests/test_start.js create mode 100644 tools/profiler/tests/xpcshell.ini create mode 100644 tools/quitter/Makefile.in create 
mode 100644 tools/quitter/QuitterObserver.js create mode 100644 tools/quitter/chrome.manifest create mode 100644 tools/quitter/contentscript.js create mode 100644 tools/quitter/install.rdf create mode 100644 tools/quitter/jar.mn create mode 100644 tools/quitter/moz.build create mode 100644 tools/quitter/quitter@mozilla.org.xpi create mode 100644 tools/rb/README create mode 100755 tools/rb/filter-log.pl create mode 100755 tools/rb/find-comptr-leakers.pl create mode 100755 tools/rb/find_leakers.py create mode 100755 tools/rb/fix_linux_stack.py create mode 100755 tools/rb/fix_macosx_stack.py create mode 100755 tools/rb/fix_stack_using_bpsyms.py create mode 100755 tools/rb/make-tree.pl create mode 100644 tools/rewriting/ThirdPartyPaths.txt create mode 100644 tools/update-packaging/Makefile.in create mode 100644 tools/update-packaging/README create mode 100755 tools/update-packaging/common.sh create mode 100644 tools/update-packaging/generatesnippet.py create mode 100755 tools/update-packaging/make_full_update.sh create mode 100755 tools/update-packaging/make_incremental_update.sh create mode 100755 tools/update-packaging/make_incremental_updates.py create mode 100644 tools/update-packaging/moz.build create mode 100755 tools/update-packaging/test/buildrefmars.sh create mode 100755 tools/update-packaging/test/catmanifest.sh create mode 100755 tools/update-packaging/test/common.sh create mode 100755 tools/update-packaging/test/diffmar.sh create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/force.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/removed.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/same.bin create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/update.manifest create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/{foodir/diff-patch-larger-than-file.txt 
create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/{foodir/readme.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/{foodir/removed.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/{foodir/same.bin create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/{foodir/same.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/MacOS/{foodir/update.manifest create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/application.ini create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/distribution/extensions/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/extensions/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/precomplete create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/readme.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/removed-files create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/removed.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/same.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/searchplugins/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/update-settings.ini create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/{foodir/channel-prefs.js create mode 100644 tools/update-packaging/test/from-mac/Contents/Resources/{foodir/force.txt create mode 100644 tools/update-packaging/test/from/application.ini create mode 100644 tools/update-packaging/test/from/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/from/distribution/extensions/diff/diff-patch-larger-than-file.txt create mode 100644 
tools/update-packaging/test/from/extensions/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/from/force.txt create mode 100644 tools/update-packaging/test/from/precomplete create mode 100644 tools/update-packaging/test/from/readme.txt create mode 100644 tools/update-packaging/test/from/removed-files create mode 100644 tools/update-packaging/test/from/removed.txt create mode 100644 tools/update-packaging/test/from/same.bin create mode 100644 tools/update-packaging/test/from/same.txt create mode 100644 tools/update-packaging/test/from/searchplugins/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/from/update-settings.ini create mode 100644 tools/update-packaging/test/from/update.manifest create mode 100644 tools/update-packaging/test/from/{foodir/channel-prefs.js create mode 100644 tools/update-packaging/test/from/{foodir/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/from/{foodir/force.txt create mode 100644 tools/update-packaging/test/from/{foodir/readme.txt create mode 100644 tools/update-packaging/test/from/{foodir/removed.txt create mode 100644 tools/update-packaging/test/from/{foodir/same.bin create mode 100644 tools/update-packaging/test/from/{foodir/same.txt create mode 100644 tools/update-packaging/test/from/{foodir/update.manifest create mode 100755 tools/update-packaging/test/make_full_update.sh create mode 100755 tools/update-packaging/test/runtests.sh create mode 100644 tools/update-packaging/test/testpatchfile.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/addFeedPrefs.js create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/added.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/diff-patch-larger-than-file.bin create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/force.txt 
create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/same.bin create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/update.manifest create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/{foodir/added.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/{foodir/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/{foodir/readme.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/{foodir/same.bin create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/{foodir/same.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/MacOS/{foodir/update.manifest create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/application.ini create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/distribution/extensions/added/file.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/distribution/extensions/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/extensions/added/file.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/extensions/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/precomplete create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/readme.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/removed-files create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/same.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/searchplugins/added/file.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/searchplugins/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/update-settings.ini create mode 100644 
tools/update-packaging/test/to-mac/Contents/Resources/{foodir/channel-prefs.js create mode 100644 tools/update-packaging/test/to-mac/Contents/Resources/{foodir/force.txt create mode 100644 tools/update-packaging/test/to/addFeedPrefs.js create mode 100644 tools/update-packaging/test/to/added.txt create mode 100644 tools/update-packaging/test/to/application.ini create mode 100644 tools/update-packaging/test/to/diff-patch-larger-than-file.bin create mode 100644 tools/update-packaging/test/to/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to/distribution/extensions/added/file.txt create mode 100644 tools/update-packaging/test/to/distribution/extensions/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to/extensions/added/file.txt create mode 100644 tools/update-packaging/test/to/extensions/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to/force.txt create mode 100644 tools/update-packaging/test/to/precomplete create mode 100644 tools/update-packaging/test/to/readme.txt create mode 100644 tools/update-packaging/test/to/removed-files create mode 100644 tools/update-packaging/test/to/same.bin create mode 100644 tools/update-packaging/test/to/same.txt create mode 100644 tools/update-packaging/test/to/searchplugins/added/file.txt create mode 100644 tools/update-packaging/test/to/searchplugins/diff/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to/update-settings.ini create mode 100644 tools/update-packaging/test/to/update.manifest create mode 100644 tools/update-packaging/test/to/{foodir/added.txt create mode 100644 tools/update-packaging/test/to/{foodir/channel-prefs.js create mode 100644 tools/update-packaging/test/to/{foodir/diff-patch-larger-than-file.txt create mode 100644 tools/update-packaging/test/to/{foodir/force.txt create mode 100644 tools/update-packaging/test/to/{foodir/readme.txt create mode 100644 
tools/update-packaging/test/to/{foodir/same.bin create mode 100644 tools/update-packaging/test/to/{foodir/same.txt create mode 100644 tools/update-packaging/test/to/{foodir/update.manifest create mode 100644 tools/update-packaging/test_make_incremental_updates.py create mode 100755 tools/update-packaging/unwrap_full_update.pl (limited to 'tools') diff --git a/tools/bloatview/bloatdiff.pl b/tools/bloatview/bloatdiff.pl new file mode 100755 index 000000000..8c93ad2b0 --- /dev/null +++ b/tools/bloatview/bloatdiff.pl @@ -0,0 +1,372 @@ +#!/usr/bin/perl -w +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +################################################################################ + +sub usage() { + print < a.out +# **make change** +# firefox-bin -P default resource:///res/bloatcycle.html > b.out +# bloatdiff.pl a.out b.out + +EOUSAGE +} + +$OLDFILE = $ARGV[0]; +$NEWFILE = $ARGV[1]; +#$LABEL = $ARGV[2]; + +if (!$OLDFILE or + ! -e $OLDFILE or + -z $OLDFILE) { + print "\nError: Previous log file not specified, does not exist, or is empty.\n\n"; + &usage(); + exit 1; +} + +if (!$NEWFILE or + ! 
-e $NEWFILE or + -z $NEWFILE) { + print "\nError: Current log file not specified, does not exist, or is empty.\n\n"; + &usage(); + exit 1; +} + +sub processFile { + my ($filename, $map, $prevMap) = @_; + open(FH, $filename); + while () { + if (m{ + ^\s*(\d+)\s # Line number + ([\w:]+)\s+ # Name + (-?\d+)\s+ # Size + (-?\d+)\s+ # Leaked + (-?\d+)\s+ # Objects Total + (-?\d+)\s+ # Objects Rem + \(\s*(-?[\d.]+)\s+ # Objects Mean + \+/-\s+ + ([\w.]+)\)\s+ # Objects StdDev + (-?\d+)\s+ # Reference Total + (-?\d+)\s+ # Reference Rem + \(\s*(-?[\d.]+)\s+ # Reference Mean + \+/-\s+ + ([\w\.]+)\) # Reference StdDev + }x) { + $$map{$2} = { name => $2, + size => $3, + leaked => $4, + objTotal => $5, + objRem => $6, + objMean => $7, + objStdDev => $8, + refTotal => $9, + refRem => $10, + refMean => $11, + refStdDev => $12, + bloat => $3 * $5 # size * objTotal + }; + } else { +# print "failed to parse: $_\n"; + } + } + close(FH); +} + +%oldMap = (); +processFile($OLDFILE, \%oldMap); + +%newMap = (); +processFile($NEWFILE, \%newMap); + +################################################################################ + +$inf = 9999999.99; + +sub getLeaksDelta { + my ($key) = @_; + my $oldLeaks = $oldMap{$key}{leaked} || 0; + my $newLeaks = $newMap{$key}{leaked}; + my $percentLeaks = 0; + if ($oldLeaks == 0) { + if ($newLeaks != 0) { + # there weren't any leaks before, but now there are! 
+ $percentLeaks = $inf; + } + } + else { + $percentLeaks = ($newLeaks - $oldLeaks) / $oldLeaks * 100; + } + # else we had no record of this class before + return ($newLeaks - $oldLeaks, $percentLeaks); +} + +################################################################################ + +sub getBloatDelta { + my ($key) = @_; + my $newBloat = $newMap{$key}{bloat}; + my $percentBloat = 0; + my $oldSize = $oldMap{$key}{size} || 0; + my $oldTotal = $oldMap{$key}{objTotal} || 0; + my $oldBloat = $oldTotal * $oldSize; + if ($oldBloat == 0) { + if ($newBloat != 0) { + # this class wasn't used before, but now it is + $percentBloat = $inf; + } + } + else { + $percentBloat = ($newBloat - $oldBloat) / $oldBloat * 100; + } + # else we had no record of this class before + return ($newBloat - $oldBloat, $percentBloat); +} + +################################################################################ + +foreach $key (keys %newMap) { + my ($newLeaks, $percentLeaks) = getLeaksDelta($key); + my ($newBloat, $percentBloat) = getBloatDelta($key); + $newMap{$key}{leakDelta} = $newLeaks; + $newMap{$key}{leakPercent} = $percentLeaks; + $newMap{$key}{bloatDelta} = $newBloat; + $newMap{$key}{bloatPercent} = $percentBloat; +} + +################################################################################ + +# Print a value of bytes out in a reasonable +# KB, MB, or GB form. Copied from build-seamonkey-util.pl, sorry. -mcafee +sub PrintSize($) { + + # print a number with 3 significant figures + sub PrintNum($) { + my ($num) = @_; + my $rv; + if ($num < 1) { + $rv = sprintf "%.3f", ($num); + } elsif ($num < 10) { + $rv = sprintf "%.2f", ($num); + } elsif ($num < 100) { + $rv = sprintf "%.1f", ($num); + } else { + $rv = sprintf "%d", ($num); + } + } + + my ($size) = @_; + my $rv; + if ($size > 1000000000) { + $rv = PrintNum($size / 1000000000.0) . "G"; + } elsif ($size > 1000000) { + $rv = PrintNum($size / 1000000.0) . 
"M"; + } elsif ($size > 1000) { + $rv = PrintNum($size / 1000.0) . "K"; + } else { + $rv = PrintNum($size); + } +} + + +print "Bloat/Leak Delta Report\n"; +print "--------------------------------------------------------------------------------------\n"; +print "Current file: $NEWFILE\n"; +print "Previous file: $OLDFILE\n"; +print "----------------------------------------------leaks------leaks%------bloat------bloat%\n"; + + if (! $newMap{"TOTAL"} or + ! $newMap{"TOTAL"}{bloat}) { + # It's OK if leaked or leakPercent are 0 (in fact, that would be good). + # If bloatPercent is zero, it is also OK, because we may have just had + # two runs exactly the same or with no new bloat. + print "\nError: unable to calculate bloat/leak data.\n"; + print "There is no data present.\n\n"; + print "HINT - Did your test run complete successfully?\n"; + print "HINT - Are you pointing at the right log files?\n\n"; + &usage(); + exit 1; + } + +printf "%-40s %10s %10.2f%% %10s %10.2f%%\n", + ("TOTAL", + $newMap{"TOTAL"}{leaked}, $newMap{"TOTAL"}{leakPercent}, + $newMap{"TOTAL"}{bloat}, $newMap{"TOTAL"}{bloatPercent}); + +################################################################################ + +sub percentStr { + my ($p) = @_; + if ($p == $inf) { + return "-"; + } + else { + return sprintf "%10.2f%%", $p; + } +} + +# NEW LEAKS +@keys = sort { $newMap{$b}{leakPercent} <=> $newMap{$a}{leakPercent} } keys %newMap; +my $needsHeading = 1; +my $total = 0; +foreach $key (@keys) { + my $percentLeaks = $newMap{$key}{leakPercent}; + my $leaks = $newMap{$key}{leaked}; + if ($percentLeaks > 0 && $key !~ /TOTAL/) { + if ($needsHeading) { + printf "--NEW-LEAKS-----------------------------------leaks------leaks%%-----------------------\n"; + $needsHeading = 0; + } + printf "%-40s %10s %10s\n", ($key, $leaks, percentStr($percentLeaks)); + $total += $leaks; + } +} +if (!$needsHeading) { + printf "%-40s %10s\n", ("TOTAL", $total); +} + +# FIXED LEAKS +@keys = sort { $newMap{$b}{leakPercent} <=> 
$newMap{$a}{leakPercent} } keys %newMap; +$needsHeading = 1; +$total = 0; +foreach $key (@keys) { + my $percentLeaks = $newMap{$key}{leakPercent}; + my $leaks = $newMap{$key}{leaked}; + if ($percentLeaks < 0 && $key !~ /TOTAL/) { + if ($needsHeading) { + printf "--FIXED-LEAKS---------------------------------leaks------leaks%%-----------------------\n"; + $needsHeading = 0; + } + printf "%-40s %10s %10s\n", ($key, $leaks, percentStr($percentLeaks)); + $total += $leaks; + } +} +if (!$needsHeading) { + printf "%-40s %10s\n", ("TOTAL", $total); +} + +# NEW BLOAT +@keys = sort { $newMap{$b}{bloatPercent} <=> $newMap{$a}{bloatPercent} } keys %newMap; +$needsHeading = 1; +$total = 0; +foreach $key (@keys) { + my $percentBloat = $newMap{$key}{bloatPercent}; + my $bloat = $newMap{$key}{bloat}; + if ($percentBloat > 0 && $key !~ /TOTAL/) { + if ($needsHeading) { + printf "--NEW-BLOAT-----------------------------------bloat------bloat%%-----------------------\n"; + $needsHeading = 0; + } + printf "%-40s %10s %10s\n", ($key, $bloat, percentStr($percentBloat)); + $total += $bloat; + } +} +if (!$needsHeading) { + printf "%-40s %10s\n", ("TOTAL", $total); +} + +# ALL LEAKS +@keys = sort { $newMap{$b}{leaked} <=> $newMap{$a}{leaked} } keys %newMap; +$needsHeading = 1; +$total = 0; +foreach $key (@keys) { + my $leaks = $newMap{$key}{leaked}; + my $percentLeaks = $newMap{$key}{leakPercent}; + if ($leaks > 0) { + if ($needsHeading) { + printf "--ALL-LEAKS-----------------------------------leaks------leaks%%-----------------------\n"; + $needsHeading = 0; + } + printf "%-40s %10s %10s\n", ($key, $leaks, percentStr($percentLeaks)); + if ($key !~ /TOTAL/) { + $total += $leaks; + } + } +} +if (!$needsHeading) { +# printf "%-40s %10s\n", ("TOTAL", $total); +} + +# ALL BLOAT +@keys = sort { $newMap{$b}{bloat} <=> $newMap{$a}{bloat} } keys %newMap; +$needsHeading = 1; +$total = 0; +foreach $key (@keys) { + my $bloat = $newMap{$key}{bloat}; + my $percentBloat = $newMap{$key}{bloatPercent}; + 
if ($bloat > 0) { + if ($needsHeading) { + printf "--ALL-BLOAT-----------------------------------bloat------bloat%%-----------------------\n"; + $needsHeading = 0; + } + printf "%-40s %10s %10s\n", ($key, $bloat, percentStr($percentBloat)); + if ($key !~ /TOTAL/) { + $total += $bloat; + } + } +} +if (!$needsHeading) { +# printf "%-40s %10s\n", ("TOTAL", $total); +} + +# NEW CLASSES +@keys = sort { $newMap{$b}{bloatDelta} <=> $newMap{$a}{bloatDelta} } keys %newMap; +$needsHeading = 1; +my $ltotal = 0; +my $btotal = 0; +foreach $key (@keys) { + my $leaks = $newMap{$key}{leaked}; + my $bloat = $newMap{$key}{bloat}; + my $percentBloat = $newMap{$key}{bloatPercent}; + if ($percentBloat == $inf && $key !~ /TOTAL/) { + if ($needsHeading) { + printf "--CLASSES-NOT-REPORTED-LAST-TIME--------------leaks------bloat------------------------\n"; + $needsHeading = 0; + } + printf "%-40s %10s %10s\n", ($key, $leaks, $bloat); + if ($key !~ /TOTAL/) { + $ltotal += $leaks; + $btotal += $bloat; + } + } +} +if (!$needsHeading) { + printf "%-40s %10s %10s\n", ("TOTAL", $ltotal, $btotal); +} + +# OLD CLASSES +@keys = sort { ($oldMap{$b}{bloat} || 0) <=> ($oldMap{$a}{bloat} || 0) } keys %oldMap; +$needsHeading = 1; +$ltotal = 0; +$btotal = 0; +foreach $key (@keys) { + if (!defined($newMap{$key})) { + my $leaks = $oldMap{$key}{leaked}; + my $bloat = $oldMap{$key}{bloat}; + if ($needsHeading) { + printf "--CLASSES-THAT-WENT-AWAY----------------------leaks------bloat------------------------\n"; + $needsHeading = 0; + } + printf "%-40s %10s %10s\n", ($key, $leaks, $bloat); + if ($key !~ /TOTAL/) { + $ltotal += $leaks; + $btotal += $bloat; + } + } +} +if (!$needsHeading) { + printf "%-40s %10s %10s\n", ("TOTAL", $ltotal, $btotal); +} + +print "--------------------------------------------------------------------------------------\n"; diff --git a/tools/bloatview/bloattable.pl b/tools/bloatview/bloattable.pl new file mode 100755 index 000000000..e8acfabed --- /dev/null +++ 
b/tools/bloatview/bloattable.pl @@ -0,0 +1,590 @@ +#!/usr/bin/perl -w +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# bloattable [-debug] [-source] [-byte n|-obj n|-ref n] ... > +# +# file1, file2, ... filen should be successive BloatView files generated from the same run. +# Summarize them in an HTML table. Output the HTML to the standard output. +# +# If -debug is set, create a slightly larger html file which is more suitable for debugging this script. +# If -source is set, create an html file that prints the html source as the output +# If -byte n, -obj n, or -ref n is given, make the page default to showing byte, object, or reference statistics, +# respectively, and sort by the nth column (n is zero-based, so the first column has n==0). +# +# See http://lxr.mozilla.org/mozilla/source/xpcom/doc/MemoryTools.html + +use 5.004; +use strict; +use diagnostics; +use File::Basename; +use Getopt::Long; + +# The generated HTML is almost entirely generated by a script. Only the , , and elements are explicit +# because a \n"; + print "\n\n"; + print "\n"; + if ($source) { + print "

"; + print quoteHTML "\n"; + print quoteHTML "\n"; + print quoteHTML "\n"; + print "\n"; + print quoteHTML "\n\n"; + print quoteHTML "\n"; + print "\n"; + print quoteHTML "\n"; + print quoteHTML "\n"; + print "

\n"; + } else { + print "\n"; + } + print "\n"; + print "\n"; +} + + + +# Read the bloat file into hash table $h. The hash table is indexed by class names; +# each entry is a list with the following elements: +# bytesAlloc Total number of bytes allocated +# bytesNet Total number of bytes allocated but not deallocated +# objectsAlloc Total number of objects allocated +# objectsNet Total number of objects allocated but not deallocated +# refsAlloc Total number of references AddRef'd +# refsNet Total number of references AddRef'd but not Released +# Except for TOTAL, all hash table entries refer to mutually exclusive data. +# $sizes is a hash table indexed by class names. Each entry of that table contains the class's instance size. +sub readBloatFile($\%\%) { + my ($file, $h, $sizes) = @_; + local $_; # Needed for 'while ()' below. + + my $readSomething = 0; + open FILE, $file; + while () { + if (my ($name, $size, $bytesNet, $objectsAlloc, $objectsNet, $refsAlloc, $refsNet) = + /^\s*(?:\d+)\s+([\w:]+)\s+(\d+)\s+(-?\d+)\s+(\d+)\s+(-?\d+)\s*\([^()]*\)\s*(\d+)\s+(-?\d+)\s*\([^()]*\)\s*$/) { + my $bytesAlloc; + if ($name eq "TOTAL") { + $size = "undefined"; + $bytesAlloc = "undefined"; + } else { + $bytesAlloc = $objectsAlloc * $size; + if ($bytesNet != $objectsNet * $size) { + print STDERR "In '$file', class $name bytesNet != objectsNet * size: $bytesNet != $objectsNet * $size\n"; + } + } + print STDERR "Duplicate entry $name in '$file'\n" if $$h{$name}; + $$h{$name} = [$bytesAlloc, $bytesNet, $objectsAlloc, $objectsNet, $refsAlloc, $refsNet]; + + my $oldSize = $$sizes{$name}; + print STDERR "Mismatch of sizes of class $name: $oldSize and $size\n" if defined($oldSize) && $size ne $oldSize; + $$sizes{$name} = $size; + $readSomething = 1; + } elsif (/^\s*(?:\d+)\s+([\w:]+)\s/) { + print STDERR "Unable to parse '$file' line: $_"; + } + } + close FILE; + print STDERR "No data in '$file'\n" unless $readSomething; + return $h; +} + + +my %sizes; # => +my %tables; # => ; see 
readBloatFile for format of + +# Generate the JavaScript source code for the row named $c. $l can contain the initial entries of the row. +sub genTableRowSource($$) { + my ($l, $c) = @_; + my $lastE; + foreach (@ARGV) { + my $e = $tables{$_}{$c}; + if (defined($lastE) && !defined($e)) { + $e = [0,0,0,0,0,0]; + print STDERR "Class $c is defined in an earlier file but not in '$_'\n"; + } + if (defined $e) { + if (defined $lastE) { + for (my $i = 0; $i <= $#$e; $i++) { + my $n = $$e[$i]; + $l .= ($n eq "undefined" ? "undefined" : $n - $$lastE[$i]) . ","; + } + $l .= " "; + } else { + $l .= join(",", @$e) . ", "; + } + $lastE = $e; + } else { + $l .= "0,0,0,0,0,0, "; + } + } + $l .= join(",", @$lastE); + return "[$l]"; +} + + + +my $debug; +my $source; +my $showMode; +my $sortColumn; +my @modeOptions; + +GetOptions("debug" => \$debug, "source" => \$source, "byte=i" => \$modeOptions[0], "obj=i" => \$modeOptions[1], "ref=i" => \$modeOptions[2]); +for (my $i = 0; $i != 3; $i++) { + my $modeOption = $modeOptions[$i]; + if ($modeOption) { + die "Only one of -byte, -obj, or -ref may be given" if defined $showMode; + my $nFileColumns = scalar(@ARGV) + 1; + die "-byte, -obj, or -ref column number out of range" if $modeOption < 0 || $modeOption >= 2 + 2*$nFileColumns; + $showMode = $i; + if ($modeOption >= 2) { + $modeOption -= 2; + $sortColumn = 2 + $showMode*2; + if ($modeOption >= $nFileColumns) { + $sortColumn++; + $modeOption -= $nFileColumns; + } + $sortColumn += $modeOption*6; + } else { + $sortColumn = $modeOption; + } + } +} +unless (defined $showMode) { + $showMode = 0; + $sortColumn = 0; +} + +# Read all of the bloat files. +foreach (@ARGV) { + unless ($tables{$_}) { + my $f = $_; + my %table; + + readBloatFile $_, %table, %sizes; + $tables{$_} = \%table; + } +} +die "No input" unless %sizes; + +my @scriptData; # JavaScript source for the tables passed to JavaScript. Each entry is one line of JavaScript. 
+my @persistentScriptData; # Same as @scriptData, but persists the page reloads itself. + +# Print a list of bloat file names. +push @persistentScriptData, "var nFiles = " . scalar(@ARGV) . ";"; +push @persistentScriptData, "var fileTags = [" . join(", ", map {singleQuoteString substr(fileCoreName($_), -10)} @ARGV) . "];"; +push @persistentScriptData, "var fileNames = [" . join(", ", map {singleQuoteString $_} @ARGV) . "];"; +push @persistentScriptData, "var fileDates = [" . join(", ", map {singleQuoteString localtime fileModDate $_} @ARGV) . "];"; + +# Print the bloat tables. +push @persistentScriptData, "var totals = " . genTableRowSource('"TOTAL", undefined, ', "TOTAL") . ";"; +push @scriptData, "var classTables = ["; +delete $sizes{"TOTAL"}; +my @classes = sort(keys %sizes); +for (my $i = 0; $i <= $#classes; $i++) { + my $c = $classes[$i]; + push @scriptData, genTableRowSource(doubleQuoteString($c).", ".$sizes{$c}.", ", $c) . ($i == $#classes ? "];" : ","); +} + +generate(@scriptData, @persistentScriptData, $debug, $source, $showMode, $sortColumn); +1; + + +# The source of the eval'd JavaScript follows. +# Comments starting with // that are alone on a line are stripped by the Perl script. +__END__ + +// showMode: 0=bytes, 1=objects, 2=references +var showMode; +var modeName; +var modeNameUpper; + +var sortColumn; + +// Sort according to the sortColumn. Column 0 is sorted alphabetically in ascending order. +// All other columns are sorted numerically in descending order, with column 0 used for a secondary sort. +// Undefined is always listed last. 
+function sortCompare(x, y) { + if (sortColumn) { + var xc = x[sortColumn]; + var yc = y[sortColumn]; + if (xc < yc || xc === undefined && yc !== undefined) return 1; + if (yc < xc || yc === undefined && xc !== undefined) return -1; + } + + var x0 = x[0]; + var y0 = y[0]; + if (x0 > y0 || x0 === undefined && y0 !== undefined) return 1; + if (y0 > x0 || y0 === undefined && x0 !== undefined) return -1; + return 0; +} + + +// Quote special HTML characters in the string. +function quoteHTML(s) { + s = s.replace(/&/g, '&'); + // Can't use //g, '>'); + s = s.replace(/ /g, ' '); + return s; +} + + +function writeFileTable(d) { + d.writeln(''); + d.writeln('\n\n\n\n'); + for (var i = 0; i < nFiles; i++) + d.writeln('\n\n\n\n'); + d.writeln('
NameFileDate
'+quoteHTML(fileTags[i])+''+quoteHTML(fileNames[i])+''+quoteHTML(fileDates[i])+'
'); +} + + +function writeReloadLink(d, column, s, rowspan) { + d.write(rowspan == 1 ? '' : ''); + if (column != sortColumn) + d.write(''); + d.write(s); + if (column != sortColumn) + d.write(''); + d.writeln(''); +} + +function writeClassTableRow(d, row, base, modeName) { + if (modeName) { + d.writeln('\n'+modeName+''); + } else { + d.writeln('\n'+quoteHTML(row[0])+''); + var v = row[1]; + d.writeln(''+(v === undefined ? '' : v)+''); + } + for (var i = 0; i != 2; i++) { + var c = base + i; + for (var j = 0; j <= nFiles; j++) { + v = row[c]; + var style = 'num'; + if (j != nFiles) + if (v > 0) { + style = 'pos'; + v = '+'+v; + } else + style = 'neg'; + d.writeln(''+(v === undefined ? '' : v)+''); + c += 6; + } + } + d.writeln(''); +} + +function writeClassTable(d) { + var base = 2 + showMode*2; + + // Make a copy because a sort is destructive. + var table = classTables.concat(); + table.sort(sortCompare); + + d.writeln(''); + + d.writeln(''); + writeReloadLink(d, 0, 'Class Name', 2); + writeReloadLink(d, 1, 'Instance
Size', 2); + d.writeln(''); + d.writeln('\n'); + d.writeln(''); + for (var i = 0; i != 2; i++) { + var c = base + i; + for (var j = 0; j <= nFiles; j++) { + writeReloadLink(d, c, j == nFiles ? 'Total' : quoteHTML(fileTags[j]), 1); + c += 6; + } + } + d.writeln(''); + + writeClassTableRow(d, totals, base, 0); + for (var r = 0; r < table.length; r++) + writeClassTableRow(d, table[r], base, 0); + + d.writeln('
'+modeNameUpper+'s allocated'+modeNameUpper+'s allocated but not freed
'); +} + + +var modeNames = ["byte", "object", "reference"]; +var modeNamesUpper = ["Byte", "Object", "Reference"]; +var styleSheet = ''; + + +function showHead(d) { + modeName = modeNames[showMode]; + modeNameUpper = modeNamesUpper[showMode]; + d.writeln(''+modeNameUpper+' Bloats'); + d.writeln(styleSheet); +} + +function showBody(d) { + d.writeln('

'+modeNameUpper+' Bloats

'); + writeFileTable(d); + d.write('
'); + for (var i = 0; i != 3; i++) + if (i != showMode) { + var newSortColumn = sortColumn; + if (sortColumn >= 2) + newSortColumn = sortColumn + (i-showMode)*2; + d.write(''); + } + d.writeln('
'); + d.writeln('

The numbers do not include malloc\'d data such as string contents.

'); + d.writeln('

Click on a column heading to sort by that column. Click on a class name to see details for that class.

'); + writeClassTable(d); +} + + +function showRowDetail(rowName) { + var row; + var i; + + if (rowName == "TOTAL") + row = totals; + else { + for (i = 0; i < classTables.length; i++) + if (rowName == classTables[i][0]) { + row = classTables[i]; + break; + } + } + if (row) { + var w = window.open("", "ClassTableRowDetails"); + var d = w.document; + d.open(); + d.writeln(''); + d.writeln('\n\n'+quoteHTML(rowName)+' bloat details'); + d.writeln(styleSheet); + d.writeln('\n\n'); + d.writeln('

'+quoteHTML(rowName)+'

'); + if (row[1] !== undefined) + d.writeln('

Each instance has '+row[1]+' bytes.

'); + + d.writeln(''); + d.writeln('\n\n'); + d.writeln('\n'); + d.writeln('\n'); + for (i = 0; i != 2; i++) + for (var j = 0; j <= nFiles; j++) + d.writeln(''); + d.writeln(''); + + for (i = 0; i != 3; i++) + writeClassTableRow(d, row, 2+i*2, modeNamesUpper[i]+'s'); + + d.writeln('
AllocatedAllocated but not freed
'+(j == nFiles ? 'Total' : quoteHTML(fileTags[j]))+'
\n\n'); + d.close(); + } + return undefined; +} + + +function stringSource(s) { + s = s.replace(/\\/g, '\\\\'); + s = s.replace(/"/g, '\\"'); + s = s.replace(/<\//g, '<\\/'); + return '"'+s+'"'; +} + +function reloadSelf(n,m) { + // Need to cache these because globals go away on document.open(). + var sa = srcArray; + var ss = stringSource; + var ct = classTables; + var i; + + document.open(); + // Uncomment this and comment the document.open() line above to see the reloaded page's source. + //var w = window.open("", "NewDoc"); + //var d = w.document; + //var document = new Object; + //document.write = function () { + // for (var i = 0; i < arguments.length; i++) { + // var s = arguments[i].toString(); + // s = s.replace(/&/g, '&'); + // s = s.replace(/\x3C/g, '<'); + // s = s.replace(/>/g, '>'); + // s = s.replace(/ /g, ' '); + // d.write(s); + // } + //}; + //document.writeln = function () { + // for (var i = 0; i < arguments.length; i++) { + // var s = arguments[i].toString(); + // s = s.replace(/&/g, '&'); + // s = s.replace(/\x3C/g, '<'); + // s = s.replace(/>/g, '>'); + // s = s.replace(/ /g, ' '); + // d.write(s); + // } + // d.writeln('
'); + //}; + + document.writeln(''); + document.writeln('\n\n\n\n\n\n\n\n'); + document.close(); + return undefined; +} diff --git a/tools/check-moz-style/checkmozstyle.py b/tools/check-moz-style/checkmozstyle.py new file mode 100755 index 000000000..d8261aec5 --- /dev/null +++ b/tools/check-moz-style/checkmozstyle.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Script to run the linter for source code of WebKit.""" + +import os +import os.path +import re +import sys + +import modules.cpplint as cpplint +from modules.diff_parser import DiffParser +from modules.scm import detect_scm_system + + +# Override the usage of the lint tool. +cpplint._USAGE = """ +Syntax: %(program_name)s [--verbose=#] [--git-commit=] [--output=vs7] [--filter=-x,+y,...] + + The style guidelines this tries to follow are those in + http://webkit.org/coding/coding-style.html + + Every problem is given a confidence score from 1-5, with 5 meaning we are + certain of the problem, and 1 meaning it could be a legitimate construct. + This will miss some errors, and is not a substitute for a code review. + + To prevent specific lines from being linted, add a '// NOLINT' comment to the + end of the line. + + Linted extensions are .cpp, .c and .h. Other file types will be ignored. + + Flags: + + verbose=# + Specify a number 0-5 to restrict errors to certain verbosity levels. + + git-commit= + Check style for a specified git commit. + Note that the program checks style based on current local file + instead of actual diff of the git commit. So, if the files are + updated after the specified git commit, the information of line + number may be wrong. + + output=vs7 + By default, the output is formatted to ease emacs parsing. Visual Studio + compatible output (vs7) may also be used. Other formats are unsupported. + + filter=-x,+y,... + Specify a comma-separated list of category-filters to apply: only + error messages whose category names pass the filters will be printed. + (Category names are printed with the message and look like + "[whitespace/indent]".) Filters are evaluated left to right. + "-FOO" and "FOO" means "do not print categories that start with FOO". + "+FOO" means "do print categories that start with FOO". 
+ + Examples: --filter=-whitespace,+whitespace/braces + --filter=whitespace,runtime/printf,+runtime/printf_format + --filter=-,+build/include_what_you_use + + To see a list of all the categories used in %(program_name)s, pass no arg: + --filter= +""" % {'program_name': sys.argv[0]} + +def process_patch(patch_string, root, cwd, scm): + """Does lint on a single patch. + + Args: + patch_string: A string of a patch. + """ + patch = DiffParser(patch_string.splitlines()) + + if not len(patch.files): + cpplint.error("patch", 0, "patch/notempty", 3, + "Patch does not appear to diff against any file.") + return + + if not patch.status_line: + cpplint.error("patch", 0, "patch/nosummary", 3, + "Patch does not have a summary.") + else: + proper_format = re.match(r"^Bug [0-9]+ - ", patch.status_line) + if not proper_format: + proper_format = re.match(r"^No bug - ", patch.status_line) + cpplint.error("patch", 0, "patch/bugnumber", 3, + "Patch summary should begin with 'Bug XXXXX - ' " + + "or 'No bug -'.") + + if not patch.patch_description: + cpplint.error("patch", 0, "patch/nodescription", 3, + "Patch does not have a description.") + + for filename, diff in patch.files.iteritems(): + file_extension = os.path.splitext(filename)[1] + + if file_extension in ['.cpp', '.c', '.h']: + line_numbers = set() + orig_filename = filename + + def error_for_patch(filename, line_number, category, confidence, + message): + """Wrapper function of cpplint.error for patches. + + This function outputs errors only if the line number + corresponds to lines which are modified or added. + """ + if not line_numbers: + for line in diff.lines: + # When deleted line is not set, it means that + # the line is newly added. 
+ if not line[0]: + line_numbers.add(line[1]) + + if line_number in line_numbers: + cpplint.error(orig_filename, line_number, + category, confidence, message) + + cpplint.process_file(os.path.join(root, filename), + relative_name=orig_filename, + error=error_for_patch) + + +def main(): + cpplint.use_mozilla_styles() + + (args, flags) = cpplint.parse_arguments(sys.argv[1:], ["git-commit="]) + if args: + sys.stderr.write("ERROR: We don't support files as arguments for " + + "now.\n" + cpplint._USAGE) + sys.exit(1) + + cwd = os.path.abspath('.') + scm = detect_scm_system(cwd) + root = scm.find_checkout_root(cwd) + + if "--git-commit" in flags: + process_patch(scm.create_patch_from_local_commit(flags["--git-commit"]), root, cwd, scm) + else: + process_patch(scm.create_patch(), root, cwd, scm) + + sys.stderr.write('Total errors found: %d\n' % cpplint.error_count()) + sys.exit(cpplint.error_count() > 0) + + +if __name__ == "__main__": + main() diff --git a/tools/check-moz-style/diff_parser.py b/tools/check-moz-style/diff_parser.py new file mode 100644 index 000000000..91898af31 --- /dev/null +++ b/tools/check-moz-style/diff_parser.py @@ -0,0 +1,162 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""WebKit's Python module for interacting with patches.""" + +import logging +import re + + +_regexp_compile_cache = {} + + +def match(pattern, string): + """Matches the string with the pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = re.compile(pattern) + return _regexp_compile_cache[pattern].match(string) + + +def git_diff_to_svn_diff(line): + """Converts a git formatted diff line to a svn formatted line. + + Args: + line: A string representing a line of the diff. 
+ """ + conversion_patterns = (("^diff --git a/(.+) b/(?P.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"), + ("^new file.*", lambda matched: "\n"), + ("^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}", lambda matched: "===================================================================\n"), + ("^--- a/(?P.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"), + ("^\+\+\+ b/(?P.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n")) + + for pattern, conversion in conversion_patterns: + matched = match(pattern, line) + if matched: + return conversion(matched) + return line + + +def get_diff_converter(first_diff_line): + """Gets a converter function of diff lines. + + Args: + first_diff_line: The first filename line of a diff file. + If this line is git formatted, we'll return a + converter from git to SVN. + """ + if match(r"^diff --git a/", first_diff_line): + return git_diff_to_svn_diff + return lambda input: input + + +_INITIAL_STATE = 1 +_DECLARED_FILE_PATH = 2 +_PROCESSING_CHUNK = 3 + + +class DiffFile: + """Contains the information for one file in a patch. + + The field "lines" is a list which contains tuples in this format: + (deleted_line_number, new_line_number, line_string) + If deleted_line_number is zero, it means this line is newly added. + If new_line_number is zero, it means this line is deleted. + """ + + def __init__(self, filename): + self.filename = filename + self.lines = [] + + def add_new_line(self, line_number, line): + self.lines.append((0, line_number, line)) + + def add_deleted_line(self, line_number, line): + self.lines.append((line_number, 0, line)) + + def add_unchanged_line(self, deleted_line_number, new_line_number, line): + self.lines.append((deleted_line_number, new_line_number, line)) + + +class DiffParser: + """A parser for a patch file. + + The field "files" is a dict whose key is the filename and value is + a DiffFile object. + """ + + def __init__(self, diff_input): + """Parses a diff. 
+ + Args: + diff_input: An iterable object. + """ + state = _INITIAL_STATE + + self.files = {} + current_file = None + old_diff_line = None + new_diff_line = None + for line in diff_input: + line = line.rstrip("\n") + if state == _INITIAL_STATE: + transform_line = get_diff_converter(line) + line = transform_line(line) + + file_declaration = match(r"^Index: (?P.+)", line) + if file_declaration: + filename = file_declaration.group('FilePath') + current_file = DiffFile(filename) + self.files[filename] = current_file + state = _DECLARED_FILE_PATH + continue + + lines_changed = match(r"^@@ -(?P\d+)(,\d+)? \+(?P\d+)(,\d+)? @@", line) + if lines_changed: + if state != _DECLARED_FILE_PATH and state != _PROCESSING_CHUNK: + logging.error('Unexpected line change without file path declaration: %r' % line) + old_diff_line = int(lines_changed.group('OldStartLine')) + new_diff_line = int(lines_changed.group('NewStartLine')) + state = _PROCESSING_CHUNK + continue + + if state == _PROCESSING_CHUNK: + if line.startswith('+'): + current_file.add_new_line(new_diff_line, line[1:]) + new_diff_line += 1 + elif line.startswith('-'): + current_file.add_deleted_line(old_diff_line, line[1:]) + old_diff_line += 1 + elif line.startswith(' '): + current_file.add_unchanged_line(old_diff_line, new_diff_line, line[1:]) + old_diff_line += 1 + new_diff_line += 1 + elif line == '\\ No newline at end of file': + # Nothing to do. We may still have some added lines. 
+ pass + else: + logging.error('Unexpected diff format when parsing a chunk: %r' % line) diff --git a/tools/check-moz-style/modules/__init__.py b/tools/check-moz-style/modules/__init__.py new file mode 100644 index 000000000..ef65bee5b --- /dev/null +++ b/tools/check-moz-style/modules/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/tools/check-moz-style/modules/cpplint.py b/tools/check-moz-style/modules/cpplint.py new file mode 100644 index 000000000..c01e82d45 --- /dev/null +++ b/tools/check-moz-style/modules/cpplint.py @@ -0,0 +1,3150 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This is the modified version of Google's cpplint. The original code is +# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py + +"""Does WebKit-lint on c++ files. + +The goal of this script is to identify places in the code that *may* +be in non-compliance with WebKit style. It does not attempt to fix +up these problems -- the point is to educate. It does also not +attempt to find all problems, or to ensure that everything it does +find is legitimately a problem. + +In particular, we can get very confused by /* and // inside strings! +We do a small hack, which is to ignore //'s with "'s after them on the +same line, but it is far from perfect (in either direction). +""" + +import codecs +import getopt +import math # for log +import os +import os.path +import re +import sre_compile +import string +import sys +import unicodedata + + +_USAGE = """ +Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] + [file] ... + + The style guidelines this tries to follow are those in + http://webkit.org/coding/coding-style.html + + Every problem is given a confidence score from 1-5, with 5 meaning we are + certain of the problem, and 1 meaning it could be a legitimate construct. + This will miss some errors, and is not a substitute for a code review. + + To prevent specific lines from being linted, add a '// NOLINT' comment to the + end of the line. 
+ + The files passed in will be linted; at least one file must be provided. + Linted extensions are .cpp, .c and .h. Other file types will be ignored. + + Flags: + + output=vs7 + By default, the output is formatted to ease emacs parsing. Visual Studio + compatible output (vs7) may also be used. Other formats are unsupported. + + verbose=# + Specify a number 0-5 to restrict errors to certain verbosity levels. + + filter=-x,+y,... + Specify a comma-separated list of category-filters to apply: only + error messages whose category names pass the filters will be printed. + (Category names are printed with the message and look like + "[whitespace/indent]".) Filters are evaluated left to right. + "-FOO" and "FOO" means "do not print categories that start with FOO". + "+FOO" means "do print categories that start with FOO". + + Examples: --filter=-whitespace,+whitespace/braces + --filter=whitespace,runtime/printf,+runtime/printf_format + --filter=-,+build/include_what_you_use + + To see a list of all the categories used in cpplint, pass no arg: + --filter= +""" + +# We categorize each error message we print. Here are the categories. +# We want an explicit list so we can list them all in cpplint --filter=. +# If you add a new error message with a new category, add it to the list +# here! cpplint_unittest.py should tell you if you forget to do this. 
+# \ used for clearer layout -- pylint: disable-msg=C6013 +_ERROR_CATEGORIES = '''\ + build/class + build/deprecated + build/endif_comment + build/forward_decl + build/header_guard + build/include + build/include_order + build/include_what_you_use + build/namespaces + build/printf_format + build/storage_class + legal/copyright + readability/braces + readability/casting + readability/check + readability/comparison_to_zero + readability/constructors + readability/control_flow + readability/fn_size + readability/function + readability/multiline_comment + readability/multiline_string + readability/null + readability/streams + readability/todo + readability/utf8 + runtime/arrays + runtime/casting + runtime/explicit + runtime/int + runtime/init + runtime/invalid_increment + runtime/memset + runtime/printf + runtime/printf_format + runtime/references + runtime/rtti + runtime/sizeof + runtime/string + runtime/threadsafe_fn + runtime/virtual + whitespace/blank_line + whitespace/braces + whitespace/comma + whitespace/comments + whitespace/comments-doublespace + whitespace/end_of_line + whitespace/ending_newline + whitespace/indent + whitespace/labels + whitespace/line_length + whitespace/newline + whitespace/operators + whitespace/parens + whitespace/semicolon + whitespace/tab + whitespace/todo +''' + +# The default state of the category filter. This is overrided by the --filter= +# flag. By default all errors are on, so only add here categories that should be +# off by default (i.e., categories that must be enabled by the --filter= flags). +# All entries here should start with a '-' or '+', as in the --filter= flag. +_DEFAULT_FILTERS = [] + +# Headers that we consider STL headers. 
+_STL_HEADERS = frozenset([ + 'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception', + 'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set', + 'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h', + 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack', + 'stl_alloc.h', 'stl_relops.h', 'type_traits.h', + 'utility', 'vector', 'vector.h', + ]) + + +# Non-STL C++ system headers. +_CPP_HEADERS = frozenset([ + 'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype', + 'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath', + 'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef', + 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype', + 'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream', + 'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip', + 'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h', + 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h', + 'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h', + 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h', + 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept', + 'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string', + 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray', + ]) + + +# Assertion macros. These are defined in base/logging.h and +# testing/base/gunit.h. Note that the _M versions need to come first +# for substring matching to work. 
+_CHECK_MACROS = [ + 'DCHECK', 'CHECK', + 'EXPECT_TRUE_M', 'EXPECT_TRUE', + 'ASSERT_TRUE_M', 'ASSERT_TRUE', + 'EXPECT_FALSE_M', 'EXPECT_FALSE', + 'ASSERT_FALSE_M', 'ASSERT_FALSE', + ] + +# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE +_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) + +for op, replacement in [('==', 'EQ'), ('!=', 'NE'), + ('>=', 'GE'), ('>', 'GT'), + ('<=', 'LE'), ('<', 'LT')]: + _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement + _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement + _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement + _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement + _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement + _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement + +for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), + ('>=', 'LT'), ('>', 'LE'), + ('<=', 'GT'), ('<', 'GE')]: + _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement + _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement + _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement + _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement + + +# These constants define types of headers for use with +# _IncludeState.check_next_include_order(). +_CONFIG_HEADER = 0 +_PRIMARY_HEADER = 1 +_OTHER_HEADER = 2 + + +_regexp_compile_cache = {} + + +def match(pattern, s): + """Matches the string with the pattern, caching the compiled regexp.""" + # The regexp compilation caching is inlined in both match and search for + # performance reasons; factoring it out into a separate function turns out + # to be noticeably expensive. 
+ if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].match(s) + + +def search(pattern, s): + """Searches the string for the pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].search(s) + + +class _IncludeState(dict): + """Tracks line numbers for includes, and the order in which includes appear. + + As a dict, an _IncludeState object serves as a mapping between include + filename and line number on which that file was included. + + Call check_next_include_order() once for each header in the file, passing + in the type constants defined above. Calls in an illegal order will + raise an _IncludeError with an appropriate error message. + + """ + # self._section will move monotonically through this set. If it ever + # needs to move backwards, check_next_include_order will raise an error. + _INITIAL_SECTION = 0 + _CONFIG_SECTION = 1 + _PRIMARY_SECTION = 2 + _OTHER_SECTION = 3 + + _TYPE_NAMES = { + _CONFIG_HEADER: 'WebCore config.h', + _PRIMARY_HEADER: 'header this file implements', + _OTHER_HEADER: 'other header', + } + _SECTION_NAMES = { + _INITIAL_SECTION: "... nothing.", + _CONFIG_SECTION: "WebCore config.h.", + _PRIMARY_SECTION: 'a header this file implements.', + _OTHER_SECTION: 'other header.', + } + + def __init__(self): + dict.__init__(self) + self._section = self._INITIAL_SECTION + self._visited_primary_section = False + self.header_types = dict(); + + def visited_primary_section(self): + return self._visited_primary_section + + def check_next_include_order(self, header_type, file_is_header): + """Returns a non-empty error message if the next header is out of order. + + This function also updates the internal state to be ready to check + the next include. + + Args: + header_type: One of the _XXX_HEADER constants defined above. 
+ file_is_header: Whether the file that owns this _IncludeState is itself a header + + Returns: + The empty string if the header is in the right order, or an + error message describing what's wrong. + + """ + if header_type == _CONFIG_HEADER and file_is_header: + return 'Header file should not contain WebCore config.h.' + if header_type == _PRIMARY_HEADER and file_is_header: + return 'Header file should not contain itself.' + + error_message = '' + if self._section != self._OTHER_SECTION: + before_error_message = ('Found %s before %s' % + (self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section + 1])) + after_error_message = ('Found %s after %s' % + (self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section])) + + if header_type == _CONFIG_HEADER: + if self._section >= self._CONFIG_SECTION: + error_message = after_error_message + self._section = self._CONFIG_SECTION + elif header_type == _PRIMARY_HEADER: + if self._section >= self._PRIMARY_SECTION: + error_message = after_error_message + elif self._section < self._CONFIG_SECTION: + error_message = before_error_message + self._section = self._PRIMARY_SECTION + self._visited_primary_section = True + else: + assert header_type == _OTHER_HEADER + if not file_is_header and self._section < self._PRIMARY_SECTION: + error_message = before_error_message + self._section = self._OTHER_SECTION + + return error_message + + +class _CppLintState(object): + """Maintains module-wide state..""" + + def __init__(self): + self.verbose_level = 1 # global setting. 
+ self.error_count = 0 # global count of reported errors + # filters to apply when emitting error messages + self.filters = _DEFAULT_FILTERS[:] + + # output format: + # "emacs" - format that emacs can parse (default) + # "vs7" - format that Microsoft Visual Studio 7 can parse + self.output_format = 'emacs' + + self.output_stream = sys.stderr + + def set_output_format(self, output_format): + """Sets the output format for errors.""" + self.output_format = output_format + + def set_verbose_level(self, level): + """Sets the module's verbosity, and returns the previous setting.""" + last_verbose_level = self.verbose_level + self.verbose_level = level + return last_verbose_level + + def set_filters(self, filters): + """Sets the error-message filters. + + These filters are applied when deciding whether to emit a given + error message. + + Args: + filters: A string of comma-separated filters (eg "+whitespace/indent"). + Each filter should start with + or -; else we die. + + Raises: + ValueError: The comma-separated filters did not all start with '+' or '-'. + E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" + """ + # Default filters always have less priority than the flag ones. 
+ self.filters = _DEFAULT_FILTERS[:] + for filter in filters.split(','): + clean_filter = filter.strip() + if clean_filter: + self.filters.append(clean_filter) + for filter in self.filters: + if not (filter.startswith('+') or filter.startswith('-')): + raise ValueError('Every filter in --filter must start with ' + '+ or - (%s does not)' % filter) + + def reset_error_count(self): + """Sets the module's error statistic back to zero.""" + self.error_count = 0 + + def increment_error_count(self): + """Bumps the module's error statistic.""" + self.error_count += 1 + + def set_stream(self, stream): + self.output_stream = stream + + def write_error(self, error): + self.output_stream.write(error) + + +_cpplint_state = _CppLintState() + + +def _output_format(): + """Gets the module's output format.""" + return _cpplint_state.output_format + + +def _set_output_format(output_format): + """Sets the module's output format.""" + _cpplint_state.set_output_format(output_format) + + +def _verbose_level(): + """Returns the module's verbosity setting.""" + return _cpplint_state.verbose_level + + +def _set_verbose_level(level): + """Sets the module's verbosity, and returns the previous setting.""" + return _cpplint_state.set_verbose_level(level) + + +def _filters(): + """Returns the module's list of output filters, as a list.""" + return _cpplint_state.filters + + +def _set_filters(filters): + """Sets the module's error-message filters. + + These filters are applied when deciding whether to emit a given + error message. + + Args: + filters: A string of comma-separated filters (eg "whitespace/indent"). + Each filter should start with + or -; else we die. + """ + _cpplint_state.set_filters(filters) + + +def error_count(): + """Returns the global count of reported errors.""" + return _cpplint_state.error_count + + +class _FunctionState(object): + """Tracks current function name and the number of lines in its body.""" + + _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. 
+ _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. + + def __init__(self): + self.in_a_function = False + self.lines_in_function = 0 + self.current_function = '' + + def begin(self, function_name): + """Start analyzing function body. + + Args: + function_name: The name of the function being tracked. + """ + self.in_a_function = True + self.lines_in_function = 0 + self.current_function = function_name + + def count(self): + """Count line in current function body.""" + if self.in_a_function: + self.lines_in_function += 1 + + def check(self, error, filename, line_number): + """Report if too many lines in function body. + + Args: + error: The function to call with any errors found. + filename: The name of the current file. + line_number: The number of the line to check. + """ + if match(r'T(EST|est)', self.current_function): + base_trigger = self._TEST_TRIGGER + else: + base_trigger = self._NORMAL_TRIGGER + trigger = base_trigger * 2 ** _verbose_level() + + if self.lines_in_function > trigger: + error_level = int(math.log(self.lines_in_function / base_trigger, 2)) + # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... + if error_level > 5: + error_level = 5 + error(filename, line_number, 'readability/fn_size', error_level, + 'Small and focused functions are preferred:' + ' %s has %d non-comment lines' + ' (error triggered by exceeding %d lines).' % ( + self.current_function, self.lines_in_function, trigger)) + + def end(self): + """Stop analizing function body.""" + self.in_a_function = False + + +class _IncludeError(Exception): + """Indicates a problem with the include order in a file.""" + pass + + +class FileInfo: + """Provides utility functions for filenames. + + FileInfo provides easy access to the components of a file's path + relative to the project root. 
+ """ + + def __init__(self, filename): + self._filename = filename + + def full_name(self): + """Make Windows paths like Unix.""" + return os.path.abspath(self._filename).replace('\\', '/') + + def repository_name(self): + """Full name after removing the local path to the repository. + + If we have a real absolute path name here we can try to do something smart: + detecting the root of the checkout and truncating /path/to/checkout from + the name so that we get header guards that don't include things like + "C:\Documents and Settings\..." or "/home/username/..." in them and thus + people on different computers who have checked the source out to different + locations won't see bogus errors. + """ + fullname = self.full_name() + + if os.path.exists(fullname): + project_dir = os.path.dirname(fullname) + + if os.path.exists(os.path.join(project_dir, ".svn")): + # If there's a .svn file in the current directory, we + # recursively look up the directory tree for the top + # of the SVN checkout + root_dir = project_dir + one_up_dir = os.path.dirname(root_dir) + while os.path.exists(os.path.join(one_up_dir, ".svn")): + root_dir = os.path.dirname(root_dir) + one_up_dir = os.path.dirname(one_up_dir) + + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Not SVN? Try to find a git top level directory by + # searching up from the current path. + root_dir = os.path.dirname(fullname) + while (root_dir != os.path.dirname(root_dir) + and not os.path.exists(os.path.join(root_dir, ".git"))): + root_dir = os.path.dirname(root_dir) + if os.path.exists(os.path.join(root_dir, ".git")): + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Don't know what to do; header guard warnings may be wrong... + return fullname + + def split(self): + """Splits the file into the directory, basename, and extension. 
+ + For 'chrome/browser/browser.cpp', Split() would + return ('chrome/browser', 'browser', '.cpp') + + Returns: + A tuple of (directory, basename, extension). + """ + + googlename = self.repository_name() + project, rest = os.path.split(googlename) + return (project,) + os.path.splitext(rest) + + def base_name(self): + """File base name - text after the final slash, before the final period.""" + return self.split()[1] + + def extension(self): + """File extension - text following the final period.""" + return self.split()[2] + + def no_extension(self): + """File has no source file extension.""" + return '/'.join(self.split()[0:2]) + + def is_source(self): + """File has a source file extension.""" + return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx') + + +def _should_print_error(category, confidence): + """Returns true iff confidence >= verbose, and category passes filter.""" + # There are two ways we might decide not to print an error message: + # the verbosity level isn't high enough, or the filters filter it out. + if confidence < _cpplint_state.verbose_level: + return False + + is_filtered = False + for one_filter in _filters(): + if one_filter.startswith('-'): + if category.startswith(one_filter[1:]): + is_filtered = True + elif one_filter.startswith('+'): + if category.startswith(one_filter[1:]): + is_filtered = False + else: + assert False # should have been checked for in set_filter. + if is_filtered: + return False + + return True + + +def error(filename, line_number, category, confidence, message): + """Logs the fact we've found a lint error. + + We log where the error was found, and also our confidence in the error, + that is, how certain we are this is a legitimate style regression, and + not a misidentification or a use that's sometimes justified. + + Args: + filename: The name of the file containing the error. + line_number: The number of the line containing the error. 
+ category: A string used to describe the "category" this bug + falls under: "whitespace", say, or "runtime". Categories + may have a hierarchy separated by slashes: "whitespace/indent". + confidence: A number from 1-5 representing a confidence score for + the error, with 5 meaning that we are certain of the problem, + and 1 meaning that it could be a legitimate construct. + message: The error message. + """ + # There are two ways we might decide not to print an error message: + # the verbosity level isn't high enough, or the filters filter it out. + if _should_print_error(category, confidence): + _cpplint_state.increment_error_count() + if _cpplint_state.output_format == 'vs7': + write_error('%s(%s): %s [%s] [%d]\n' % ( + filename, line_number, message, category, confidence)) + else: + write_error('%s:%s: %s [%s] [%d]\n' % ( + filename, line_number, message, category, confidence)) + + +# Matches standard C++ escape esequences per 2.13.2.3 of the C++ standard. +_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( + r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') +# Matches strings. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"') +# Matches characters. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'") +# Matches multi-line C++ comments. +# This RE is a little bit more complicated than one might expect, because we +# have to take care of space removals tools so we can handle comments inside +# statements better. +# The current rule is: We only clear spaces from both sides when we're at the +# end of the line. Otherwise, we try to remove spaces from the right side, +# if this doesn't work we try on left side but only if there's a non-character +# on the right. 
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( + r"""(\s*/\*.*\*/\s*$| + /\*.*\*/\s+| + \s+/\*.*\*/(?=\W)| + /\*.*\*/)""", re.VERBOSE) + + +def is_cpp_string(line): + """Does line terminate so, that the next symbol is in string constant. + + This function does not consider single-line nor multi-line comments. + + Args: + line: is a partial line of code starting from the 0..n. + + Returns: + True, if next character appended to 'line' is inside a + string constant. + """ + + line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" + return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 + + +def find_next_multi_line_comment_start(lines, line_index): + """Find the beginning marker for a multiline comment.""" + while line_index < len(lines): + if lines[line_index].strip().startswith('/*'): + # Only return this marker if the comment goes beyond this line + if lines[line_index].strip().find('*/', 2) < 0: + return line_index + line_index += 1 + return len(lines) + + +def find_next_multi_line_comment_end(lines, line_index): + """We are inside a comment, find the end marker.""" + while line_index < len(lines): + if lines[line_index].strip().endswith('*/'): + return line_index + line_index += 1 + return len(lines) + + +def remove_multi_line_comments_from_range(lines, begin, end): + """Clears a range of lines for multi-line comments.""" + # Having // dummy comments makes the lines non-empty, so we will not get + # unnecessary blank line warnings later in the code. 
+    for i in range(begin, end):
+        lines[i] = '// dummy'
+
+
+def remove_multi_line_comments(filename, lines, error):
+    """Removes multiline (c-style) comments from lines."""
+    line_index = 0
+    while line_index < len(lines):
+        line_index_begin = find_next_multi_line_comment_start(lines, line_index)
+        if line_index_begin >= len(lines):
+            return
+        line_index_end = find_next_multi_line_comment_end(lines, line_index_begin)
+        if line_index_end >= len(lines):
+            error(filename, line_index_begin + 1, 'readability/multiline_comment', 5,
+                  'Could not find end of multi-line comment')
+            return
+        remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1)
+        line_index = line_index_end + 1
+
+
+def cleanse_comments(line):
+    """Removes //-comments and single-line C-style /* */ comments.
+
+    Args:
+      line: A line of C++ source.
+
+    Returns:
+      The line with single-line comments removed.
+    """
+    comment_position = line.find('//')
+    if comment_position != -1 and not is_cpp_string(line[:comment_position]):
+        line = line[:comment_position]
+    # get rid of /* ... */
+    return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+
+
+class CleansedLines(object):
+    """Holds 3 copies of all lines with different preprocessing applied to them.
+
+    1) elided member contains lines without strings and comments,
+    2) lines member contains lines without comments, and
+    3) raw member contains all the lines without processing.
+    All these three members are of <type 'list'>, and of the same length.
+ """ + + def __init__(self, lines): + self.elided = [] + self.lines = [] + self.raw_lines = lines + self._num_lines = len(lines) + for line_number in range(len(lines)): + self.lines.append(cleanse_comments(lines[line_number])) + elided = self.collapse_strings(lines[line_number]) + self.elided.append(cleanse_comments(elided)) + + def num_lines(self): + """Returns the number of lines represented.""" + return self._num_lines + + @staticmethod + def collapse_strings(elided): + """Collapses strings and chars on a line to simple "" or '' blocks. + + We nix strings first so we're not fooled by text like '"http://"' + + Args: + elided: The line being processed. + + Returns: + The line with collapsed strings. + """ + if not _RE_PATTERN_INCLUDE.match(elided): + # Remove escaped characters first to make quote/single quote collapsing + # basic. Things that look like escaped characters shouldn't occur + # outside of strings and chars. + elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) + elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided) + elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided) + return elided + + +def close_expression(clean_lines, line_number, pos): + """If input points to ( or { or [, finds the position that closes it. + + If lines[line_number][pos] points to a '(' or '{' or '[', finds the the + line_number/pos that correspond to the closing of the expression. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + pos: A position on the line. + + Returns: + A tuple (line, line_number, pos) pointer *past* the closing brace, or + (line, len(lines), -1) if we never find a close. Note we ignore + strings and comments when matching; and the line we return is the + 'cleansed' line at line_number. 
+ """ + + line = clean_lines.elided[line_number] + start_character = line[pos] + if start_character not in '({[': + return (line, clean_lines.num_lines(), -1) + if start_character == '(': + end_character = ')' + if start_character == '[': + end_character = ']' + if start_character == '{': + end_character = '}' + + num_open = line.count(start_character) - line.count(end_character) + while line_number < clean_lines.num_lines() and num_open > 0: + line_number += 1 + line = clean_lines.elided[line_number] + num_open += line.count(start_character) - line.count(end_character) + # OK, now find the end_character that actually got us back to even + endpos = len(line) + while num_open >= 0: + endpos = line.rfind(')', 0, endpos) + num_open -= 1 # chopped off another ) + return (line, line_number, endpos + 1) + + +def check_for_copyright(filename, lines, error): + """Logs an error if no Copyright message appears at the top of the file.""" + + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. + for line in xrange(1, min(len(lines), 11)): + if re.search(r'Copyright|License', lines[line], re.I): + break + else: # means no copyright line was found + error(filename, 1, 'legal/copyright', 3, + 'No copyright message found.') + + +def get_header_guard_cpp_variable(filename): + """Returns the CPP variable that should be used as a header guard. + + Args: + filename: The name of a C++ header file. + + Returns: + The CPP variable that should be used as a header guard in the + named file. + + """ + + fileinfo = FileInfo(filename) + return re.sub(r'[-./\s]', '_', fileinfo.repository_name()).upper() + '_' + + +def check_for_header_guard(filename, lines, error): + """Checks that the file contains a header guard. + + Logs an error if no #ifndef header guard is present. For other + headers, checks that the full pathname is used. + + Args: + filename: The name of the C++ header file. + lines: An array of strings, each representing a line of the file. 
+ error: The function to call with any errors found. + """ + + cppvar = get_header_guard_cpp_variable(filename) + + ifndef = None + ifndef_line_number = 0 + define = None + endif = None + endif_line_number = 0 + for line_number, line in enumerate(lines): + line_split = line.split() + if len(line_split) >= 2: + # find the first occurrence of #ifndef and #define, save arg + if not ifndef and line_split[0] == '#ifndef': + # set ifndef to the header guard presented on the #ifndef line. + ifndef = line_split[1] + ifndef_line_number = line_number + if not define and line_split[0] == '#define': + define = line_split[1] + # find the last occurrence of #endif, save entire line + if line.startswith('#endif'): + endif = line + endif_line_number = line_number + + if not ifndef or not define or ifndef != define: + error(filename, 1, 'build/header_guard', 5, + 'No #ifndef header guard found, suggested CPP variable is: %s' % + cppvar) + return + + # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ + # for backward compatibility. + if ifndef != cppvar: + error_level = 0 + if ifndef != cppvar + '_': + error_level = 5 + + error(filename, ifndef_line_number, 'build/header_guard', error_level, + '#ifndef header guard has wrong style, please use: %s' % cppvar) + + if endif != ('#endif // %s' % cppvar): + error_level = 0 + if endif != ('#endif // %s' % (cppvar + '_')): + error_level = 5 + + error(filename, endif_line_number, 'build/header_guard', error_level, + '#endif line should be "#endif // %s"' % cppvar) + + +def check_for_unicode_replacement_characters(filename, lines, error): + """Logs an error for each line containing Unicode replacement characters. + + These indicate that either the file contained invalid UTF-8 (likely) + or Unicode replacement characters (which it shouldn't). Note that + it's possible for this to throw off line numbering if the invalid + UTF-8 occurred adjacent to a newline. + + Args: + filename: The name of the current file. 
+ lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + for line_number, line in enumerate(lines): + if u'\ufffd' in line: + error(filename, line_number, 'readability/utf8', 5, + 'Line contains invalid UTF-8 (or Unicode replacement character).') + + +def check_for_new_line_at_eof(filename, lines, error): + """Logs an error if there is no newline char at the end of the file. + + Args: + filename: The name of the current file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + # The array lines() was created by adding two newlines to the + # original file (go figure), then splitting on \n. + # To verify that the file ends in \n, we just have to make sure the + # last-but-two element of lines() exists and is empty. + if len(lines) < 3 or lines[-2]: + error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, + 'Could not find a newline character at the end of the file.') + + +def check_for_multiline_comments_and_strings(filename, clean_lines, line_number, error): + """Logs an error if we see /* ... */ or "..." that extend past one line. + + /* ... */ comments are legit inside macros, for one line. + Otherwise, we prefer // comments, so it's ok to warn about the + other. Likewise, it's ok for strings to extend across multiple + lines, as long as a line continuation character (backslash) + terminates each line. Although not currently prohibited by the C++ + style guide, it's ugly and unnecessary. We don't do well with either + in this lint program, so we warn about both. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[line_number] + + # Remove all \\ (escaped backslashes) from the line. 
They are OK, and the + # second (escaped) slash may trigger later \" detection erroneously. + line = line.replace('\\\\', '') + + if line.count('/*') > line.count('*/'): + error(filename, line_number, 'readability/multiline_comment', 5, + 'Complex multi-line /*...*/-style comment found. ' + 'Lint may give bogus warnings. ' + 'Consider replacing these with //-style comments, ' + 'with #if 0...#endif, ' + 'or with more clearly structured multi-line comments.') + + if (line.count('"') - line.count('\\"')) % 2: + error(filename, line_number, 'readability/multiline_string', 5, + 'Multi-line string ("...") found. This lint script doesn\'t ' + 'do well with such strings, and may give bogus warnings. They\'re ' + 'ugly and unnecessary, and you should use concatenation instead".') + + +_THREADING_LIST = ( + ('asctime(', 'asctime_r('), + ('ctime(', 'ctime_r('), + ('getgrgid(', 'getgrgid_r('), + ('getgrnam(', 'getgrnam_r('), + ('getlogin(', 'getlogin_r('), + ('getpwnam(', 'getpwnam_r('), + ('getpwuid(', 'getpwuid_r('), + ('gmtime(', 'gmtime_r('), + ('localtime(', 'localtime_r('), + ('rand(', 'rand_r('), + ('readdir(', 'readdir_r('), + ('strtok(', 'strtok_r('), + ('ttyname(', 'ttyname_r('), + ) + + +def check_posix_threading(filename, clean_lines, line_number, error): + """Checks for calls to thread-unsafe functions. + + Much code has been originally written without consideration of + multi-threading. Also, engineers are relying on their old experience; + they have learned posix before threading extensions were added. These + tests guide the engineers to use thread-safe functions (when using + posix directly). + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. 
+ """ + line = clean_lines.elided[line_number] + for single_thread_function, multithread_safe_function in _THREADING_LIST: + index = line.find(single_thread_function) + # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 + if index >= 0 and (index == 0 or (not line[index - 1].isalnum() + and line[index - 1] not in ('_', '.', '>'))): + error(filename, line_number, 'runtime/threadsafe_fn', 2, + 'Consider using ' + multithread_safe_function + + '...) instead of ' + single_thread_function + + '...) for improved thread safety.') + + +# Matches invalid increment: *count++, which moves pointer instead of +# incrementing a value. +_RE_PATTERN_INVALID_INCREMENT = re.compile( + r'^\s*\*\w+(\+\+|--);') + + +def check_invalid_increment(filename, clean_lines, line_number, error): + """Checks for invalid increment *count++. + + For example following function: + void increment_counter(int* count) { + *count++; + } + is invalid, because it effectively does count++, moving pointer, and should + be replaced with ++*count, (*count)++ or *count += 1. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[line_number] + if _RE_PATTERN_INVALID_INCREMENT.match(line): + error(filename, line_number, 'runtime/invalid_increment', 5, + 'Changing pointer instead of value (or unused value of operator*).') + + +class _ClassInfo(object): + """Stores information about a class.""" + + def __init__(self, name, line_number): + self.name = name + self.line_number = line_number + self.seen_open_brace = False + self.is_derived = False + self.virtual_method_line_number = None + self.has_virtual_destructor = False + self.brace_depth = 0 + + +class _ClassState(object): + """Holds the current state of the parse relating to class declarations. 
+
+    It maintains a stack of _ClassInfos representing the parser's guess
+    as to the current nesting of class declarations. The innermost class
+    is at the top (back) of the stack. Typically, the stack will either
+    be empty or have exactly one entry.
+    """
+
+    def __init__(self):
+        self.classinfo_stack = []
+
+    def check_finished(self, filename, error):
+        """Checks that all classes have been completely parsed.
+
+        Call this when all lines in a file have been processed.
+        Args:
+          filename: The name of the current file.
+          error: The function to call with any errors found.
+        """
+        if self.classinfo_stack:
+            # Note: This test can result in false positives if #ifdef constructs
+            # get in the way of brace matching. See the testBuildClass test in
+            # cpplint_unittest.py for an example of this.
+            error(filename, self.classinfo_stack[0].line_number, 'build/class', 5,
+                  'Failed to find complete declaration of class %s' %
+                  self.classinfo_stack[0].name)
+
+
+def check_for_non_standard_constructs(filename, clean_lines, line_number,
+                                      class_state, error):
+    """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+    Complain about several constructs which gcc-2 accepts, but which are
+    not standard C++. Warning about these in lint is one way to ease the
+    transition to new compilers.
+    - put storage class first (e.g. "static const" instead of "const static").
+    - "%lld" instead of %qd" in printf-type functions.
+    - "%1$d" is non-standard in printf-type functions.
+    - "\%" is an undefined character escape sequence.
+    - text after #endif is not allowed.
+    - invalid inner-style forward declaration.
+    - >? and <? operators, and their >?= and <?= cousins.
+    - classes with virtual methods need virtual destructor (compiler warning
+      available, but not turned on yet.)
+
+    Additionally, check for constructor/destructor style violations as it
+    is very convenient to do so while checking for gcc-2 compliance.
+
+    Args:
+      filename: The name of the current file.
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      class_state: A _ClassState instance which maintains information about
+                   the current stack of nested class declarations being parsed.
+      error: A callable to which errors are reported, which takes 4 arguments:
+             filename, line number, error level, and message
+    """
+
+    # Remove comments from the line, but leave in strings for now.
+    line = clean_lines.lines[line_number]
+
+    if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+        error(filename, line_number, 'runtime/printf_format', 3,
+              '%q in format strings is deprecated.  Use %ll instead.')
+
+    if search(r'printf\s*\(.*".*%\d+\$', line):
+        error(filename, line_number, 'runtime/printf_format', 2,
+              '%N$ formats are unconventional.  Try rewriting to avoid them.')
+
+    # Remove escaped backslashes before looking for undefined escape sequences.
+    line = line.replace('\\\\', '')
+
+    if search(r'("|\').*\\(%|\[|\(|{)', line):
+        error(filename, line_number, 'build/printf_format', 3,
+              '%, [, (, and { are undefined character escapes.  Unescape them.')
+
+    # For the rest, work with both comments and strings removed.
+    line = clean_lines.elided[line_number]
+
+    if search(r'\b(const|volatile|void|char|short|int|long'
+              r'|float|double|signed|unsigned'
+              r'|schar|u?int8|u?int16|u?int32|u?int64)'
+              r'\s+(auto|register|static|extern|typedef)\b',
+              line):
+        error(filename, line_number, 'build/storage_class', 5,
+              'Storage class (static, extern, typedef, etc) should be first.')
+
+    if match(r'\s*#\s*endif\s*[^/\s]+', line):
+        error(filename, line_number, 'build/endif_comment', 5,
+              'Uncommented text after #endif is non-standard.  Use a comment.')
+
+    if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+        error(filename, line_number, 'build/forward_decl', 5,
+              'Inner-style forward declarations are invalid.  Remove this line.')
+
+    if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line):
+        error(filename, line_number, 'build/deprecated', 3,
+              '>? and <? (max and min) operators are non-standard and deprecated.')
+
+    # Track class entry and exit, and attempt to find cases within the
+    # class declaration that don't meet the C++ style
+    # guidelines. Tracking is very dependent on the code matching certain
+    # patterns -- even formatting changes can land us in
+    # trouble, and this is just a quick, local check. It's also
+    # severely compromised by the lack of full parsing.
+    classinfo_stack = class_state.classinfo_stack
+    # Look for a class declaration
+    class_decl_match = match(r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
+    if class_decl_match:
+        classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number))
+
+    # Everything else in this function uses the top of the stack if it's
+    # not empty.
+ if not classinfo_stack: + return + + classinfo = classinfo_stack[-1] + + # If the opening brace hasn't been seen look for it and also + # parent class declarations. + if not classinfo.seen_open_brace: + # If the line has a ';' in it, assume it's a forward declaration or + # a single-line class declaration, which we won't process. + if line.find(';') != -1: + classinfo_stack.pop() + return + classinfo.seen_open_brace = (line.find('{') != -1) + # Look for a bare ':' + if search('(^|[^:]):($|[^:])', line): + classinfo.is_derived = True + if not classinfo.seen_open_brace: + return # Everything else in this function is for after open brace + + # The class may have been declared with namespace or classname qualifiers. + # The constructor and destructor will not have those qualifiers. + base_classname = classinfo.name.split('::')[-1] + + # Look for single-argument constructors that aren't marked explicit. + # Technically a valid construct, but against style. + args = match(r'(?= 0 + and match(r' {6}\w', elided[search_position])): + search_position -= 1 + exception = (search_position >= 0 + and elided[search_position][:5] == ' :') + else: + # Search for the function arguments or an initializer list. We use a + # simple heuristic here: If the line is indented 4 spaces; and we have a + # closing paren, without the opening paren, followed by an opening brace + # or colon (for initializer lists) we assume that it is the last line of + # a function header. If we have a colon indented 4 spaces, it is an + # initializer list. + exception = (match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', + previous_line) + or match(r' {4}:', previous_line)) + + if not exception: + error(filename, line_number, 'whitespace/blank_line', 2, + 'Blank line at the start of a code block. 
Is this needed?') + # This doesn't ignore whitespace at the end of a namespace block + # because that is too hard without pairing open/close braces; + # however, a special exception is made for namespace closing + # brackets which have a comment containing "namespace". + # + # Also, ignore blank lines at the end of a block in a long if-else + # chain, like this: + # if (condition1) { + # // Something followed by a blank line + # + # } else if (condition2) { + # // Something else + # } + if line_number + 1 < clean_lines.num_lines(): + next_line = raw[line_number + 1] + if (next_line + and match(r'\s*}', next_line) + and next_line.find('namespace') == -1 + and next_line.find('} else ') == -1): + error(filename, line_number, 'whitespace/blank_line', 3, + 'Blank line at the end of a code block. Is this needed?') + + # Next, we complain if there's a comment too near the text + comment_position = line.find('//') + if comment_position != -1: + # Check if the // may be in quotes. If so, ignore it + # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 + if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0: # not in quotes + # Allow one space for new scopes, two spaces otherwise: + if (not match(r'^\s*{ //', line) + and ((comment_position >= 1 + and line[comment_position-1] not in string.whitespace) + or (comment_position >= 2 + and line[comment_position-2] not in string.whitespace))): + error(filename, line_number, 'whitespace/comments-doublespace', 2, + 'At least two spaces is best between code and comments') + # There should always be a space between the // and the comment + commentend = comment_position + 2 + if commentend < len(line) and not line[commentend] == ' ': + # but some lines are exceptions -- e.g. 
if they're big + # comment delimiters like: + # //---------------------------------------------------------- + # or they begin with multiple slashes followed by a space: + # //////// Header comment + matched = (search(r'[=/-]{4,}\s*$', line[commentend:]) + or search(r'^/+ ', line[commentend:])) + if not matched: + error(filename, line_number, 'whitespace/comments', 4, + 'Should have a space between // and comment') + + line = clean_lines.elided[line_number] # get rid of comments and strings + + # Don't try to do spacing checks for operator methods + line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line) + + # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". + # Otherwise not. Note we only check for non-spaces on *both* sides; + # sometimes people put non-spaces on one side when aligning ='s among + # many lines (not that this is behavior that I approve of...) + if search(r'[\w.]=[\w.]', line) and not search(r'\b(if|while) ', line): + error(filename, line_number, 'whitespace/operators', 4, + 'Missing spaces around =') + + # FIXME: It's not ok to have spaces around binary operators like + - * / . + + # You should always have whitespace around binary operators. + # Alas, we can't test < or > because they're legitimately used sans spaces + # (a->b, vector a). The only time we can tell is a < with no >, and + # only if it's not template params list spilling into the next line. + matched = search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line) + if not matched: + # Note that while it seems that the '<[^<]*' term in the following + # regexp could be simplified to '<.*', which would indeed match + # the same class of strings, the [^<] means that searching for the + # regexp takes linear rather than quadratic time. 
+ if not search(r'<[^<]*,\s*$', line): # template params spill + matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line) + if matched: + error(filename, line_number, 'whitespace/operators', 3, + 'Missing spaces around %s' % matched.group(1)) + # We allow no-spaces around << and >> when used like this: 10<<20, but + # not otherwise (particularly, not when used as streams) + matched = search(r'[^0-9\s](<<|>>)[^0-9\s]', line) + if matched: + error(filename, line_number, 'whitespace/operators', 3, + 'Missing spaces around %s' % matched.group(1)) + + # There shouldn't be space around unary operators + matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) + if matched: + error(filename, line_number, 'whitespace/operators', 4, + 'Extra space for operator %s' % matched.group(1)) + + # A pet peeve of mine: no spaces after an if, while, switch, or for + matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line) + if matched: + error(filename, line_number, 'whitespace/parens', 5, + 'Missing space before ( in %s' % matched.group(1)) + + # For if/for/foreach/while/switch, the left and right parens should be + # consistent about how many spaces are inside the parens, and + # there should either be zero or one spaces inside the parens. + # We don't want: "if ( foo)" or "if ( foo )". + # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. 
+ matched = search(r'\b(if|for|foreach|while|switch)\s*\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', + line) + if matched: + if len(matched.group(2)) != len(matched.group(4)): + if not (matched.group(3) == ';' + and len(matched.group(2)) == 1 + len(matched.group(4)) + or not matched.group(2) and search(r'\bfor\s*\(.*; \)', line)): + error(filename, line_number, 'whitespace/parens', 5, + 'Mismatching spaces inside () in %s' % matched.group(1)) + if not len(matched.group(2)) in [0, 1]: + error(filename, line_number, 'whitespace/parens', 5, + 'Should have zero or one spaces inside ( and ) in %s' % + matched.group(1)) + + # You should always have a space after a comma (either as fn arg or operator) + if search(r',[^\s]', line): + error(filename, line_number, 'whitespace/comma', 3, + 'Missing space after ,') + + # Next we will look for issues with function calls. + check_spacing_for_function_call(filename, line, line_number, error) + + # Except after an opening paren, you should have spaces before your braces. + # And since you should never have braces at the beginning of a line, this is + # an easy test. + if search(r'[^ ({]{', line): + error(filename, line_number, 'whitespace/braces', 5, + 'Missing space before {') + + # Make sure '} else {' has spaces. + if search(r'}else', line): + error(filename, line_number, 'whitespace/braces', 5, + 'Missing space before else') + + # You shouldn't have spaces before your brackets, except maybe after + # 'delete []' or 'new char * []'. + if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line): + error(filename, line_number, 'whitespace/braces', 5, + 'Extra space before [') + + # You shouldn't have a space before a semicolon at the end of the line. + # There's a special case for "for" since the style guide allows space before + # the semicolon there. + if search(r':\s*;\s*$', line): + error(filename, line_number, 'whitespace/semicolon', 5, + 'Semicolon defining empty statement. 
Use { } instead.') + elif search(r'^\s*;\s*$', line): + error(filename, line_number, 'whitespace/semicolon', 5, + 'Line contains only semicolon. If this should be an empty statement, ' + 'use { } instead.') + elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)): + error(filename, line_number, 'whitespace/semicolon', 5, + 'Extra space before last semicolon. If this should be an empty ' + 'statement, use { } instead.') + elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line) + and line.count('(') == line.count(')') + # Allow do {} while(); + and not search(r'}\s*while', line)): + error(filename, line_number, 'whitespace/semicolon', 5, + 'Semicolon defining empty statement for this loop. Use { } instead.') + + +def get_previous_non_blank_line(clean_lines, line_number): + """Return the most recent non-blank line and its line number. + + Args: + clean_lines: A CleansedLines instance containing the file contents. + line_number: The number of the line to check. + + Returns: + A tuple with two elements. The first element is the contents of the last + non-blank line before the current line, or the empty string if this is the + first non-blank line. The second is the line number of that line, or -1 + if this is the first non-blank line. + """ + + previous_line_number = line_number - 1 + while previous_line_number >= 0: + previous_line = clean_lines.elided[previous_line_number] + if not is_blank_line(previous_line): # if not a blank line... + return (previous_line, previous_line_number) + previous_line_number -= 1 + return ('', -1) + + +def check_namespace_indentation(filename, clean_lines, line_number, file_extension, error): + """Looks for indentation errors inside of namespaces. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + file_extension: The extension (dot not included) of the file. + error: The function to call with any errors found. 
+ """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. + + namespace_match = match(r'(?P\s*)namespace\s+\S+\s*{\s*$', line) + if not namespace_match: + return + + namespace_indentation = namespace_match.group('namespace_indentation') + + is_header_file = file_extension == 'h' + is_implementation_file = not is_header_file + line_offset = 0 + + if is_header_file: + inner_indentation = namespace_indentation + ' ' * 4 + + for current_line in clean_lines.raw_lines[line_number + 1:]: + line_offset += 1 + + # Skip not only empty lines but also those with preprocessor directives. + # Goto labels don't occur in header files, so no need to check for those. + if current_line.strip() == '' or current_line.startswith('#'): + continue + + if not current_line.startswith(inner_indentation): + # If something unindented was discovered, make sure it's a closing brace. + if not current_line.startswith(namespace_indentation + '}'): + error(filename, line_number + line_offset, 'whitespace/indent', 4, + 'In a header, code inside a namespace should be indented.') + break + + if is_implementation_file: + for current_line in clean_lines.raw_lines[line_number + 1:]: + line_offset += 1 + + # Skip not only empty lines but also those with (goto) labels. + # The goto label regexp accepts spaces or the beginning of a + # comment (if anything) after the initial colon. + if current_line.strip() == '' or match(r'\w+\s*:([\s\/].*)?$', current_line): + continue + + remaining_line = current_line[len(namespace_indentation):] + if not match(r'\S', remaining_line): + error(filename, line_number + line_offset, 'whitespace/indent', 4, + 'In an implementation file, code inside a namespace should not be indented.') + + # Just check the first non-empty line in any case, because + # otherwise we would need to count opened and closed braces, + # which is obviously a lot more complicated. 
+            break
+
+
+def check_switch_indentation(filename, clean_lines, line_number, error):
+    """Looks for indentation errors inside of switch statements.
+
+    Args:
+      filename: The name of the current file.
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    line = clean_lines.elided[line_number]  # Get rid of comments and strings.
+
+    switch_match = match(r'(?P<switch_indentation>\s*)switch\s*\(.+\)\s*{\s*$', line)
+    if not switch_match:
+        return
+
+    switch_indentation = switch_match.group('switch_indentation')
+    inner_indentation = switch_indentation + ' ' * 4
+    line_offset = 0
+    encountered_nested_switch = False
+
+    for current_line in clean_lines.elided[line_number + 1:]:
+        line_offset += 1
+
+        # Skip not only empty lines but also those with preprocessor directives.
+        if current_line.strip() == '' or current_line.startswith('#'):
+            continue
+
+        if match(r'\s*switch\s*\(.+\)\s*{\s*$', current_line):
+            # Complexity alarm - another switch statement nested inside the one
+            # that we're currently testing. We'll need to track the extent of
+            # that inner switch if the upcoming label tests are still supposed
+            # to work correctly. Let's not do that; instead, we'll finish
+            # checking this line, and then leave it like that. Assuming the
+            # indentation is done consistently (even if incorrectly), this will
+            # still catch all indentation issues in practice.
+            encountered_nested_switch = True
+
+        current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line);
+        current_indentation = current_indentation_match.group('indentation')
+        remaining_line = current_indentation_match.group('remaining_line')
+
+        # End the check at the end of the switch statement.
+        if remaining_line.startswith('}') and current_indentation == switch_indentation:
+            break
+        # Case and default branches should not be indented.
The regexp also + # catches single-line cases like "default: break;" but does not trigger + # on stuff like "Document::Foo();". + elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line): + if current_indentation != switch_indentation: + error(filename, line_number + line_offset, 'whitespace/indent', 4, + 'A case label should not be indented, but line up with its switch statement.') + # Don't throw an error for multiple badly indented labels, + # one should be enough to figure out the problem. + break + # We ignore goto labels at the very beginning of a line. + elif match(r'\w+\s*:\s*$', remaining_line): + continue + # It's not a goto label, so check if it's indented at least as far as + # the switch statement plus one more level of indentation. + elif not current_indentation.startswith(inner_indentation): + error(filename, line_number + line_offset, 'whitespace/indent', 4, + 'Non-label code inside switch statements should be indented.') + # Don't throw an error for multiple badly indented statements, + # one should be enough to figure out the problem. + break + + if encountered_nested_switch: + break + + +def check_braces(filename, clean_lines, line_number, error): + """Looks for misplaced braces (e.g. at the end of line). + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. 
+    # The brace-placement checks below are deliberately disabled: they are
+    # kept as an inert string literal rather than deleted, per the TODO inside.
+    """
+    These don't match our style guideline:
+    https://developer.mozilla.org/en-US/docs/Developer_Guide/Coding_Style#Control_Structures
+
+    TODO: Spin this off in a different rule and disable that rule for mozilla
+    rather then commenting this out
+
+
+    if match(r'\s*{\s*$', line):
+        # We allow an open brace to start a line in the case where someone
+        # is using braces for function definition or in a block to
+        # explicitly create a new scope, which is commonly used to control
+        # the lifetime of stack-allocated variables. We don't detect this
+        # perfectly: we just don't complain if the last non-whitespace
+        # character on the previous non-blank line is ';', ':', '{', '}',
+        # ')', or ') const' and doesn't begin with 'if|for|while|switch|else'.
+        # We also allow '#' for #endif and '=' for array initialization.
+        previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+        if ((not search(r'[;:}{)=]\s*$|\)\s*const\s*$', previous_line)
+             or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line))
+            and previous_line.find('#') < 0):
+            error(filename, line_number, 'whitespace/braces', 4,
+                  'This { should be at the end of the previous line')
+    elif (search(r'\)\s*(const\s*)?{\s*$', line)
+          and line.count('(') == line.count(')')
+          and not search(r'\b(if|for|foreach|while|switch)\b', line)):
+        error(filename, line_number, 'whitespace/braces', 4,
+              'Place brace on its own line for function definitions.')
+
+    if (match(r'\s*}\s*$', line) and line_number > 1):
+        # We check if a closed brace has started a line to see if a
+        # one line control statement was previous.
+        previous_line = clean_lines.elided[line_number - 2]
+        if (previous_line.find('{') > 0
+            and search(r'\b(if|for|foreach|while|else)\b', previous_line)):
+            error(filename, line_number, 'whitespace/braces', 4,
+                  'One line control clauses should not use braces.')
+    """
+
+    # An else clause should be on the same line as the preceding closing brace.
+    if match(r'\s*else\s*', line):
+        previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+        if match(r'\s*}\s*$', previous_line):
+            error(filename, line_number, 'whitespace/newline', 4,
+                  'An else should appear on the same line as the preceding }')
+
+    # Likewise, an else should never have the else clause on the same line
+    if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line):
+        error(filename, line_number, 'whitespace/newline', 4,
+              'Else clause should never be on same line as else (use 2 lines)')
+
+    # In the same way, a do/while should never be on one line
+    if match(r'\s*do [^\s{]', line):
+        error(filename, line_number, 'whitespace/newline', 4,
+              'do/while clauses should not be on a single line')
+
+    # Braces shouldn't be followed by a ; unless they're defining a struct
+    # or initializing an array.
+    # We can't tell in general, but we can for some common cases.
+    # Fold preceding non-blank lines into `line` until one contains a ';',
+    # so that a multi-line "{ ... };" construct is examined as a single string.
+    previous_line_number = line_number
+    while True:
+        (previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number)
+        if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'):
+            line = previous_line + line
+        else:
+            break
+    if (search(r'{.*}\s*;', line)
+        and line.count('{') == line.count('}')
+        and not search(r'struct|class|enum|\s*=\s*{', line)):
+        error(filename, line_number, 'readability/braces', 4,
+              "You don't need a ; after a }")
+
+
+def check_exit_statement_simplifications(filename, clean_lines, line_number, error):
+    """Looks for else or else-if statements that should be written as an
+    if statement when the prior if concludes with a return, break, continue or
+    goto statement.
+
+    Args:
+      filename: The name of the current file.
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    line = clean_lines.elided[line_number]  # Get rid of comments and strings.
+ + else_match = match(r'(?P\s*)(\}\s*)?else(\s+if\s*\(|(?P\s*(\{\s*)?\Z))', line) + if not else_match: + return + + else_indentation = else_match.group('else_indentation') + inner_indentation = else_indentation + ' ' * 4 + + previous_lines = clean_lines.elided[:line_number] + previous_lines.reverse() + line_offset = 0 + encountered_exit_statement = False + + for current_line in previous_lines: + line_offset -= 1 + + # Skip not only empty lines but also those with preprocessor directives + # and goto labels. + if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line): + continue + + # Skip lines with closing braces on the original indentation level. + # Even though the styleguide says they should be on the same line as + # the "else if" statement, we also want to check for instances where + # the current code does not comply with the coding style. Thus, ignore + # these lines and proceed to the line before that. + if current_line == else_indentation + '}': + continue + + current_indentation_match = match(r'(?P\s*)(?P.*)$', current_line); + current_indentation = current_indentation_match.group('indentation') + remaining_line = current_indentation_match.group('remaining_line') + + # As we're going up the lines, the first real statement to encounter + # has to be an exit statement (return, break, continue or goto) - + # otherwise, this check doesn't apply. + if not encountered_exit_statement: + # We only want to find exit statements if they are on exactly + # the same level of indentation as expected from the code inside + # the block. If the indentation doesn't strictly match then we + # might have a nested if or something, which must be ignored. 
+ if current_indentation != inner_indentation: + break + if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line): + encountered_exit_statement = True + continue + break + + # When code execution reaches this point, we've found an exit statement + # as last statement of the previous block. Now we only need to make + # sure that the block belongs to an "if", then we can throw an error. + + # Skip lines with opening braces on the original indentation level, + # similar to the closing braces check above. ("if (condition)\n{") + if current_line == else_indentation + '{': + continue + + # Skip everything that's further indented than our "else" or "else if". + if current_indentation.startswith(else_indentation) and current_indentation != else_indentation: + continue + + # So we've got a line with same (or less) indentation. Is it an "if"? + # If yes: throw an error. If no: don't throw an error. + # Whatever the outcome, this is the end of our loop. + if match(r'if\s*\(', remaining_line): + if else_match.start('else') != -1: + error(filename, line_number + line_offset, 'readability/control_flow', 4, + 'An else statement can be removed when the prior "if" ' + 'concludes with a return, break, continue or goto statement.') + else: + error(filename, line_number + line_offset, 'readability/control_flow', 4, + 'An else if statement should be written as an if statement ' + 'when the prior "if" concludes with a return, break, ' + 'continue or goto statement.') + break + + +def replaceable_check(operator, macro, line): + """Determine whether a basic CHECK can be replaced with a more specific one. + + For example suggest using CHECK_EQ instead of CHECK(a == b) and + similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE. + + Args: + operator: The C++ operator used in the CHECK. + macro: The CHECK or EXPECT macro being called. + line: The current source line. + + Returns: + True if the CHECK can be replaced with a more specific one. 
+    """
+
+    # This matches decimal and hex integers, strings, and chars (in that order).
+    match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
+
+    # Expression to match two sides of the operator with something that
+    # looks like a literal, since CHECK(x == iterator) won't compile.
+    # This means we can't catch all the cases where a more specific
+    # CHECK is possible, but it's less annoying than dealing with
+    # extraneous warnings.
+    match_this = (r'\s*' + macro + r'\((\s*' +
+                  match_constant + r'\s*' + operator + r'[^<>].*|'
+                  r'.*[^<>]' + operator + r'\s*' + match_constant +
+                  r'\s*\))')
+
+    # Don't complain about CHECK(x == NULL) or similar because
+    # CHECK_EQ(x, NULL) won't compile (requires a cast).
+    # Also, don't complain about more complex boolean expressions
+    # involving && or || such as CHECK(a == b || c == d).
+    return match(match_this, line) and not search(r'NULL|&&|\|\|', line)
+
+
+def check_check(filename, clean_lines, line_number, error):
+    """Checks the use of CHECK and EXPECT macros.
+
+    Args:
+      filename: The name of the current file.
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      error: The function to call with any errors found.
+    """
+
+    # Decide the set of replacement macros that should be suggested
+    raw_lines = clean_lines.raw_lines
+    current_macro = ''
+    for macro in _CHECK_MACROS:
+        if raw_lines[line_number].find(macro) >= 0:
+            current_macro = macro
+            break
+    if not current_macro:
+        # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
+        return
+
+    line = clean_lines.elided[line_number]  # get rid of comments and strings
+
+    # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
+    # NOTE(review): the operator list appears ordered so two-character
+    # operators ('>=', '<=') are tried before their one-character prefixes
+    # — confirm before reordering.
+    for operator in ['==', '!=', '>=', '>', '<=', '<']:
+        if replaceable_check(operator, current_macro, line):
+            error(filename, line_number, 'readability/check', 2,
+                  'Consider using %s instead of %s(a %s b)' % (
+                      _CHECK_REPLACEMENT[current_macro][operator],
+                      current_macro, operator))
+            break
+
+
+def check_for_comparisons_to_zero(filename, clean_lines, line_number, error):
+    # Flags equality comparisons against NULL, 0, true or false.
+    # Get the line without comments and strings.
+    line = clean_lines.elided[line_number]
+
+    # Include NULL here so that users don't have to convert NULL to 0 first and then get this error.
+    if search(r'[=!]=\s*(NULL|0|true|false)\W', line) or search(r'\W(NULL|0|true|false)\s*[=!]=', line):
+        error(filename, line_number, 'readability/comparison_to_zero', 5,
+              'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.')
+
+
+def check_for_null(filename, clean_lines, line_number, error):
+    # Flags uses of NULL (outside strings) in C++ sources.
+    # This check doesn't apply to C or Objective-C implementation files.
+    if filename.endswith('.c') or filename.endswith('.m'):
+        return
+
+    line = clean_lines.elided[line_number]
+    if search(r'\bNULL\b', line):
+        error(filename, line_number, 'readability/null', 5, 'Use 0 instead of NULL.')
+        return
+
+    line = clean_lines.raw_lines[line_number]
+    # See if NULL occurs in any comments in the line. If the search for NULL using the raw line
+    # matches, then do the check with strings collapsed to avoid giving errors for
+    # NULLs occurring in strings.
+    if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)):
+        error(filename, line_number, 'readability/null', 4, 'Use 0 instead of NULL.')
+
+def get_line_width(line):
+    """Determines the width of the line in column positions.
+
+    Args:
+      line: A string, which may be a Unicode string.
+
+    Returns:
+      The width of the line in column positions, accounting for Unicode
+      combining characters and wide characters.
+    """
+    # Python 2 code: `unicode` is the py2 text type; byte strings fall
+    # through to the plain len() below.
+    if isinstance(line, unicode):
+        width = 0
+        for c in unicodedata.normalize('NFC', line):
+            # East-Asian wide/fullwidth characters occupy two columns;
+            # combining characters occupy none.
+            if unicodedata.east_asian_width(c) in ('W', 'F'):
+                width += 2
+            elif not unicodedata.combining(c):
+                width += 1
+        return width
+    return len(line)
+
+
+def check_style(filename, clean_lines, line_number, file_extension, error):
+    """Checks rules from the 'C++ style rules' section of cppguide.html.
+
+    Most of these rules are hard to test (naming, comment style), but we
+    do what we can. In particular we check for 4-space indents, line lengths,
+    tab usage, spaces inside code, etc.
+
+    Args:
+      filename: The name of the current file.
+      clean_lines: A CleansedLines instance containing the file.
+      line_number: The number of the line to check.
+      file_extension: The extension (without the dot) of the filename.
+      error: The function to call with any errors found.
+    """
+
+    raw_lines = clean_lines.raw_lines
+    line = raw_lines[line_number]
+
+    if line.find('\t') != -1:
+        error(filename, line_number, 'whitespace/tab', 1,
+              'Tab found; better to use spaces')
+
+    # One or three blank spaces at the beginning of the line is weird; it's
+    # hard to reconcile that with 4-space indents.
+    # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
+    # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
+    # if(RLENGTH > 20) complain = 0;
+    # if(match($0, " +(error|private|public|protected):")) complain = 0;
+    # if(match(prev, "&& *$")) complain = 0;
+    # if(match(prev, "\\|\\| *$")) complain = 0;
+    # if(match(prev, "[\",=><] *$")) complain = 0;
+    # if(match($0, " <<")) complain = 0;
+    # if(match(prev, " +for \\(")) complain = 0;
+    # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
+    # Count the leading spaces on the raw line.
+    initial_spaces = 0
+    cleansed_line = clean_lines.elided[line_number]
+    while initial_spaces < len(line) and line[initial_spaces] == ' ':
+        initial_spaces += 1
+    if line and line[-1].isspace():
+        error(filename, line_number, 'whitespace/end_of_line', 4,
+              'Line ends in whitespace. Consider deleting these extra spaces.')
+    # There are certain situations we allow one space, notably for labels
+    elif ((initial_spaces == 1 or initial_spaces == 3)
+          and not match(r'\s*\w+\s*:\s*$', cleansed_line)):
+        error(filename, line_number, 'whitespace/indent', 3,
+              'Weird number of spaces at line-start. '
+              'Are you using at least 2-space indent?')
+    # Labels should always be indented at least one space.
+    elif not initial_spaces and line[:2] != '//':
+        label_match = match(r'(?P